author    Stephen Hines <srhines@google.com>    2015-03-23 12:10:34 -0700
committer Stephen Hines <srhines@google.com>    2015-03-23 12:10:34 -0700
commit    ebe69fe11e48d322045d5949c83283927a0d790b (patch)
tree      c92f1907a6b8006628a4b01615f38264d29834ea /lib/Target
parent    b7d2e72b02a4cb8034f32f8247a2558d2434e121 (diff)
Update aosp/master LLVM for rebase to r230699.
Change-Id: I2b5be30509658cb8266be782de0ab24f9099f9b9
Diffstat (limited to 'lib/Target')
-rw-r--r--lib/Target/AArch64/AArch64.h3
-rw-r--r--lib/Target/AArch64/AArch64.td2
-rw-r--r--lib/Target/AArch64/AArch64A53Fix835769.cpp13
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp40
-rw-r--r--lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp8
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.cpp40
-rw-r--r--lib/Target/AArch64/AArch64BranchRelaxation.cpp4
-rw-r--r--lib/Target/AArch64/AArch64CallingConvention.h141
-rw-r--r--lib/Target/AArch64/AArch64CallingConvention.td47
-rw-r--r--lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp8
-rw-r--r--lib/Target/AArch64/AArch64CollectLOH.cpp10
-rw-r--r--lib/Target/AArch64/AArch64ConditionOptimizer.cpp2
-rw-r--r--lib/Target/AArch64/AArch64ConditionalCompares.cpp6
-rw-r--r--lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp4
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp70
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.cpp35
-rw-r--r--lib/Target/AArch64/AArch64ISelDAGToDAG.cpp176
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp292
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.h20
-rw-r--r--lib/Target/AArch64/AArch64InstrFormats.td2
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp7
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.h2
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td87
-rw-r--r--lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp22
-rw-r--r--lib/Target/AArch64/AArch64PBQPRegAlloc.cpp6
-rw-r--r--lib/Target/AArch64/AArch64PromoteConstant.cpp152
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.cpp20
-rw-r--r--lib/Target/AArch64/AArch64SelectionDAGInfo.cpp9
-rw-r--r--lib/Target/AArch64/AArch64StorePairSuppress.cpp19
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp15
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h6
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp56
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.h6
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.cpp315
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.h147
-rw-r--r--lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp80
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp12
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h13
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp42
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp2
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp2
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h3
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp3
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp89
-rw-r--r--lib/Target/ARM/ARM.h4
-rw-r--r--lib/Target/ARM/ARM.td36
-rw-r--r--lib/Target/ARM/ARMArchExtName.def30
-rw-r--r--lib/Target/ARM/ARMArchExtName.h26
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.cpp278
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.h22
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp51
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.h4
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp38
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.h2
-rw-r--r--lib/Target/ARM/ARMCallingConv.h161
-rw-r--r--lib/Target/ARM/ARMCallingConv.td2
-rw-r--r--lib/Target/ARM/ARMConstantIslandPass.cpp14
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp18
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp57
-rw-r--r--lib/Target/ARM/ARMFrameLowering.cpp255
-rw-r--r--lib/Target/ARM/ARMFrameLowering.h2
-rw-r--r--lib/Target/ARM/ARMHazardRecognizer.cpp5
-rw-r--r--lib/Target/ARM/ARMISelDAGToDAG.cpp37
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp711
-rw-r--r--lib/Target/ARM/ARMISelLowering.h19
-rw-r--r--lib/Target/ARM/ARMInstrInfo.cpp26
-rw-r--r--lib/Target/ARM/ARMInstrInfo.td265
-rw-r--r--lib/Target/ARM/ARMInstrNEON.td20
-rw-r--r--lib/Target/ARM/ARMInstrThumb.td25
-rw-r--r--lib/Target/ARM/ARMInstrThumb2.td106
-rw-r--r--lib/Target/ARM/ARMInstrVFP.td18
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp68
-rw-r--r--lib/Target/ARM/ARMMCInstLower.cpp36
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.cpp4
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.h2
-rw-r--r--lib/Target/ARM/ARMOptimizeBarriersPass.cpp2
-rw-r--r--lib/Target/ARM/ARMRegisterInfo.td10
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.cpp9
-rw-r--r--lib/Target/ARM/ARMSubtarget.cpp122
-rw-r--r--lib/Target/ARM/ARMSubtarget.h36
-rw-r--r--lib/Target/ARM/ARMTargetMachine.cpp155
-rw-r--r--lib/Target/ARM/ARMTargetMachine.h14
-rw-r--r--lib/Target/ARM/ARMTargetObjectFile.cpp10
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.cpp215
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.h134
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp510
-rw-r--r--lib/Target/ARM/Disassembler/ARMDisassembler.cpp37
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp65
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.h1
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp21
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp61
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp26
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp72
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp37
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp1
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp14
-rw-r--r--lib/Target/ARM/MLxExpansionPass.cpp2
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.cpp54
-rw-r--r--lib/Target/ARM/Thumb1InstrInfo.cpp2
-rw-r--r--lib/Target/ARM/Thumb1RegisterInfo.cpp23
-rw-r--r--lib/Target/ARM/Thumb2ITBlockPass.cpp10
-rw-r--r--lib/Target/ARM/Thumb2InstrInfo.cpp9
-rw-r--r--lib/Target/ARM/Thumb2SizeReduction.cpp13
-rw-r--r--lib/Target/Android.mk3
-rw-r--r--lib/Target/BPF/BPF.h22
-rw-r--r--lib/Target/BPF/BPF.td31
-rw-r--r--lib/Target/BPF/BPFAsmPrinter.cpp87
-rw-r--r--lib/Target/BPF/BPFCallingConv.td29
-rw-r--r--lib/Target/BPF/BPFFrameLowering.cpp39
-rw-r--r--lib/Target/BPF/BPFFrameLowering.h41
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp159
-rw-r--r--lib/Target/BPF/BPFISelLowering.cpp642
-rw-r--r--lib/Target/BPF/BPFISelLowering.h89
-rw-r--r--lib/Target/BPF/BPFInstrFormats.td33
-rw-r--r--lib/Target/BPF/BPFInstrInfo.cpp168
-rw-r--r--lib/Target/BPF/BPFInstrInfo.h60
-rw-r--r--lib/Target/BPF/BPFInstrInfo.td507
-rw-r--r--lib/Target/BPF/BPFMCInstLower.cpp77
-rw-r--r--lib/Target/BPF/BPFMCInstLower.h43
-rw-r--r--lib/Target/BPF/BPFRegisterInfo.cpp88
-rw-r--r--lib/Target/BPF/BPFRegisterInfo.h41
-rw-r--r--lib/Target/BPF/BPFRegisterInfo.td41
-rw-r--r--lib/Target/BPF/BPFSubtarget.cpp31
-rw-r--r--lib/Target/BPF/BPFSubtarget.h64
-rw-r--r--lib/Target/BPF/BPFTargetMachine.cpp69
-rw-r--r--lib/Target/BPF/BPFTargetMachine.h42
-rw-r--r--lib/Target/BPF/CMakeLists.txt27
-rw-r--r--lib/Target/BPF/InstPrinter/BPFInstPrinter.cpp86
-rw-r--r--lib/Target/BPF/InstPrinter/BPFInstPrinter.h41
-rw-r--r--lib/Target/BPF/InstPrinter/CMakeLists.txt3
-rw-r--r--lib/Target/BPF/InstPrinter/LLVMBuild.txt23
-rw-r--r--lib/Target/BPF/InstPrinter/Makefile16
-rw-r--r--lib/Target/BPF/LLVMBuild.txt32
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp83
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp53
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h36
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp167
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp111
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h59
-rw-r--r--lib/Target/BPF/MCTargetDesc/CMakeLists.txt6
-rw-r--r--lib/Target/BPF/MCTargetDesc/LLVMBuild.txt23
-rw-r--r--lib/Target/BPF/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/BPF/Makefile21
-rw-r--r--lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp18
-rw-r--r--lib/Target/BPF/TargetInfo/CMakeLists.txt3
-rw-r--r--lib/Target/BPF/TargetInfo/LLVMBuild.txt23
-rw-r--r--lib/Target/BPF/TargetInfo/Makefile16
-rw-r--r--lib/Target/CMakeLists.txt8
-rw-r--r--lib/Target/CppBackend/CPPBackend.cpp4
-rw-r--r--lib/Target/Hexagon/CMakeLists.txt1
-rw-r--r--lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp105
-rw-r--r--lib/Target/Hexagon/Disassembler/LLVMBuild.txt2
-rw-r--r--lib/Target/Hexagon/Hexagon.h20
-rw-r--r--lib/Target/Hexagon/Hexagon.td48
-rw-r--r--lib/Target/Hexagon/HexagonAsmPrinter.cpp36
-rw-r--r--lib/Target/Hexagon/HexagonAsmPrinter.h9
-rw-r--r--lib/Target/Hexagon/HexagonCFGOptimizer.cpp42
-rw-r--r--lib/Target/Hexagon/HexagonCallingConvLower.cpp206
-rw-r--r--lib/Target/Hexagon/HexagonCallingConvLower.h187
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp50
-rw-r--r--lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp62
-rw-r--r--lib/Target/Hexagon/HexagonFixupHwLoops.cpp12
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp25
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp72
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp393
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp116
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h28
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormats.td35
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormatsV4.td5
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp729
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.td6862
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV3.td249
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV4.td5434
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV5.td1424
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoVector.td65
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsics.td4509
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsDerived.td11
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV3.td51
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV4.td578
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV5.td506
-rw-r--r--lib/Target/Hexagon/HexagonMCInstLower.cpp4
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp15
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.h8
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp95
-rw-r--r--lib/Target/Hexagon/HexagonOperands.td344
-rw-r--r--lib/Target/Hexagon/HexagonPeephole.cpp23
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.cpp90
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.td131
-rw-r--r--lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp24
-rw-r--r--lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp115
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.cpp12
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.h23
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp50
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.h3
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.cpp15
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp173
-rw-r--r--lib/Target/Hexagon/HexagonVarargsCallingConvention.h149
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt2
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp25
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h11
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp15
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h5
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.cpp176
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h100
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp223
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h106
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp2
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h2
-rw-r--r--lib/Target/LLVMBuild.txt4
-rw-r--r--lib/Target/MSP430/MSP430AsmPrinter.cpp4
-rw-r--r--lib/Target/MSP430/MSP430ISelDAGToDAG.cpp7
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.cpp47
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.h9
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.td6
-rw-r--r--lib/Target/MSP430/MSP430MCInstLower.cpp5
-rw-r--r--lib/Target/MSP430/MSP430Subtarget.cpp8
-rw-r--r--lib/Target/MSP430/MSP430Subtarget.h4
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.cpp14
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.h2
-rw-r--r--lib/Target/MSP430/README.txt1
-rw-r--r--lib/Target/Mips/Android.mk1
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp723
-rw-r--r--lib/Target/Mips/CMakeLists.txt1
-rw-r--r--lib/Target/Mips/Disassembler/MipsDisassembler.cpp618
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp12
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.h1
-rw-r--r--lib/Target/Mips/MCTargetDesc/Android.mk1
-rw-r--r--lib/Target/Mips/MCTargetDesc/CMakeLists.txt1
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h8
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp92
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.h (renamed from lib/Target/Mips/MipsABIInfo.h)12
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp24
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp14
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h6
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp1
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp204
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h52
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp23
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h11
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp19
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp117
-rw-r--r--lib/Target/Mips/MicroMipsInstrFormats.td126
-rw-r--r--lib/Target/Mips/MicroMipsInstrInfo.td349
-rw-r--r--lib/Target/Mips/Mips.h1
-rw-r--r--lib/Target/Mips/Mips.td56
-rw-r--r--lib/Target/Mips/Mips16FrameLowering.cpp8
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp26
-rw-r--r--lib/Target/Mips/Mips16HardFloat.h16
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.cpp13
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.cpp65
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.cpp6
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.td16
-rw-r--r--lib/Target/Mips/Mips16RegisterInfo.cpp7
-rw-r--r--lib/Target/Mips/Mips32r6InstrInfo.td8
-rw-r--r--lib/Target/Mips/Mips64InstrInfo.td124
-rw-r--r--lib/Target/Mips/MipsABIInfo.cpp45
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp183
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.h41
-rw-r--r--lib/Target/Mips/MipsCCState.cpp4
-rw-r--r--lib/Target/Mips/MipsCCState.h6
-rw-r--r--lib/Target/Mips/MipsCallingConv.td45
-rw-r--r--lib/Target/Mips/MipsCondMov.td37
-rw-r--r--lib/Target/Mips/MipsConstantIslandPass.cpp8
-rw-r--r--lib/Target/Mips/MipsDelaySlotFiller.cpp188
-rw-r--r--lib/Target/Mips/MipsFastISel.cpp282
-rw-r--r--lib/Target/Mips/MipsFrameLowering.cpp2
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.cpp17
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.h8
-rw-r--r--lib/Target/Mips/MipsISelLowering.cpp447
-rw-r--r--lib/Target/Mips/MipsISelLowering.h47
-rw-r--r--lib/Target/Mips/MipsInstrFPU.td28
-rw-r--r--lib/Target/Mips/MipsInstrFormats.td27
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp2
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td84
-rw-r--r--lib/Target/Mips/MipsLongBranch.cpp55
-rw-r--r--lib/Target/Mips/MipsMachineFunction.cpp30
-rw-r--r--lib/Target/Mips/MipsOptimizePICCall.cpp2
-rw-r--r--lib/Target/Mips/MipsOptionRecord.h6
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.cpp10
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.td42
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.cpp80
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.cpp67
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h3
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp90
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.cpp27
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.h1
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp147
-rw-r--r--lib/Target/Mips/MipsSubtarget.h59
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp99
-rw-r--r--lib/Target/Mips/MipsTargetMachine.h11
-rw-r--r--lib/Target/Mips/MipsTargetObjectFile.cpp24
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h24
-rw-r--r--lib/Target/NVPTX/LLVMBuild.txt2
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp1
-rw-r--r--lib/Target/NVPTX/NVPTX.h5
-rw-r--r--lib/Target/NVPTX/NVPTXAllocaHoisting.h3
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.cpp350
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.h39
-rw-r--r--lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.cpp7
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.h10
-rw-r--r--lib/Target/NVPTX/NVPTXGenericToNVVM.cpp61
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp124
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.h5
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.cpp134
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.h12
-rw-r--r--lib/Target/NVPTX/NVPTXImageOptimizer.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.cpp6
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.h2
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.td72
-rw-r--r--lib/Target/NVPTX/NVPTXLowerAggrCopies.h3
-rw-r--r--lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp6
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.cpp3
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.h8
-rw-r--r--lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp8
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.cpp40
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.h20
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.cpp41
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.h13
-rw-r--r--lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp88
-rw-r--r--lib/Target/NVPTX/NVPTXTargetTransformInfo.h74
-rw-r--r--lib/Target/NVPTX/NVPTXUtilities.cpp13
-rw-r--r--lib/Target/NVPTX/NVPTXVector.td4
-rw-r--r--lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp53
-rw-r--r--lib/Target/PowerPC/CMakeLists.txt6
-rw-r--r--lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp29
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp23
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h1
-rw-r--r--lib/Target/PowerPC/LLVMBuild.txt2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp5
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h3
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp4
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp17
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp16
-rw-r--r--lib/Target/PowerPC/PPC.h14
-rw-r--r--lib/Target/PowerPC/PPC.td104
-rw-r--r--lib/Target/PowerPC/PPCAsmPrinter.cpp343
-rw-r--r--lib/Target/PowerPC/PPCBranchSelector.cpp25
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp37
-rw-r--r--lib/Target/PowerPC/PPCCallingConv.h35
-rw-r--r--lib/Target/PowerPC/PPCCallingConv.td83
-rw-r--r--lib/Target/PowerPC/PPCEarlyReturn.cpp201
-rw-r--r--lib/Target/PowerPC/PPCFastISel.cpp203
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.cpp252
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.h48
-rw-r--r--lib/Target/PowerPC/PPCHazardRecognizers.cpp4
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp2314
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp2863
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h211
-rw-r--r--lib/Target/PowerPC/PPCInstr64Bit.td142
-rw-r--r--lib/Target/PowerPC/PPCInstrAltivec.td64
-rw-r--r--lib/Target/PowerPC/PPCInstrFormats.td175
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp746
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.h9
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td247
-rw-r--r--lib/Target/PowerPC/PPCInstrQPX.td1192
-rw-r--r--lib/Target/PowerPC/PPCInstrVSX.td81
-rw-r--r--lib/Target/PowerPC/PPCLoopDataPrefetch.cpp231
-rw-r--r--lib/Target/PowerPC/PPCLoopPreIncPrep.cpp382
-rw-r--r--lib/Target/PowerPC/PPCMCInstLower.cpp10
-rw-r--r--lib/Target/PowerPC/PPCMachineFunctionInfo.cpp7
-rw-r--r--lib/Target/PowerPC/PPCMachineFunctionInfo.h14
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.cpp123
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.h2
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.td86
-rw-r--r--lib/Target/PowerPC/PPCSchedule.td3
-rw-r--r--lib/Target/PowerPC/PPCSchedule440.td8
-rw-r--r--lib/Target/PowerPC/PPCScheduleA2.td2
-rw-r--r--lib/Target/PowerPC/PPCScheduleE500mc.td6
-rw-r--r--lib/Target/PowerPC/PPCScheduleE5500.td6
-rw-r--r--lib/Target/PowerPC/PPCScheduleP7.td7
-rw-r--r--lib/Target/PowerPC/PPCScheduleP8.td401
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.cpp92
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.h48
-rw-r--r--lib/Target/PowerPC/PPCTLSDynamicCall.cpp168
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.cpp161
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.h23
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.cpp253
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.h103
-rw-r--r--lib/Target/PowerPC/PPCVSXCopy.cpp176
-rw-r--r--lib/Target/PowerPC/PPCVSXFMAMutate.cpp335
-rw-r--r--lib/Target/PowerPC/README.txt275
-rw-r--r--lib/Target/R600/AMDGPU.h16
-rw-r--r--lib/Target/R600/AMDGPU.td17
-rw-r--r--lib/Target/R600/AMDGPUAsmPrinter.cpp288
-rw-r--r--lib/Target/R600/AMDGPUAsmPrinter.h31
-rw-r--r--lib/Target/R600/AMDGPUISelDAGToDAG.cpp231
-rw-r--r--lib/Target/R600/AMDGPUISelLowering.cpp480
-rw-r--r--lib/Target/R600/AMDGPUISelLowering.h40
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.cpp40
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.h11
-rw-r--r--lib/Target/R600/AMDGPUInstrInfo.td18
-rw-r--r--lib/Target/R600/AMDGPUInstructions.td73
-rw-r--r--lib/Target/R600/AMDGPUMCInstLower.cpp51
-rw-r--r--lib/Target/R600/AMDGPUMCInstLower.h13
-rw-r--r--lib/Target/R600/AMDGPUMachineFunction.cpp4
-rw-r--r--lib/Target/R600/AMDGPURegisterInfo.cpp3
-rw-r--r--lib/Target/R600/AMDGPUSubtarget.cpp75
-rw-r--r--lib/Target/R600/AMDGPUSubtarget.h42
-rw-r--r--lib/Target/R600/AMDGPUTargetMachine.cpp217
-rw-r--r--lib/Target/R600/AMDGPUTargetMachine.h45
-rw-r--r--lib/Target/R600/AMDGPUTargetTransformInfo.cpp94
-rw-r--r--lib/Target/R600/AMDGPUTargetTransformInfo.h78
-rw-r--r--lib/Target/R600/AMDKernelCodeT.h704
-rw-r--r--lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp32
-rw-r--r--lib/Target/R600/CIInstructions.td42
-rw-r--r--lib/Target/R600/CMakeLists.txt2
-rw-r--r--lib/Target/R600/CaymanInstructions.td4
-rw-r--r--lib/Target/R600/EvergreenInstructions.td14
-rw-r--r--lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp71
-rw-r--r--lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h3
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp2
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp1
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp10
-rw-r--r--lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h1
-rw-r--r--lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp4
-rw-r--r--lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp149
-rw-r--r--lib/Target/R600/Processors.td38
-rw-r--r--lib/Target/R600/R600ControlFlowFinalizer.cpp46
-rw-r--r--lib/Target/R600/R600ISelLowering.cpp62
-rw-r--r--lib/Target/R600/R600ISelLowering.h2
-rw-r--r--lib/Target/R600/R600Instructions.td36
-rw-r--r--lib/Target/R600/R600MachineScheduler.cpp7
-rw-r--r--lib/Target/R600/R600Packetizer.cpp2
-rw-r--r--lib/Target/R600/R700Instructions.td2
-rw-r--r--lib/Target/R600/SIAnnotateControlFlow.cpp27
-rw-r--r--lib/Target/R600/SIDefines.h72
-rw-r--r--lib/Target/R600/SIFixSGPRCopies.cpp36
-rw-r--r--lib/Target/R600/SIFoldOperands.cpp287
-rw-r--r--lib/Target/R600/SIISelLowering.cpp867
-rw-r--r--lib/Target/R600/SIISelLowering.h20
-rw-r--r--lib/Target/R600/SIInsertWaits.cpp117
-rw-r--r--lib/Target/R600/SIInstrFormats.td429
-rw-r--r--lib/Target/R600/SIInstrInfo.cpp621
-rw-r--r--lib/Target/R600/SIInstrInfo.h128
-rw-r--r--lib/Target/R600/SIInstrInfo.td1442
-rw-r--r--lib/Target/R600/SIInstructions.td1846
-rw-r--r--lib/Target/R600/SILoadStoreOptimizer.cpp43
-rw-r--r--lib/Target/R600/SILowerControlFlow.cpp44
-rw-r--r--lib/Target/R600/SILowerI1Copies.cpp78
-rw-r--r--lib/Target/R600/SIMachineFunctionInfo.cpp9
-rw-r--r--lib/Target/R600/SIMachineFunctionInfo.h4
-rw-r--r--lib/Target/R600/SIPrepareScratchRegs.cpp208
-rw-r--r--lib/Target/R600/SIRegisterInfo.cpp247
-rw-r--r--lib/Target/R600/SIRegisterInfo.h39
-rw-r--r--lib/Target/R600/SIRegisterInfo.td50
-rw-r--r--lib/Target/R600/SISchedule.td80
-rw-r--r--lib/Target/R600/SIShrinkInstructions.cpp37
-rw-r--r--lib/Target/R600/SITypeRewriter.cpp3
-rw-r--r--lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp6
-rw-r--r--lib/Target/R600/VIInstrFormats.td166
-rw-r--r--lib/Target/R600/VIInstructions.td25
-rw-r--r--lib/Target/Sparc/AsmParser/SparcAsmParser.cpp5
-rw-r--r--lib/Target/Sparc/DelaySlotFiller.cpp17
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp4
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp4
-rw-r--r--lib/Target/Sparc/SparcAsmPrinter.cpp12
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.cpp7
-rw-r--r--lib/Target/Sparc/SparcISelDAGToDAG.cpp15
-rw-r--r--lib/Target/Sparc/SparcISelLowering.cpp52
-rw-r--r--lib/Target/Sparc/SparcISelLowering.h8
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.td18
-rw-r--r--lib/Target/Sparc/SparcSubtarget.cpp30
-rw-r--r--lib/Target/Sparc/SparcSubtarget.h2
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.cpp37
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.h2
-rw-r--r--lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp94
-rw-r--r--lib/Target/SystemZ/CMakeLists.txt1
-rw-r--r--lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp24
-rw-r--r--lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h1
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp8
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp2
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp34
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h3
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp48
-rw-r--r--lib/Target/SystemZ/SystemZ.h1
-rw-r--r--lib/Target/SystemZ/SystemZAsmPrinter.cpp46
-rw-r--r--lib/Target/SystemZ/SystemZAsmPrinter.h9
-rw-r--r--lib/Target/SystemZ/SystemZConstantPoolValue.cpp5
-rw-r--r--lib/Target/SystemZ/SystemZConstantPoolValue.h8
-rw-r--r--lib/Target/SystemZ/SystemZElimCompare.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp16
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp193
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.h16
-rw-r--r--lib/Target/SystemZ/SystemZInstrFP.td4
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.h7
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.td46
-rw-r--r--lib/Target/SystemZ/SystemZLDCleanup.cpp143
-rw-r--r--lib/Target/SystemZ/SystemZMCInstLower.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZMachineFunctionInfo.h8
-rw-r--r--lib/Target/SystemZ/SystemZOperands.td30
-rw-r--r--lib/Target/SystemZ/SystemZOperators.td7
-rw-r--r--lib/Target/SystemZ/SystemZProcessors.td4
-rw-r--r--lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp11
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.cpp9
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.h2
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.cpp22
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.h2
-rw-r--r--lib/Target/Target.cpp17
-rw-r--r--lib/Target/TargetLibraryInfo.cpp753
-rw-r--r--lib/Target/TargetLoweringObjectFile.cpp31
-rw-r--r--lib/Target/TargetMachine.cpp51
-rw-r--r--lib/Target/TargetMachineC.cpp14
-rw-r--r--lib/Target/X86/Android.mk1
-rw-r--r--lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp102
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp221
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParserCommon.h5
-rw-r--r--lib/Target/X86/AsmParser/X86Operand.h75
-rw-r--r--lib/Target/X86/CMakeLists.txt3
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.cpp290
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp63
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.h19
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h31
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp60
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.h13
-rw-r--r--lib/Target/X86/InstPrinter/X86InstComments.cpp1706
-rw-r--r--lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp63
-rw-r--r--lib/Target/X86/InstPrinter/X86IntelInstPrinter.h13
-rw-r--r--lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp37
-rw-r--r--lib/Target/X86/MCTargetDesc/X86BaseInfo.h80
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp3
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp12
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h3
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp126
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp19
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h4
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp120
-rw-r--r--lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp6
-rw-r--r--lib/Target/X86/TargetInfo/X86TargetInfo.cpp2
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.cpp829
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.h198
-rw-r--r--lib/Target/X86/X86.h8
-rw-r--r--lib/Target/X86/X86.td238
-rw-r--r--lib/Target/X86/X86AsmPrinter.cpp41
-rw-r--r--lib/Target/X86/X86AsmPrinter.h8
-rw-r--r--lib/Target/X86/X86CallFrameOptimization.cpp480
-rw-r--r--lib/Target/X86/X86CallingConv.td11
-rw-r--r--lib/Target/X86/X86FastISel.cpp358
-rw-r--r--lib/Target/X86/X86FixupLEAs.cpp14
-rw-r--r--lib/Target/X86/X86FloatingPoint.cpp10
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp827
-rw-r--r--lib/Target/X86/X86FrameLowering.h28
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp69
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp7836
-rw-r--r--lib/Target/X86/X86ISelLowering.h65
-rw-r--r--lib/Target/X86/X86InstrAVX512.td1945
-rw-r--r--lib/Target/X86/X86InstrArithmetic.td331
-rw-r--r--lib/Target/X86/X86InstrCompiler.td260
-rw-r--r--lib/Target/X86/X86InstrControl.td129
-rw-r--r--lib/Target/X86/X86InstrExtension.td16
-rw-r--r--lib/Target/X86/X86InstrFMA.td6
-rw-r--r--lib/Target/X86/X86InstrFPStack.td84
-rw-r--r--lib/Target/X86/X86InstrFormats.td165
-rw-r--r--lib/Target/X86/X86InstrFragmentsSIMD.td140
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp1433
-rw-r--r--lib/Target/X86/X86InstrInfo.h6
-rw-r--r--lib/Target/X86/X86InstrInfo.td832
-rw-r--r--lib/Target/X86/X86InstrMMX.td88
-rw-r--r--lib/Target/X86/X86InstrSGX.td4
-rw-r--r--lib/Target/X86/X86InstrSSE.td2681
-rw-r--r--lib/Target/X86/X86InstrShiftRotate.td58
-rw-r--r--lib/Target/X86/X86InstrSystem.td66
-rw-r--r--lib/Target/X86/X86InstrTSX.td9
-rw-r--r--lib/Target/X86/X86InstrXOP.td206
-rw-r--r--lib/Target/X86/X86IntrinsicsInfo.h314
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp138
-rw-r--r--lib/Target/X86/X86MachineFunctionInfo.cpp19
-rw-r--r--lib/Target/X86/X86MachineFunctionInfo.h48
-rw-r--r--lib/Target/X86/X86PadShortFunction.cpp20
-rw-r--r--lib/Target/X86/X86RegisterInfo.cpp40
-rw-r--r--lib/Target/X86/X86RegisterInfo.h1
-rw-r--r--lib/Target/X86/X86RegisterInfo.td44
-rw-r--r--lib/Target/X86/X86SchedHaswell.td2
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.cpp17
-rw-r--r--lib/Target/X86/X86Subtarget.cpp56
-rw-r--r--lib/Target/X86/X86Subtarget.h166
-rw-r--r--lib/Target/X86/X86TargetMachine.cpp99
-rw-r--r--lib/Target/X86/X86TargetMachine.h16
-rw-r--r--lib/Target/X86/X86TargetObjectFile.cpp16
-rw-r--r--lib/Target/X86/X86TargetObjectFile.h6
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp348
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.h112
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp32
-rw-r--r--lib/Target/XCore/CMakeLists.txt1
-rw-r--r--lib/Target/XCore/XCore.h2
-rw-r--r--lib/Target/XCore/XCoreAsmPrinter.cpp13
-rw-r--r--lib/Target/XCore/XCoreFrameLowering.cpp9
-rw-r--r--lib/Target/XCore/XCoreISelDAGToDAG.cpp4
-rw-r--r--lib/Target/XCore/XCoreISelLowering.cpp44
-rw-r--r--lib/Target/XCore/XCoreISelLowering.h9
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.td34
-rw-r--r--lib/Target/XCore/XCoreLowerThreadLocal.cpp2
-rw-r--r--lib/Target/XCore/XCoreSubtarget.cpp5
-rw-r--r--lib/Target/XCore/XCoreSubtarget.h2
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.cpp20
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.h4
-rw-r--r--lib/Target/XCore/XCoreTargetObjectFile.cpp96
-rw-r--r--lib/Target/XCore/XCoreTargetTransformInfo.cpp80
-rw-r--r--lib/Target/XCore/XCoreTargetTransformInfo.h72
609 files changed, 53923 insertions, 34391 deletions
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index e96d18b..21106c9 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -40,9 +40,6 @@ FunctionPass *createAArch64ConditionOptimizerPass();
FunctionPass *createAArch64AddressTypePromotionPass();
FunctionPass *createAArch64A57FPLoadBalancing();
FunctionPass *createAArch64A53Fix835769();
-/// \brief Creates an ARM-specific Target Transformation Info pass.
-ImmutablePass *
-createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM);
FunctionPass *createAArch64CleanupLocalDynamicTLSPass();
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index e6a27c3..dff48f9 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -91,6 +91,8 @@ def : ProcessorModel<"generic", NoSchedModel, [FeatureFPARMv8,
def : ProcessorModel<"cortex-a53", CortexA53Model, [ProcA53]>;
def : ProcessorModel<"cortex-a57", CortexA57Model, [ProcA57]>;
+// FIXME: Cortex-A72 is currently modelled as a Cortex-A57.
+def : ProcessorModel<"cortex-a72", CortexA57Model, [ProcA57]>;
def : ProcessorModel<"cyclone", CycloneModel, [ProcCyclone]>;
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/AArch64/AArch64A53Fix835769.cpp b/lib/Target/AArch64/AArch64A53Fix835769.cpp
index 852a635..dd401c6 100644
--- a/lib/Target/AArch64/AArch64A53Fix835769.cpp
+++ b/lib/Target/AArch64/AArch64A53Fix835769.cpp
@@ -16,8 +16,6 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
-#include "AArch64InstrInfo.h"
-#include "AArch64Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -26,6 +24,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
@@ -79,7 +78,7 @@ static bool isSecondInstructionInSequence(MachineInstr *MI) {
namespace {
class AArch64A53Fix835769 : public MachineFunctionPass {
- const AArch64InstrInfo *TII;
+ const TargetInstrInfo *TII;
public:
static char ID;
@@ -107,17 +106,13 @@ char AArch64A53Fix835769::ID = 0;
bool
AArch64A53Fix835769::runOnMachineFunction(MachineFunction &F) {
- const TargetMachine &TM = F.getTarget();
-
- bool Changed = false;
DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n");
-
- TII = TM.getSubtarget<AArch64Subtarget>().getInstrInfo();
+ bool Changed = false;
+ TII = F.getSubtarget().getInstrInfo();
for (auto &MBB : F) {
Changed |= runOnBasicBlock(MBB);
}
-
return Changed;
}
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 2503764..2cf3c22 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -38,8 +38,8 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -96,6 +96,10 @@ static bool isMla(MachineInstr *MI) {
}
}
+namespace llvm {
+static void initializeAArch64A57FPLoadBalancingPass(PassRegistry &);
+}
+
//===----------------------------------------------------------------------===//
namespace {
@@ -109,14 +113,15 @@ static const char *ColorNames[2] = { "Even", "Odd" };
class Chain;
class AArch64A57FPLoadBalancing : public MachineFunctionPass {
- const AArch64InstrInfo *TII;
MachineRegisterInfo *MRI;
const TargetRegisterInfo *TRI;
RegisterClassInfo RCI;
public:
static char ID;
- explicit AArch64A57FPLoadBalancing() : MachineFunctionPass(ID) {}
+ explicit AArch64A57FPLoadBalancing() : MachineFunctionPass(ID) {
+ initializeAArch64A57FPLoadBalancingPass(*PassRegistry::getPassRegistry());
+ }
bool runOnMachineFunction(MachineFunction &F) override;
@@ -143,8 +148,16 @@ private:
Color getColor(unsigned Register);
Chain *getAndEraseNext(Color PreferredColor, std::vector<Chain*> &L);
};
+}
+
char AArch64A57FPLoadBalancing::ID = 0;
+INITIALIZE_PASS_BEGIN(AArch64A57FPLoadBalancing, DEBUG_TYPE,
+ "AArch64 A57 FP Load-Balancing", false, false)
+INITIALIZE_PASS_END(AArch64A57FPLoadBalancing, DEBUG_TYPE,
+ "AArch64 A57 FP Load-Balancing", false, false)
+
+namespace {
/// A Chain is a sequence of instructions that are linked together by
/// an accumulation operand. For example:
///
@@ -259,7 +272,7 @@ public:
}
/// Return true if this chain starts before Other.
- bool startsBefore(Chain *Other) {
+ bool startsBefore(const Chain *Other) const {
return StartInstIdx < Other->StartInstIdx;
}
@@ -297,10 +310,8 @@ bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
bool Changed = false;
DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n");
- const TargetMachine &TM = F.getTarget();
MRI = &F.getRegInfo();
TRI = F.getRegInfo().getTargetRegisterInfo();
- TII = TM.getSubtarget<AArch64Subtarget>().getInstrInfo();
RCI.runOnMachineFunction(F);
for (auto &MBB : F) {
@@ -431,10 +442,17 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
// chains that we cannot change before we look at those we can,
// so the parity counter is updated and we know what color we should
// change them to!
+ // Final tie-break with instruction order so pass output is stable (i.e. not
+ // dependent on malloc'd pointer values).
std::sort(GV.begin(), GV.end(), [](const Chain *G1, const Chain *G2) {
if (G1->size() != G2->size())
return G1->size() > G2->size();
- return G1->requiresFixup() > G2->requiresFixup();
+ if (G1->requiresFixup() != G2->requiresFixup())
+ return G1->requiresFixup() > G2->requiresFixup();
+ // Make sure startsBefore() produces a stable final order.
+ assert((G1 == G2 || (G1->startsBefore(G2) ^ G2->startsBefore(G1))) &&
+ "Starts before not total order!");
+ return G1->startsBefore(G2);
});
Color PreferredColor = Parity < 0 ? Color::Even : Color::Odd;
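
The comparator above must be a strict weak ordering, and the final startsBefore() tie-break is what makes std::sort's output independent of heap allocation addresses. A minimal standalone sketch of the same three-level pattern, using a hypothetical Item struct in place of the pass's Chain class:

#include <algorithm>
#include <cassert>
#include <vector>

// Stand-in for Chain: size, fixup flag, and a unique program-order index.
struct Item {
  unsigned Size;
  bool RequiresFixup;
  unsigned StartIdx; // unique per item; gives the deterministic tie-break
};

// Same shape as the pass's lambda: sort by size, then by fixup need, then
// fall back to program order so the result never depends on pointer values.
static bool comesFirst(const Item &A, const Item &B) {
  if (A.Size != B.Size)
    return A.Size > B.Size;
  if (A.RequiresFixup != B.RequiresFixup)
    return A.RequiresFixup > B.RequiresFixup;
  return A.StartIdx < B.StartIdx;
}

int main() {
  std::vector<Item> V = {{4, false, 2}, {4, false, 0}, {8, true, 1}};
  std::sort(V.begin(), V.end(), comesFirst);
  // Largest first, then equal items in instruction order.
  assert(V[0].StartIdx == 1 && V[1].StartIdx == 0 && V[2].StartIdx == 2);
}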
@@ -481,10 +499,16 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
RS.forward(I);
AvailableRegs &= RS.getRegsAvailable(TRI->getRegClass(RegClassID));
- // Remove any registers clobbered by a regmask.
+ // Remove any registers clobbered by a regmask or any def register that is
+ // immediately dead.
for (auto J : I->operands()) {
if (J.isRegMask())
AvailableRegs.clearBitsNotInMask(J.getRegMask());
+
+ if (J.isReg() && J.isDef() && AvailableRegs[J.getReg()]) {
+ assert(J.isDead() && "Non-dead def should have been removed by now!");
+ AvailableRegs.reset(J.getReg());
+ }
}
}
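
The loop above prunes two kinds of registers from the candidate set: anything clobbered by a regmask operand, and any def that is dead at this point (the assert documents that live defs were already removed by the scavenger's forward pass). A self-contained sketch of the same pruning over a plain bitset, with a toy operand type standing in for MachineOperand:

#include <bitset>
#include <vector>

constexpr unsigned NumRegs = 32;

// One operand of a toy instruction: either a clobber mask or a register def.
struct Operand {
  bool IsRegMask;
  std::bitset<NumRegs> PreservedMask; // valid when IsRegMask
  unsigned Reg;                       // valid when !IsRegMask
  bool IsDeadDef;
};

// Mirror of the pass's loop: drop mask-clobbered regs and dead defs.
static void pruneAvailable(std::bitset<NumRegs> &Available,
                           const std::vector<Operand> &Ops) {
  for (const Operand &Op : Ops) {
    if (Op.IsRegMask)
      Available &= Op.PreservedMask; // clearBitsNotInMask equivalent
    else if (Op.IsDeadDef && Available[Op.Reg])
      Available.reset(Op.Reg);       // def dies here; don't hand it out
  }
}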
diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 5afe0f4..f27dfc9 100644
--- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -64,7 +64,7 @@ STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");
namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
MachineRegisterInfo *MRI;
- const AArch64InstrInfo *TII;
+ const TargetInstrInfo *TII;
private:
// isProfitableToTransform - Predicate function to determine whether an
@@ -268,7 +268,7 @@ AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
return TransformAll;
}
-static MachineInstr *insertCopy(const AArch64InstrInfo *TII, MachineInstr *MI,
+static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr *MI,
unsigned Dst, unsigned Src, bool IsKill) {
MachineInstrBuilder MIB =
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
@@ -376,10 +376,8 @@ bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
bool Changed = false;
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
- const TargetMachine &TM = mf.getTarget();
MRI = &mf.getRegInfo();
- TII = static_cast<const AArch64InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ TII = mf.getSubtarget().getInstrInfo();
// Just check things on a one-block-at-a-time basis.
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 8bee4f5..d64d851 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -43,19 +43,13 @@ using namespace llvm;
namespace {
class AArch64AsmPrinter : public AsmPrinter {
- /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
- /// make the right decision when printing asm code for different targets.
- const AArch64Subtarget *Subtarget;
-
AArch64MCInstLower MCInstLowering;
StackMaps SM;
public:
- AArch64AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer),
- Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
- MCInstLowering(OutContext, *this), SM(*this), AArch64FI(nullptr),
- LOHLabelCounter(0) {}
+ AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
+ SM(*this), AArch64FI(nullptr), LOHLabelCounter(0) {}
const char *getPassName() const override {
return "AArch64 Assembly Printer";
@@ -124,7 +118,8 @@ private:
//===----------------------------------------------------------------------===//
void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetMachO()) {
+ Triple TT(TM.getTargetTriple());
+ if (TT.isOSBinFormatMachO()) {
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
// implementation of multiple entry points). If this doesn't occur, the
@@ -135,7 +130,7 @@ void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
// Emit a .data.rel section containing any stubs that were created.
- if (Subtarget->isTargetELF()) {
+ if (TT.isOSBinFormatELF()) {
const TargetLoweringObjectFileELF &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
@@ -145,7 +140,7 @@ void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
@@ -252,8 +247,8 @@ bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
const TargetRegisterClass *RC,
bool isVector, raw_ostream &O) {
assert(MO.isReg() && "Should only get here with a register!");
- const AArch64RegisterInfo *RI = static_cast<const AArch64RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const AArch64RegisterInfo *RI =
+ MF->getSubtarget<AArch64Subtarget>().getRegisterInfo();
unsigned Reg = MO.getReg();
unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
assert(RI->regsOverlap(RegToPrint, Reg));
@@ -381,8 +376,23 @@ void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
unsigned NumNOPBytes = MI.getOperand(1).getImm();
SM.recordStackMap(MI);
- // Emit padding.
assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
+
+ // Scan ahead to trim the shadow.
+ const MachineBasicBlock &MBB = *MI.getParent();
+ MachineBasicBlock::const_iterator MII(MI);
+ ++MII;
+ while (NumNOPBytes > 0) {
+ if (MII == MBB.end() || MII->isCall() ||
+ MII->getOpcode() == AArch64::DBG_VALUE ||
+ MII->getOpcode() == TargetOpcode::PATCHPOINT ||
+ MII->getOpcode() == TargetOpcode::STACKMAP)
+ break;
+ ++MII;
+ NumNOPBytes -= 4;
+ }
+
+ // Emit nops.
for (unsigned i = 0; i < NumNOPBytes; i += 4)
EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
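
The new scan walks forward from the STACKMAP, counting each ordinary instruction against the requested shadow and stopping at anything the shadow must not swallow (block end, calls, other patch points), so only the remaining bytes are padded with NOPs. A rough standalone model of that accounting, assuming fixed 4-byte encodings (which the pass asserts) and a simplified instruction kind in place of the real opcode checks:

#include <vector>

enum class Kind { Ordinary, Call, Patchable };

// Given the requested shadow size (a multiple of 4) and the instructions
// that follow the STACKMAP, return how many NOP bytes remain to be emitted.
static unsigned nopBytesToEmit(unsigned ShadowBytes,
                               const std::vector<Kind> &Following) {
  for (Kind K : Following) {
    if (ShadowBytes == 0 || K != Kind::Ordinary)
      break;            // a call or patch point ends the shadow early
    ShadowBytes -= 4;   // this instruction already covers 4 shadow bytes
  }
  return ShadowBytes;
}

// E.g. a 16-byte shadow followed by two ordinary instructions and a call
// needs only 16 - 2*4 = 8 bytes of explicit NOPs.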
diff --git a/lib/Target/AArch64/AArch64BranchRelaxation.cpp b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
index e2b6367..d973234 100644
--- a/lib/Target/AArch64/AArch64BranchRelaxation.cpp
+++ b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
@@ -476,9 +476,7 @@ bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n");
- TII = (const AArch64InstrInfo *)MF->getTarget()
- .getSubtargetImpl()
- ->getInstrInfo();
+ TII = (const AArch64InstrInfo *)MF->getSubtarget().getInstrInfo();
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
diff --git a/lib/Target/AArch64/AArch64CallingConvention.h b/lib/Target/AArch64/AArch64CallingConvention.h
new file mode 100644
index 0000000..1e2d1c3
--- /dev/null
+++ b/lib/Target/AArch64/AArch64CallingConvention.h
@@ -0,0 +1,141 @@
+//=== AArch64CallingConv.h - Custom Calling Convention Routines -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the custom routines for the AArch64 Calling Convention
+// that aren't done by tablegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64CALLINGCONVENTION_H
+#define LLVM_LIB_TARGET_AARCH64_AARCH64CALLINGCONVENTION_H
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+namespace {
+using namespace llvm;
+
+static const uint16_t XRegList[] = {AArch64::X0, AArch64::X1, AArch64::X2,
+ AArch64::X3, AArch64::X4, AArch64::X5,
+ AArch64::X6, AArch64::X7};
+static const uint16_t HRegList[] = {AArch64::H0, AArch64::H1, AArch64::H2,
+ AArch64::H3, AArch64::H4, AArch64::H5,
+ AArch64::H6, AArch64::H7};
+static const uint16_t SRegList[] = {AArch64::S0, AArch64::S1, AArch64::S2,
+ AArch64::S3, AArch64::S4, AArch64::S5,
+ AArch64::S6, AArch64::S7};
+static const uint16_t DRegList[] = {AArch64::D0, AArch64::D1, AArch64::D2,
+ AArch64::D3, AArch64::D4, AArch64::D5,
+ AArch64::D6, AArch64::D7};
+static const uint16_t QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
+ AArch64::Q3, AArch64::Q4, AArch64::Q5,
+ AArch64::Q6, AArch64::Q7};
+
+static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
+ MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
+ CCState &State, unsigned SlotAlign) {
+ unsigned Size = LocVT.getSizeInBits() / 8;
+ unsigned StackAlign = State.getMachineFunction()
+ .getTarget()
+ .getDataLayout()
+ ->getStackAlignment();
+ unsigned Align = std::min(ArgFlags.getOrigAlign(), StackAlign);
+
+ for (auto &It : PendingMembers) {
+ It.convertToMem(State.AllocateStack(Size, std::max(Align, SlotAlign)));
+ State.addLoc(It);
+ SlotAlign = 1;
+ }
+
+ // All pending members have now been allocated
+ PendingMembers.clear();
+ return true;
+}
+
+/// The Darwin variadic PCS places anonymous arguments in 8-byte stack slots. An
+/// [N x Ty] type must still be contiguous in memory though.
+static bool CC_AArch64_Custom_Stack_Block(
+ unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
+
+ // Add the argument to the list to be allocated once we know the size of the
+ // block.
+ PendingMembers.push_back(
+ CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+
+ if (!ArgFlags.isInConsecutiveRegsLast())
+ return true;
+
+ return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, 8);
+}
+
+/// Given an [N x Ty] block, it should be passed in a consecutive sequence of
+/// registers. If no such sequence is available, mark the rest of the registers
+/// of that type as used and place the argument on the stack.
+static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ // Try to allocate a contiguous block of registers, each of the correct
+ // size to hold one member.
+ ArrayRef<uint16_t> RegList;
+ if (LocVT.SimpleTy == MVT::i64)
+ RegList = XRegList;
+ else if (LocVT.SimpleTy == MVT::f16)
+ RegList = HRegList;
+ else if (LocVT.SimpleTy == MVT::f32 || LocVT.is32BitVector())
+ RegList = SRegList;
+ else if (LocVT.SimpleTy == MVT::f64 || LocVT.is64BitVector())
+ RegList = DRegList;
+ else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
+ RegList = QRegList;
+ else {
+ // Not an array we want to split up after all.
+ return false;
+ }
+
+ SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
+
+ // Add the argument to the list to be allocated once we know the size of the
+ // block.
+ PendingMembers.push_back(
+ CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+
+ if (!ArgFlags.isInConsecutiveRegsLast())
+ return true;
+
+ unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
+ if (RegResult) {
+ for (auto &It : PendingMembers) {
+ It.convertToReg(RegResult);
+ State.addLoc(It);
+ ++RegResult;
+ }
+ PendingMembers.clear();
+ return true;
+ }
+
+ // Mark all regs in the class as unavailable
+ for (auto Reg : RegList)
+ State.AllocateReg(Reg);
+
+ const AArch64Subtarget &Subtarget = static_cast<const AArch64Subtarget &>(
+ State.getMachineFunction().getSubtarget());
+ unsigned SlotAlign = Subtarget.isTargetDarwin() ? 1 : 8;
+
+ return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, SlotAlign);
+}
+
+}
+
+#endif
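
The heart of CC_AArch64_Custom_Block is State.AllocateRegBlock: find PendingMembers.size() consecutive free registers in the chosen list, or fail so the caller can mark the whole class used and spill the block to the stack. A minimal sketch of that search over a hypothetical free-list representation (the real CCState tracks allocation differently):

#include <cstddef>
#include <vector>

// Return the index of the first run of `Need` consecutive free registers in
// `Free`, or -1 if no such run exists (the caller then spills the block and
// marks the whole class used, as CC_AArch64_Custom_Block does).
static int allocateRegBlock(std::vector<bool> &Free, size_t Need) {
  for (size_t I = 0; I + Need <= Free.size(); ++I) {
    size_t Len = 0;
    while (Len < Need && Free[I + Len])
      ++Len;
    if (Len == Need) {
      for (size_t J = 0; J < Need; ++J)
        Free[I + J] = false; // claim the whole block
      return static_cast<int>(I);
    }
  }
  return -1;
}

// E.g. with D0..D7 and D0 already taken, a [3 x double] block lands in
// D1..D3; if only scattered singles remain, the block goes to memory.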
diff --git a/lib/Target/AArch64/AArch64CallingConvention.td b/lib/Target/AArch64/AArch64CallingConvention.td
index 9e707e4..4691e94 100644
--- a/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/lib/Target/AArch64/AArch64CallingConvention.td
@@ -16,7 +16,7 @@ class CCIfAlign<string Align, CCAction A> :
CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
- CCIf<"State.getMachineFunction().getSubtarget().getDataLayout()->isBigEndian()", A>;
+ CCIf<"State.getMachineFunction().getTarget().getDataLayout()->isBigEndian()", A>;
//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
@@ -40,6 +40,8 @@ def CC_AArch64_AAPCS : CallingConv<[
// slot is 64-bit.
CCIfByVal<CCPassByVal<8, 8>>,
+ CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
+
// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
// up to eight each of GPR and FPR.
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
@@ -119,6 +121,8 @@ def CC_AArch64_DarwinPCS : CallingConv<[
// slot is 64-bit.
CCIfByVal<CCPassByVal<8, 8>>,
+ CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
+
// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
// up to eight each of GPR and FPR.
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
@@ -159,6 +163,8 @@ def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
+ CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,
+
// Handle all scalar types as either i64 or f64.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
CCIfType<[f16, f32], CCPromoteToType<f64>>,
@@ -198,6 +204,44 @@ def RetCC_AArch64_WebKit_JS : CallingConv<[
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;
+//===----------------------------------------------------------------------===//
+// ARM64 Calling Convention for GHC
+//===----------------------------------------------------------------------===//
+
+// This calling convention is specific to the Glasgow Haskell Compiler.
+// The only documentation is the GHC source code, specifically the C header
+// file:
+//
+// https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
+//
+// which defines the registers for the Spineless Tagless G-Machine (STG) that
+// GHC uses to implement lazy evaluation. The generic STG machine has a set of
+// registers which are mapped to an appropriate set of architecture-specific
+// registers for each CPU architecture.
+//
+// The STG Machine is documented here:
+//
+// https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
+//
+// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
+// register mapping".
+
+def CC_AArch64_GHC : CallingConv<[
+ // Handle all vector types as either f64 or v2f64.
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,
+
+ CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
+ CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
+ CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,
+
+ // Promote i8/i16/i32 arguments to i64.
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
+ CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
+]>;
+
// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
@@ -243,3 +287,4 @@ def CSR_AArch64_AllRegs
(sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
(sequence "Q%u", 0, 31))>;
+def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index aab8e38..3b74481 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -92,9 +92,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
MachineInstr *replaceTLSBaseAddrCall(MachineInstr *I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const AArch64TargetMachine *TM =
- static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
// code sequence assumes the address will be.
@@ -112,9 +110,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *setRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const AArch64TargetMachine *TM =
- static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
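
Both helpers serve one caching scheme: the first TLS-base computation in a function parks its result in a fresh virtual register (setRegister), and every later computation is rewritten into a plain copy from that register (replaceTLSBaseAddrCall). A toy sketch of the rewrite, ignoring the dominator-tree walk and the x0 plumbing the real pass performs:

#include <string>
#include <vector>

struct Op {
  std::string Kind; // "tls_base_call", later rewritten to "copy"
  int DestReg;      // register written by this op
};

// Replace all but the first TLS-base computation with copies from the
// cached virtual register, mirroring LDTLSCleanup's per-function pass.
static void cacheTLSBase(std::vector<Op> &Body, int &NextVReg) {
  int CachedReg = -1;
  for (Op &O : Body) {
    if (O.Kind != "tls_base_call")
      continue;
    if (CachedReg < 0) {
      CachedReg = NextVReg++; // first occurrence: save the result
    } else {
      O.Kind = "copy";        // later occurrences: reuse the cache
      O.DestReg = CachedReg;
    }
  }
}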
diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp
index 87b545b..938dcb3 100644
--- a/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -285,9 +285,7 @@ static void initReachingDef(MachineFunction &MF,
BlockToSetOfInstrsPerColor &ReachableUses,
const MapRegToId &RegToId,
const MachineInstr *DummyOp, bool ADRPMode) {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
-
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned NbReg = RegToId.size();
for (MachineBasicBlock &MBB : MF) {
@@ -1026,8 +1024,7 @@ static void collectInvolvedReg(MachineFunction &MF, MapRegToId &RegToId,
}
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
MapRegToId RegToId;
@@ -1043,8 +1040,7 @@ bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
MachineInstr *DummyOp = nullptr;
if (BasicBlockScopeOnly) {
- const AArch64InstrInfo *TII = static_cast<const AArch64InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
// For local analysis, create a dummy operation to record uses that are not
// local.
DummyOp = MF.CreateMachineInstr(TII->get(AArch64::COPY), DebugLoc());
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index 0fbd3c6..e68571f 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -304,7 +304,7 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TII = MF.getSubtarget().getInstrInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
bool Changed = false;
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 54f53dc..fccd8df 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -893,15 +893,13 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
<< "********** Function: " << MF.getName() << '\n');
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
- SchedModel =
- MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
+ SchedModel = MF.getSubtarget().getSchedModel();
MRI = &MF.getRegInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize);
+ MinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
bool Changed = false;
CmpConv.runOnMachineFunction(MF);
diff --git a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index c850680..41b1132 100644
--- a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -229,7 +229,7 @@ static bool isStartChunk(uint64_t Chunk) {
if (Chunk == 0 || Chunk == UINT64_MAX)
return false;
- return (CountLeadingOnes_64(Chunk) + countTrailingZeros(Chunk)) == 64;
+ return isMask_64(~Chunk);
}
/// \brief Check whether this chunk matches the pattern '0...1...' This pattern
@@ -239,7 +239,7 @@ static bool isEndChunk(uint64_t Chunk) {
if (Chunk == 0 || Chunk == UINT64_MAX)
return false;
- return (countLeadingZeros(Chunk) + CountTrailingOnes_64(Chunk)) == 64;
+ return isMask_64(Chunk);
}
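// Self-contained check of the rewrite above (plain C++ sketch; isMask64
// mirrors llvm::isMask_64): '0...01...1' chunks are exactly the masks, and
// '1...10...0' chunks are exactly the complements of masks.
#include <cstdint>
static bool isMask64(uint64_t V) { return V && ((V + 1) & V) == 0; }
static bool endChunk(uint64_t C) {    // '0...01...1'
  return C != 0 && C != UINT64_MAX && isMask64(C);
}
static bool startChunk(uint64_t C) {  // '1...10...0'
  return C != 0 && C != UINT64_MAX && isMask64(~C);
}
// e.g. endChunk(0x00000000FFFFFFFF) and startChunk(0xFFFF000000000000) hold,
// while 0x0000FFFF00000000 fails both, matching the old formulation based
// on counting leading/trailing bits.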
/// \brief Clear or set all bits in the chunk at the given index.
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 612cb00..61017c1 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "AArch64CallingConvention.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
@@ -244,9 +245,10 @@ public:
unsigned fastMaterializeFloatZero(const ConstantFP* CF) override;
explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
- const TargetLibraryInfo *LibInfo)
+ const TargetLibraryInfo *LibInfo)
: FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ Subtarget =
+ &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
Context = &FuncInfo.Fn->getContext();
}
@@ -301,6 +303,8 @@ static unsigned getImplicitScaleFactor(MVT VT) {
CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
if (CC == CallingConv::WebKit_JS)
return CC_AArch64_WebKit_JS;
+ if (CC == CallingConv::GHC)
+ return CC_AArch64_GHC;
return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
@@ -366,6 +370,24 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
+ // For the MachO large code model, materialize the FP constant in code.
+ if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
+ unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
+ const TargetRegisterClass *RC = Is64Bit ?
+ &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+
+ unsigned TmpReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
+ .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(TmpReg, getKillRegState(true));
+
+ return ResultReg;
+ }
+
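// Sketch of what the block above emits for a double such as 1.0
// (bit pattern 0x3FF0000000000000) under the MachO large code model,
// instead of a constant-pool load (register choices illustrative):
//   mov  x8, #0x3ff0000000000000   ; MOVi64imm, later expanded to movz/movk
//   fmov d0, x8                    ; the GPR64 to FPR64 COPY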
// Materialize via constant pool. MachineConstantPool wants an explicit
// alignment.
unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
@@ -752,7 +774,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
if (Addr.getOffsetReg())
break;
- if (DL.getTypeSizeInBits(Ty) != 8)
+ if (!Ty || DL.getTypeSizeInBits(Ty) != 8)
break;
const Value *LHS = U->getOperand(0);
@@ -2112,15 +2134,15 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
int TestBit = -1;
bool IsCmpNE;
- if ((Predicate == CmpInst::ICMP_EQ) || (Predicate == CmpInst::ICMP_NE)) {
- if (const auto *C = dyn_cast<Constant>(LHS))
- if (C->isNullValue())
- std::swap(LHS, RHS);
-
- if (!isa<Constant>(RHS))
- return false;
+ switch (Predicate) {
+ default:
+ return false;
+ case CmpInst::ICMP_EQ:
+ case CmpInst::ICMP_NE:
+ if (isa<Constant>(LHS) && cast<Constant>(LHS)->isNullValue())
+ std::swap(LHS, RHS);
- if (!cast<Constant>(RHS)->isNullValue())
+ if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
return false;
if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
@@ -2143,26 +2165,27 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
TestBit = 0;
IsCmpNE = Predicate == CmpInst::ICMP_NE;
- } else if (Predicate == CmpInst::ICMP_SLT) {
- if (!isa<Constant>(RHS))
- return false;
-
- if (!cast<Constant>(RHS)->isNullValue())
+ break;
+ case CmpInst::ICMP_SLT:
+ case CmpInst::ICMP_SGE:
+ if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
return false;
TestBit = BW - 1;
- IsCmpNE = true;
- } else if (Predicate == CmpInst::ICMP_SGT) {
+ IsCmpNE = Predicate == CmpInst::ICMP_SLT;
+ break;
+ case CmpInst::ICMP_SGT:
+ case CmpInst::ICMP_SLE:
if (!isa<ConstantInt>(RHS))
return false;
- if (cast<ConstantInt>(RHS)->getValue() != -1)
+ if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))
return false;
TestBit = BW - 1;
- IsCmpNE = false;
- } else
- return false;
+ IsCmpNE = Predicate == CmpInst::ICMP_SLE;
+ break;
+ } // end switch
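// Mapping implemented by the switch above (sketch; the concrete TBZ/TBNZ
// or CBZ/CBNZ opcode comes from the OpcTable just below):
//   icmp eq/ne  x, 0  -> CBZ/CBNZ, or TBZ/TBNZ when a single test bit is
//                        recoverable from the LHS (e.g. an AND with a
//                        power of two)
//   icmp slt x, 0  / icmp sge x, 0  -> TBNZ/TBZ on the sign bit (BW - 1)
//   icmp sgt x, -1 / icmp sle x, -1 -> TBZ/TBNZ on the sign bit (BW - 1)
// e.g. "if (x < 0)" on an i64 value becomes "tbnz x0, #63, <dest>".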
static const unsigned OpcTable[2][2][2] = {
{ {AArch64::CBZW, AArch64::CBZX },
@@ -3302,8 +3325,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
MFI->setFrameAddressIsTaken(true);
const AArch64RegisterInfo *RegInfo =
- static_cast<const AArch64RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ static_cast<const AArch64RegisterInfo *>(Subtarget->getRegisterInfo());
unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
unsigned SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index a7779d6..84bf317 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -64,8 +64,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
return false;
// Don't use the red zone if the function explicitly asks us not to.
// This is typically used for kernel code.
- if (MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::NoRedZone))
+ if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
return false;
const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -167,7 +166,7 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
if (CSI.empty())
return;
- const DataLayout *TD = MF.getSubtarget().getDataLayout();
+ const DataLayout *TD = MF.getTarget().getDataLayout();
bool HasFP = hasFP(MF);
// Calculate the number of bytes used to store the return address.
@@ -196,7 +195,8 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
nullptr, DwarfReg, Offset - TotalSkipped));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
}
@@ -214,6 +214,11 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
bool HasFP = hasFP(MF);
DebugLoc DL = MBB.findDebugLoc(MBBI);
+ // All calls are tail calls in GHC calling conv, and functions have no
+ // prologue/epilogue.
+ if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ return;
+
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
assert(!HasFP && "unexpected function without stack frame but with FP");
@@ -234,7 +239,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
} else if (NumBytes) {
++NumRedZoneFunctions;
}
@@ -301,7 +307,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
TII->copyPhysReg(MBB, MBBI, DL, AArch64::X19, AArch64::SP, false);
if (needsFrameMoves) {
- const DataLayout *TD = MF.getSubtarget().getDataLayout();
+ const DataLayout *TD = MF.getTarget().getDataLayout();
const int StackGrowth = -TD->getPointerSize(0);
unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -377,26 +383,30 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
// Record the location of the stored LR
unsigned LR = RegInfo->getDwarfRegNum(AArch64::LR, true);
CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, LR, StackGrowth));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
// Record the location of the stored FP
CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, Reg, 2 * StackGrowth));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
} else {
// Encode the stack size of the leaf function.
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, -MFI->getStackSize()));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
// Now emit the moves for whatever callee saved regs we have.
@@ -445,6 +455,11 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
int NumBytes = MFI->getStackSize();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ // All calls are tail calls in GHC calling conv, and functions have no
+ // prologue/epilogue.
+ if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ return;
+
// Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
uint64_t ArgumentPopSize = 0;
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 87a6d80..ac11c4d 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -53,12 +53,10 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();
ForCodeSize =
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize) ||
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+ Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
@@ -134,8 +132,8 @@ public:
/// Generic helper for the createDTuple/createQTuple
/// functions. Those should almost always be called instead.
- SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
- unsigned SubRegs[]);
+ SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
+ const unsigned SubRegs[]);
SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
@@ -569,6 +567,27 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
return isWorthFolding(N);
}
+/// If there's a use of this ADDlow that's not itself a load/store, then we'll
+/// need to create a real ADD instruction from it anyway and there's no point in
+/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
+/// a single pseudo-instruction for an ADRP/ADD pair, so over-aggressive
+/// folding leads to duplicated ADRP instructions.
+static bool isWorthFoldingADDlow(SDValue N) {
+ for (auto Use : N->uses()) {
+ if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
+ Use->getOpcode() != ISD::ATOMIC_LOAD &&
+ Use->getOpcode() != ISD::ATOMIC_STORE)
+ return false;
+
+ // ldar and stlr have much more restrictive addressing modes (just a
+ // register).
+ if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
+ return false;
+ }
+
+ return true;
+}
+
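// Illustrative case (assumed source, not from the patch):
//   int g;
//   int f(void) { return g + (int)(intptr_t)&g; }
// The load alone could fold the ADDlow into "ldr w8, [x9, :lo12:g]", but
// the address escapes as a plain value too, forcing a real
// "add x10, x9, :lo12:g" regardless; folding would then duplicate the
// ADRP/ADD pair. The helper also bails for acquire/release accesses,
// whose ldar/stlr forms take only a bare register.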
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
@@ -582,7 +601,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
return true;
}
- if (N.getOpcode() == AArch64ISD::ADDlow) {
+ if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
GlobalAddressSDNode *GAN =
dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
Base = N.getOperand(0);
@@ -594,7 +613,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
unsigned Alignment = GV->getAlignment();
const DataLayout *DL = TLI->getDataLayout();
Type *Ty = GV->getType()->getElementType();
- if (Alignment == 0 && Ty->isSized() && !Subtarget->isTargetDarwin())
+ if (Alignment == 0 && Ty->isSized())
Alignment = DL->getABITypeAlignment(Ty);
if (Alignment >= Size)
@@ -869,26 +888,26 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
}
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = {
+ static const unsigned RegClassIDs[] = {
AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
- static unsigned SubRegs[] = { AArch64::dsub0, AArch64::dsub1,
- AArch64::dsub2, AArch64::dsub3 };
+ static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
+ AArch64::dsub2, AArch64::dsub3};
return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = {
+ static const unsigned RegClassIDs[] = {
AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
- static unsigned SubRegs[] = { AArch64::qsub0, AArch64::qsub1,
- AArch64::qsub2, AArch64::qsub3 };
+ static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
+ AArch64::qsub2, AArch64::qsub3};
return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
- unsigned RegClassIDs[],
- unsigned SubRegs[]) {
+ const unsigned RegClassIDs[],
+ const unsigned SubRegs[]) {
// There's no special register-class for a vector-list of 1 element: it's just
// a vector.
if (Regs.size() == 1)
@@ -1033,13 +1052,10 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(2)); // Mem operand;
- Ops.push_back(Chain);
+ SDValue Ops[] = {N->getOperand(2), // Mem operand;
+ Chain};
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ EVT ResTys[] = {MVT::Untyped, MVT::Other};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
@@ -1057,15 +1073,12 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(1)); // Mem operand
- Ops.push_back(N->getOperand(2)); // Incremental
- Ops.push_back(Chain);
+ SDValue Ops[] = {N->getOperand(1), // Mem operand
+ N->getOperand(2), // Incremental
+ Chain};
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Untyped, MVT::Other};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
@@ -1096,10 +1109,7 @@ SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(NumVecs + 2));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
return St;
@@ -1109,20 +1119,18 @@ SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
- SmallVector<EVT, 2> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Other); // Type for the Chain
+ EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Other}; // Type for the Chain
// Form a REG_SEQUENCE to force register allocation.
bool Is128Bit = VT.getSizeInBits() == 128;
SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(NumVecs + 1)); // base register
- Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
- Ops.push_back(N->getOperand(0)); // Chain
+ SDValue Ops[] = {RegSeq,
+ N->getOperand(NumVecs + 1), // base register
+ N->getOperand(NumVecs + 2), // Incremental
+ N->getOperand(0)}; // Chain
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
return St;
@@ -1176,18 +1184,13 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ EVT ResTys[] = {MVT::Untyped, MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 3));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
@@ -1221,20 +1224,17 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Untyped, MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
- Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
- Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq,
+ CurDAG->getTargetConstant(LaneNo, MVT::i64), // Lane Number
+ N->getOperand(NumVecs + 2), // Base register
+ N->getOperand(NumVecs + 3), // Incremental
+ N->getOperand(0)};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
// Update uses of the write back register
@@ -1282,11 +1282,8 @@ SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 3));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
// Transfer memoperands.
@@ -1312,19 +1309,16 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- SmallVector<EVT, 2> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Other);
+ EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
- Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ N->getOperand(NumVecs + 2), // Base Register
+ N->getOperand(NumVecs + 3), // Incremental
+ N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
// Transfer memoperands.
@@ -1403,12 +1397,17 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
} else
return false;
- assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
- "bad amount in shift node!");
+ // Bail out on large immediates. This happens when no proper
+ // combining/constant folding was performed.
+ if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
+ DEBUG((dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
+ return false;
+ }
LSB = Srl_imm;
- MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
- : CountTrailingOnes_64(And_imm)) -
+ MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
+ : countTrailingOnes<uint64_t>(And_imm)) -
1;
if (ClampMSB)
// Since we're moving the extend before the right shift operation, we need
@@ -1452,7 +1451,7 @@ static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
return false;
// Check whether we really have several bits extract here.
- unsigned BitWide = 64 - CountLeadingOnes_64(~(And_mask >> Srl_imm));
+ unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
if (BitWide && isMask_64(And_mask >> Srl_imm)) {
if (N->getValueType(0) == MVT::i32)
Opc = AArch64::UBFMWri;
@@ -1508,7 +1507,14 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
} else
return false;
- assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
+ // Missing combines/constant folding may have left us with strange
+ // constants.
+ if (Shl_imm >= VT.getSizeInBits()) {
+ DEBUG((dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
+ return false;
+ }
+
uint64_t Srl_imm = 0;
if (!isIntImmediate(N->getOperand(1), Srl_imm))
return false;
@@ -1851,7 +1857,7 @@ static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
return false;
ShiftAmount = countTrailingZeros(NonZeroBits);
- MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);
+ MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
// BFI encompasses sufficiently many nodes that it's worth inserting an extra
// LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
@@ -2229,11 +2235,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
SDValue MemAddr = Node->getOperand(4);
// Place arguments in the right order.
- SmallVector<SDValue, 7> Ops;
- Ops.push_back(ValLo);
- Ops.push_back(ValHi);
- Ops.push_back(MemAddr);
- Ops.push_back(Chain);
+ SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
// Transfer memoperands.
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7c94d83..a1b324e 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "AArch64ISelLowering.h"
+#include "AArch64CallingConvention.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64Subtarget.h"
@@ -66,10 +67,9 @@ EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
cl::desc("Allow AArch64 SLI/SRI formation"),
cl::init(false));
-
-AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM)
- : TargetLowering(TM) {
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
+ const AArch64Subtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
// AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
// we have to make something up. Arbitrarily, choose ZeroOrOne.
@@ -111,7 +111,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM)
}
// Compute derived properties from the register classes
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget->getRegisterInfo());
// Provide all sorts of operation actions
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
@@ -386,13 +386,24 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM)
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
}
+ // Make floating-point constants legal for the large code model, so they don't
+ // become loads from the constant pool.
+ if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ }
+
// AArch64 does not have floating-point extending loads, i1 sign-extending
// load, floating-point truncating stores, or v2i32->v2i16 truncating store.
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ for (MVT VT : MVT::fp_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
+ }
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
+
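// The loops above use the newer three-type setLoadExtAction signature, in
// which the in-memory type is spelled separately from the result type.
// Sketch of the reading (LLVM API of this era, illustrative values):
//   setLoadExtAction(ISD::EXTLOAD, /*ValVT=*/MVT::f64, /*MemVT=*/MVT::f16,
//                    Expand);
// marks "load f16, extend to f64" for expansion; iterating ValVT over
// fp_valuetypes() covers every floating-point result type rather than one
// blanket per-memory-type entry.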
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
@@ -531,26 +542,22 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM)
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
// Likewise, narrowing and extending vector loads/stores aren't handled
// directly.
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
- Expand);
-
- setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
-
- setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
-
- for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
- setTruncStoreAction((MVT::SimpleValueType)VT,
- (MVT::SimpleValueType)InnerVT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
- setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ for (MVT VT : MVT::vector_valuetypes()) {
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
+
+ setOperationAction(ISD::MULHS, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+
+ setOperationAction(ISD::BSWAP, VT, Expand);
+
+ for (MVT InnerVT : MVT::vector_valuetypes()) {
+ setTruncStoreAction(VT, InnerVT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
+ }
}
// AArch64 has implementations of a lot of rounding-like FP operations.
@@ -615,7 +622,8 @@ void AArch64TargetLowering::addTypeForNEON(EVT VT, EVT PromotedBitwiseVT) {
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
setOperationAction(ISD::VSELECT, VT.getSimpleVT(), Expand);
- setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);
+ for (MVT InnerVT : MVT::all_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, InnerVT, VT.getSimpleVT(), Expand);
// CNT supports only B element sizes.
if (VT != MVT::v8i8 && VT != MVT::v16i8)
@@ -722,13 +730,6 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(EVT LHSTy) const {
return MVT::i64;
}
-unsigned AArch64TargetLowering::getMaximalGlobalOffset() const {
- // FIXME: On AArch64, this depends on the type.
- // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes().
- // and the offset has to be a multiple of the related size in bytes.
- return 4095;
-}
-
FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const {
@@ -869,9 +870,8 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
// EndBB:
// Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
MachineFunction *MF = MBB->getParent();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
DebugLoc DL = MI->getDebugLoc();
MachineFunction::iterator It = MBB;
@@ -1330,10 +1330,7 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
SDValue AArch64TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
RTLIB::Libcall Call) const {
- SmallVector<SDValue, 2> Ops;
- for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i)
- Ops.push_back(Op.getOperand(i));
-
+ SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
return makeLibCall(DAG, Call, MVT::f128, &Ops[0], Ops.size(), false,
SDLoc(Op)).first;
}
@@ -1561,10 +1558,7 @@ SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
else
LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
- SmallVector<SDValue, 2> Ops;
- for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i)
- Ops.push_back(Op.getOperand(i));
-
+ SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
return makeLibCall(DAG, LC, Op.getValueType(), &Ops[0], Ops.size(), false,
SDLoc(Op)).first;
}
@@ -1981,6 +1975,8 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
llvm_unreachable("Unsupported calling convention.");
case CallingConv::WebKit_JS:
return CC_AArch64_WebKit_JS;
+ case CallingConv::GHC:
+ return CC_AArch64_GHC;
case CallingConv::C:
case CallingConv::Fast:
if (!Subtarget->isTargetDarwin())
@@ -2012,18 +2008,19 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
unsigned CurArgIdx = 0;
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ValVT = Ins[i].VT;
- std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[i].OrigArgIndex;
-
- // Get type of the original argument.
- EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
- MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
- // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
- if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
- ValVT = MVT::i8;
- else if (ActualMVT == MVT::i16)
- ValVT = MVT::i16;
+ if (Ins[i].isOrigArg()) {
+ std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[i].getOrigArgIndex();
+ // Get type of the original argument.
+ EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+ MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+ // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+ if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+ ValVT = MVT::i8;
+ else if (ActualMVT == MVT::i16)
+ ValVT = MVT::i16;
+ }
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
bool Res =
AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
@@ -2106,7 +2103,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
unsigned ArgSize = VA.getValVT().getSizeInBits() / 8;
uint32_t BEAlign = 0;
- if (ArgSize < 8 && !Subtarget->isLittleEndian())
+ if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
+ !Ins[i].Flags.isInConsecutiveRegs())
BEAlign = 8 - ArgSize;
int FI = MFI->CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
@@ -2198,8 +2196,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
AArch64::X3, AArch64::X4, AArch64::X5,
AArch64::X6, AArch64::X7 };
static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
- unsigned FirstVariadicGPR =
- CCInfo.getFirstUnallocated(GPRArgRegs, NumGPRArgRegs);
+ unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
int GPRIdx = 0;
@@ -2227,8 +2224,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
- unsigned FirstVariadicFPR =
- CCInfo.getFirstUnallocated(FPRArgRegs, NumFPRArgRegs);
+ unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
int FPRIdx = 0;
@@ -2349,7 +2345,9 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
// cannot rely on the linker replacing the tail call with a return.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
- if (GV->hasExternalWeakLinkage())
+ const Triple TT(getTargetMachine().getTargetTriple());
+ if (GV->hasExternalWeakLinkage() &&
+ (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
return false;
}
@@ -2660,7 +2658,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
: VA.getValVT().getSizeInBits();
OpSize = (OpSize + 7) / 8;
- if (!Subtarget->isLittleEndian() && !Flags.isByVal()) {
+ if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
+ !Flags.isInConsecutiveRegs()) {
if (OpSize < 8)
BEAlign = 8 - OpSize;
}
@@ -2782,19 +2781,16 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Add a register mask operand representing the call-preserved registers.
const uint32_t *Mask;
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const AArch64RegisterInfo *ARI =
- static_cast<const AArch64RegisterInfo *>(TRI);
+ const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
if (IsThisReturn) {
// For 'this' returns, use the X0-preserving mask if applicable
- Mask = ARI->getThisReturnPreservedMask(CallConv);
+ Mask = TRI->getThisReturnPreservedMask(CallConv);
if (!Mask) {
IsThisReturn = false;
- Mask = ARI->getCallPreservedMask(CallConv);
+ Mask = TRI->getCallPreservedMask(CallConv);
}
} else
- Mask = ARI->getCallPreservedMask(CallConv);
+ Mask = TRI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -3014,11 +3010,8 @@ AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
// TLS calls preserve all registers except those that absolutely must be
// trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
// silly).
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const AArch64RegisterInfo *ARI =
- static_cast<const AArch64RegisterInfo *>(TRI);
- const uint32_t *Mask = ARI->getTLSCallPreservedMask();
+ const uint32_t *Mask =
+ Subtarget->getRegisterInfo()->getTLSCallPreservedMask();
// Finally, we can make the call. This is just a degenerate version of a
// normal AArch64 call node: x0 takes the address of the descriptor, and
@@ -3065,11 +3058,8 @@ SDValue AArch64TargetLowering::LowerELFTLSDescCall(SDValue SymAddr,
// TLS calls preserve all registers except those that absolutely must be
// trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
// silly).
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const AArch64RegisterInfo *ARI =
- static_cast<const AArch64RegisterInfo *>(TRI);
- const uint32_t *Mask = ARI->getTLSCallPreservedMask();
+ const uint32_t *Mask =
+ Subtarget->getRegisterInfo()->getTLSCallPreservedMask();
// The function takes only one argument: the address of the descriptor itself
// in X0.
@@ -3259,8 +3249,8 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
OFCC = getInvertedCondCode(OFCC);
SDValue CCVal = DAG.getConstant(OFCC, MVT::i32);
- return DAG.getNode(AArch64ISD::BRCOND, SDLoc(LHS), MVT::Other, Chain, Dest,
- CCVal, Overflow);
+ return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
+ Overflow);
}
if (LHS.getValueType().isInteger()) {
@@ -3429,8 +3419,8 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
}
SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
- if (DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::NoImplicitFloat))
+ if (DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::NoImplicitFloat))
return SDValue();
if (!Subtarget->hasNEON())
@@ -3447,18 +3437,12 @@ SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
SDValue Val = Op.getOperand(0);
SDLoc DL(Op);
EVT VT = Op.getValueType();
- SDValue ZeroVec = DAG.getUNDEF(MVT::v8i8);
- SDValue VecVal;
- if (VT == MVT::i32) {
- VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
- VecVal = DAG.getTargetInsertSubreg(AArch64::ssub, DL, MVT::v8i8, ZeroVec,
- VecVal);
- } else {
- VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
- }
+ if (VT == MVT::i32)
+ Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
- SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, VecVal);
+ SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
SDValue UaddLV = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, MVT::i32), CtPop);
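// The simplification above lowers ctpop(i32 %x) roughly as:
//   zext to i64 -> bitcast to v8i8 -> CNT per byte -> UADDLV lane sum
// Zero-extending first guarantees the four high bytes are zero and add
// nothing to the sum, so the old insert_subreg-into-undef sequence for the
// i32 case is unnecessary.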
@@ -4279,7 +4263,8 @@ AArch64TargetLowering::getSingleConstraintMatchWeight(
std::pair<unsigned, const TargetRegisterClass *>
AArch64TargetLowering::getRegForInlineAsmConstraint(
- const std::string &Constraint, MVT VT) const {
+ const TargetRegisterInfo *TRI, const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
@@ -4308,7 +4293,7 @@ AArch64TargetLowering::getRegForInlineAsmConstraint(
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass *> Res;
- Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
// Not found as a standard register?
if (!Res.second) {
@@ -4615,19 +4600,21 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
// The extraction can just take the second half
Src.ShuffleVec =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getIntPtrConstant(NumSrcElts));
+ DAG.getConstant(NumSrcElts, MVT::i64));
Src.WindowBase = -NumSrcElts;
} else if (Src.MaxElt < NumSrcElts) {
// The extraction can just take the first half
- Src.ShuffleVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT,
- Src.ShuffleVec, DAG.getIntPtrConstant(0));
+ Src.ShuffleVec =
+ DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
+ DAG.getConstant(0, MVT::i64));
} else {
// An actual VEXT is needed
- SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT,
- Src.ShuffleVec, DAG.getIntPtrConstant(0));
+ SDValue VEXTSrc1 =
+ DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
+ DAG.getConstant(0, MVT::i64));
SDValue VEXTSrc2 =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getIntPtrConstant(NumSrcElts));
+ DAG.getConstant(NumSrcElts, MVT::i64));
unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
@@ -6270,6 +6257,8 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode CC, bool NoNans, EVT VT,
SDLoc dl, SelectionDAG &DAG) {
EVT SrcVT = LHS.getValueType();
+ assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
+ "function only supposed to emit natural comparisons");
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
APInt CnstBits(VT.getSizeInBits(), 0);
@@ -6364,13 +6353,15 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
+ EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
SDLoc dl(Op);
if (LHS.getValueType().getVectorElementType().isInteger()) {
assert(LHS.getValueType() == RHS.getValueType());
AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
- return EmitVectorComparison(LHS, RHS, AArch64CC, false, Op.getValueType(),
- dl, DAG);
+ SDValue Cmp =
+ EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
+ return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
}
assert(LHS.getValueType().getVectorElementType() == MVT::f32 ||
@@ -6384,19 +6375,21 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath;
SDValue Cmp =
- EmitVectorComparison(LHS, RHS, CC1, NoNaNs, Op.getValueType(), dl, DAG);
+ EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
if (!Cmp.getNode())
return SDValue();
if (CC2 != AArch64CC::AL) {
SDValue Cmp2 =
- EmitVectorComparison(LHS, RHS, CC2, NoNaNs, Op.getValueType(), dl, DAG);
+ EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
if (!Cmp2.getNode())
return SDValue();
- Cmp = DAG.getNode(ISD::OR, dl, Cmp.getValueType(), Cmp, Cmp2);
+ Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
}
+ Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
+
if (ShouldInvert)
return Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
@@ -6534,6 +6527,34 @@ bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
return NumBits1 > NumBits2;
}
+/// Check if it is profitable to hoist an instruction in then/else to if.
+/// Not profitable if I and its user can form an FMA instruction
+/// because we prefer FMSUB/FMADD.
+bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
+ if (I->getOpcode() != Instruction::FMul)
+ return true;
+
+ if (I->getNumUses() != 1)
+ return true;
+
+ Instruction *User = I->user_back();
+
+ if (User &&
+ !(User->getOpcode() == Instruction::FSub ||
+ User->getOpcode() == Instruction::FAdd))
+ return true;
+
+ const TargetOptions &Options = getTargetMachine().Options;
+ EVT VT = getValueType(User->getOperand(0)->getType());
+
+ if (isFMAFasterThanFMulAndFAdd(VT) &&
+ isOperationLegalOrCustom(ISD::FMA, VT) &&
+ (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
+ return false;
+
+ return true;
+}
+
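// Illustrative shape the hook above preserves (plain C++ sketch):
static double keepFusable(bool p, double a, double n, double m) {
  double r = a;
  if (p)
    r = a - n * m;  // single-use fmul feeding fsub: an FMSUB candidate
  return r;
}
// Hoisting "n * m" above the branch, as then/else hoisting would, splits
// the multiply from its only user across blocks, so the hook reports the
// hoist as not profitable and the pair stays fusable.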
// All 32-bit GPR operations implicitly zero the high-half of the corresponding
// 64-bit GPR.
bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
@@ -6604,8 +6625,7 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
bool Fast;
const Function *F = MF.getFunction();
if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
- !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoImplicitFloat) &&
+ !F->hasFnAttribute(Attribute::NoImplicitFloat) &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
(allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
return MVT::f128;
@@ -6948,7 +6968,8 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
return SDValue();
}
-static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
// First try to optimize away the conversion when it's conditionally from
// a constant. Vectors only.
SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
@@ -6967,7 +6988,7 @@ static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG) {
// conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
// This eliminates an integer-to-vector-move UOP and improves throughput.
SDValue N0 = N->getOperand(0);
- if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
+ if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
@@ -7756,9 +7777,9 @@ static SDValue performExtendCombine(SDNode *N,
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, MVT::i64));
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getConstant(InNVT.getVectorNumElements(), MVT::i64));
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo);
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi);
@@ -7839,14 +7860,13 @@ static SDValue performSTORECombine(SDNode *N,
return SDValue();
// Cyclone has bad performance on unaligned 16B stores when crossing line and
- // page boundries. We want to split such stores.
+ // page boundaries. We want to split such stores.
if (!Subtarget->isCyclone())
return SDValue();
// Don't split at Oz.
MachineFunction &MF = DAG.getMachineFunction();
- bool IsMinSize = MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize);
+ bool IsMinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (IsMinSize)
return SDValue();
@@ -7880,9 +7900,9 @@ static SDValue performSTORECombine(SDNode *N,
EVT HalfVT =
EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts);
SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, MVT::i64));
SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getIntPtrConstant(NumElts));
+ DAG.getConstant(NumElts, MVT::i64));
SDValue BasePtr = S->getBasePtr();
SDValue NewST1 =
DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
@@ -7973,7 +7993,7 @@ static SDValue performPostLD1Combine(SDNode *N,
LoadSDN->getMemOperand());
// Update the uses.
- std::vector<SDValue> NewResults;
+ SmallVector<SDValue, 2> NewResults;
NewResults.push_back(SDValue(LD, 0)); // The result of load
NewResults.push_back(SDValue(UpdN.getNode(), 2)); // Chain
DCI.CombineTo(LD, NewResults);
@@ -8478,6 +8498,12 @@ static SDValue performSelectCombine(SDNode *N, SelectionDAG &DAG) {
// largest real NEON comparison is 64 bits per lane, which means the result is
// at most 32 bits and an illegal vector. Just bail out for now.
EVT SrcVT = N0.getOperand(0).getValueType();
+
+ // Don't try to do this optimization when the setcc itself has i1 operands.
+ // There are no legal vectors of i1, so this would be pointless.
+ if (SrcVT == MVT::i1)
+ return SDValue();
+
int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
if (!ResVT.isVector() || NumMaskElts == 0)
return SDValue();
@@ -8518,7 +8544,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performMulCombine(N, DAG, DCI, Subtarget);
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
- return performIntToFpCombine(N, DAG);
+ return performIntToFpCombine(N, DAG, Subtarget);
case ISD::OR:
return performORCombine(N, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
@@ -8696,13 +8722,12 @@ bool AArch64TargetLowering::getPostIndexedAddressParts(
static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) {
- if (N->getValueType(0) != MVT::i16)
- return;
-
SDLoc DL(N);
SDValue Op = N->getOperand(0);
- assert(Op.getValueType() == MVT::f16 &&
- "Inconsistent bitcast? Only 16-bit types should be i16 or f16");
+
+ if (N->getValueType(0) != MVT::i16 || Op.getValueType() != MVT::f16)
+ return;
+
Op = SDValue(
DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
DAG.getUNDEF(MVT::i32), Op,
@@ -8732,6 +8757,12 @@ bool AArch64TargetLowering::useLoadStackGuardNode() const {
return true;
}
+bool AArch64TargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
+ // Combine multiple FDIVs with the same divisor into multiple FMULs by the
+ // reciprocal if there are three or more FDIVs.
+ return NumUsers > 2;
+}
+
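// Worked example of the threshold above (plain C++ sketch; the rewrite
// itself only fires under unsafe-fp-math):
static void divide3(double a, double b, double c, double d,
                    double &r0, double &r1, double &r2) {
  r0 = a / d;  // three fdivs sharing one divisor: NumUsers == 3,
  r1 = b / d;  // so the combiner forms inv = 1.0 / d once and
  r2 = c / d;  // rewrites these as a * inv, b * inv, c * inv.
}
// With only two users the hook returns false: one fdiv plus two fmuls
// would not clearly beat two independent fdivs.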
TargetLoweringBase::LegalizeTypeAction
AArch64TargetLowering::getPreferredVectorAction(EVT VT) const {
MVT SVT = VT.getSimpleVT();
@@ -8836,3 +8867,8 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
Val, Stxr->getFunctionType()->getParamType(0)),
Addr);
}
+
+bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
+ Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
+ return Ty->isArrayTy();
+}
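// Illustrative effect of the override above (sketch): an IR argument such
// as
//   define void @callee([4 x double] %a)
// now answers true, so calling-convention lowering can flag the pieces
// with isInConsecutiveRegs (the flag consulted by the big-endian padding
// checks earlier in this patch) and keep the parts of the array in
// adjacent registers instead of assigning each independently.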
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index 2f5708d..e973364 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
@@ -207,7 +208,8 @@ class AArch64TargetLowering : public TargetLowering {
bool RequireStrictAlign;
public:
- explicit AArch64TargetLowering(const TargetMachine &TM);
+ explicit AArch64TargetLowering(const TargetMachine &TM,
+ const AArch64Subtarget &STI);
/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
@@ -222,7 +224,7 @@ public:
MVT getScalarShiftAmountTy(EVT LHSTy) const override;
/// allowsMisalignedMemoryAccesses - Returns true if the target allows
- /// unaligned memory accesses. of the specified type.
+ /// unaligned memory accesses of the specified type.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
unsigned Align = 1,
bool *Fast = nullptr) const override {
@@ -244,10 +246,6 @@ public:
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned getFunctionAlignment(const Function *F) const;
- /// getMaximalGlobalOffset - Returns the maximal possible offset which can
- /// be used for loads / stores from the global.
- unsigned getMaximalGlobalOffset() const override;
-
/// Returns true if a cast between SrcAS and DestAS is a noop.
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
// Addrspacecasts are always noops.
@@ -285,6 +283,8 @@ public:
bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
bool isTruncateFree(EVT VT1, EVT VT2) const override;
+ bool isProfitableToHoist(Instruction *I) const override;
+
bool isZExtFree(Type *Ty1, Type *Ty2) const override;
bool isZExtFree(EVT VT1, EVT VT2) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
@@ -440,6 +440,7 @@ private:
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
std::vector<SDNode *> *Created) const override;
+ bool combineRepeatedFPDivisors(unsigned NumUsers) const override;
ConstraintType
getConstraintType(const std::string &Constraint) const override;
@@ -452,7 +453,8 @@ private:
const char *constraint) const override;
std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const std::string &Constraint,
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const override;
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
@@ -472,6 +474,10 @@ private:
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
+
+ bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
+ CallingConv::ID CallConv,
+ bool isVarArg) const override;
};
namespace AArch64 {
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 2b0f5d2..d295c02 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -4383,7 +4383,7 @@ class BaseSIMDVectorLShiftLongBySize<bit Q, bits<2> size,
}
multiclass SIMDVectorLShiftLongBySizeBHS {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
def v8i8 : BaseSIMDVectorLShiftLongBySize<0, 0b00, V64,
"shll", ".8h", ".8b", "8">;
def v16i8 : BaseSIMDVectorLShiftLongBySize<1, 0b00, V128,
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 2dbb31c..64cec55 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
+#include "AArch64MachineCombinerPattern.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
-#include "AArch64MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
@@ -707,9 +707,8 @@ static bool UpdateOperandRegClass(MachineInstr *Instr) {
assert(MBB && "Can't get MachineBasicBlock here");
MachineFunction *MF = MBB->getParent();
assert(MF && "Can't get MachineFunction here");
- const TargetMachine *TM = &MF->getTarget();
- const TargetInstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
- const TargetRegisterInfo *TRI = TM->getSubtargetImpl()->getRegisterInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index 30bf650..d8f1274 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -16,8 +16,8 @@
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index 252ed40..6e4c0b0 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -481,6 +481,24 @@ def trunc_imm : SDNodeXForm<imm, [{
def : Pat<(i64 i64imm_32bit:$src),
(SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
+// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
+def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i32);
+}]>;
+
+def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i64);
+}]>;
+
+def : Pat<(f32 fpimm:$in),
+ (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
+def : Pat<(f64 fpimm:$in),
+ (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
+
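+// A rough sketch of the intended expansion (illustrative values, not taken
+// from a test): for (f64 fpimm:$in) with $in = 1.0, MOVi64imm materializes
+// the bit pattern and the COPY_TO_REGCLASS becomes an fmov, e.g.
+//   mov  x8, #4607182418800017408   // 0x3FF0000000000000, i.e. 1.0
+//   fmov d0, x8
+// instead of a literal-pool load.
+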
// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
@@ -639,6 +657,10 @@ def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
(MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
(MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
+def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
+ (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
+def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
+ (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
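+// Illustrative effect of the two patterns above (sketch): for
+//   long f(long a, long b) { return -a * b; }
+// ISel can now emit a single
+//   msub x0, x0, x1, xzr   // xzr - (a * b) == (-a) * b
+// instead of a separate neg and mul.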
} // AddedComplexity = 7
let AddedComplexity = 5 in {
@@ -789,7 +811,7 @@ def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
(EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
@@ -804,7 +826,7 @@ def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
@@ -977,9 +999,9 @@ def : InstAlias<"cneg $dst, $src, $cc",
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
-let neverHasSideEffects = 1, mayStore = 0, mayLoad = 0 in {
+let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR : ADRI<0, "adr", adrlabel, []>;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
def ADRP : ADRI<1, "adrp", adrplabel,
[(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
@@ -1867,6 +1889,33 @@ let Predicates = [IsLE] in {
}
} // AddedComplexity = 10
+// Match stores from lane 0 using the appropriate subregister store.
+multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
+ ValueType VecTy, ValueType STy,
+ SubRegIndex SubRegIdx,
+ Instruction STRW, Instruction STRX> {
+
+ def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
+ (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
+ (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
+ GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
+
+ def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
+ (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
+ (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
+ GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
+}
+
+let AddedComplexity = 19 in {
+ defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
+ defm : VecROStoreLane0Pat<ro16, store , v8i16, i16, hsub, STRHroW, STRHroX>;
+ defm : VecROStoreLane0Pat<ro32, truncstorei32, v4i32, i32, ssub, STRSroW, STRSroX>;
+ defm : VecROStoreLane0Pat<ro32, store , v4i32, i32, ssub, STRSroW, STRSroX>;
+ defm : VecROStoreLane0Pat<ro32, store , v4f32, f32, ssub, STRSroW, STRSroX>;
+ defm : VecROStoreLane0Pat<ro64, store , v2i64, i64, dsub, STRDroW, STRDroX>;
+ defm : VecROStoreLane0Pat<ro64, store , v2f64, f64, dsub, STRDroW, STRDroX>;
+}
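+
+// Intent of the lane-0 patterns, roughly: storing lane 0 of a 128-bit
+// vector only needs its low subregister, so a register-offset scalar store
+// such as
+//   str s0, [x0, x1, lsl #2]
+// can be used instead of an st1 lane store preceded by address arithmetic.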
+
//---
// (unsigned immediate)
defm STRX : StoreUI<0b11, 0, 0b00, GPR64, uimm12s8, "str",
@@ -3667,29 +3716,21 @@ defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, INSvi32lane>;
// Floating point vector extractions are codegen'd as either a sequence of
-// subregister extractions, possibly fed by an INS if the lane number is
-// anything other than zero.
+// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
+// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
(f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
(f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
(f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
+
def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
- (f64 (EXTRACT_SUBREG
- (INSvi64lane (v2f64 (IMPLICIT_DEF)), 0,
- V128:$Rn, VectorIndexD:$idx),
- dsub))>;
+ (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
- (f32 (EXTRACT_SUBREG
- (INSvi32lane (v4f32 (IMPLICIT_DEF)), 0,
- V128:$Rn, VectorIndexS:$idx),
- ssub))>;
+ (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
- (f16 (EXTRACT_SUBREG
- (INSvi16lane (v8f16 (IMPLICIT_DEF)), 0,
- V128:$Rn, VectorIndexH:$idx),
- hsub))>;
+ (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
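+
+// e.g. (sketch) extracting lane 1 of a v2f64 now selects the single copy
+//   mov d0, v0.d[1]   // CPYi64, an alias of DUP (scalar)
+// rather than an INS into an implicit-def followed by EXTRACT_SUBREG.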
// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might just as well be
@@ -4124,7 +4165,7 @@ def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
// AdvSIMD indexed element
//----------------------------------------------------------------------------
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm FMLA : SIMDFPIndexedSDTied<0, 0b0001, "fmla">;
defm FMLS : SIMDFPIndexedSDTied<0, 0b0101, "fmls">;
}
@@ -4678,7 +4719,7 @@ defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
@@ -4768,7 +4809,7 @@ defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
-let AddedComplexity = 15 in
+let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
ValueType VTy, ValueType STy, Instruction ST1>
: Pat<(scalar_store
@@ -4784,7 +4825,7 @@ def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store, VectorIndexH, v8f16, f16, ST1i16>;
-let AddedComplexity = 15 in
+let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
ValueType VTy, ValueType STy, Instruction ST1>
: Pat<(scalar_store
@@ -4848,7 +4889,7 @@ defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 8157981..8463ce6 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -135,6 +135,8 @@ static bool isUnscaledLdst(unsigned Opc) {
return true;
case AArch64::LDURXi:
return true;
+ case AArch64::LDURSWi:
+ return true;
}
}
@@ -173,6 +175,9 @@ int AArch64LoadStoreOpt::getMemSize(MachineInstr *MemMI) {
case AArch64::LDRXui:
case AArch64::LDURXi:
return 8;
+ case AArch64::LDRSWui:
+ case AArch64::LDURSWi:
+ return 4;
}
}
@@ -210,6 +215,9 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
case AArch64::LDRXui:
case AArch64::LDURXi:
return AArch64::LDPXi;
+ case AArch64::LDRSWui:
+ case AArch64::LDURSWi:
+ return AArch64::LDPSWi;
}
}
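// e.g. (sketch) with LDPSWi as the matching pair opcode, two adjacent
// sign-extending word loads
//   ldrsw x0, [x8]
//   ldrsw x1, [x8, #4]
// can be combined into ldpsw x0, x1, [x8].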
@@ -237,6 +245,8 @@ static unsigned getPreIndexedOpcode(unsigned Opc) {
return AArch64::LDRWpre;
case AArch64::LDRXui:
return AArch64::LDRXpre;
+ case AArch64::LDRSWui:
+ return AArch64::LDRSWpre;
}
}
@@ -264,6 +274,8 @@ static unsigned getPostIndexedOpcode(unsigned Opc) {
return AArch64::LDRWpost;
case AArch64::LDRXui:
return AArch64::LDRXpost;
+ case AArch64::LDRSWui:
+ return AArch64::LDRSWpost;
}
}
@@ -780,6 +792,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
case AArch64::LDRQui:
case AArch64::LDRXui:
case AArch64::LDRWui:
+ case AArch64::LDRSWui:
// do the unscaled versions as well
case AArch64::STURSi:
case AArch64::STURDi:
@@ -790,7 +803,8 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
case AArch64::LDURDi:
case AArch64::LDURQi:
case AArch64::LDURWi:
- case AArch64::LDURXi: {
+ case AArch64::LDURXi:
+ case AArch64::LDURSWi: {
// If this is a volatile load/store, don't mess with it.
if (MI->hasOrderedMemoryRef()) {
++MBBI;
@@ -931,10 +945,8 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
- TII = static_cast<const AArch64InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ TII = static_cast<const AArch64InstrInfo *>(Fn.getSubtarget().getInstrInfo());
+ TRI = Fn.getSubtarget().getRegisterInfo();
bool Modified = false;
for (auto &MBB : Fn)
diff --git a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index f942c4e..4690177 100644
--- a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -235,7 +235,7 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
costs[i + 1][j + 1] = sameParityMax + 1.0;
}
}
- G.setEdgeCosts(edge, std::move(costs));
+ G.updateEdgeCosts(edge, std::move(costs));
return true;
}
@@ -312,7 +312,7 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
costs[i + 1][j + 1] = sameParityMax + 1.0;
}
}
- G.setEdgeCosts(edge, std::move(costs));
+ G.updateEdgeCosts(edge, std::move(costs));
}
}
}
@@ -328,7 +328,7 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
const MachineFunction &MF = G.getMetadata().MF;
LiveIntervals &LIs = G.getMetadata().LIS;
- TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
DEBUG(MF.dump());
for (const auto &MBB: MF) {
diff --git a/lib/Target/AArch64/AArch64PromoteConstant.cpp b/lib/Target/AArch64/AArch64PromoteConstant.cpp
index 16c33b7..c037c86 100644
--- a/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -22,7 +22,7 @@
#include "AArch64.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
@@ -31,6 +31,7 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
@@ -112,44 +113,42 @@ private:
AU.addPreserved<DominatorTreeWrapperPass>();
}
- /// Type to store a list of User.
- typedef SmallVector<Value::user_iterator, 4> Users;
+ /// Type to store a list of Uses.
+ typedef SmallVector<Use *, 4> Uses;
/// Map an insertion point to all the uses it dominates.
- typedef DenseMap<Instruction *, Users> InsertionPoints;
+ typedef DenseMap<Instruction *, Uses> InsertionPoints;
/// Map a function to the required insertion points of loads for a
/// global variable.
typedef DenseMap<Function *, InsertionPoints> InsertionPointsPerFunc;
/// Find the closest point that dominates the given Use.
- Instruction *findInsertionPoint(Value::user_iterator &Use);
+ Instruction *findInsertionPoint(Use &Use);
/// Check if the given insertion point is dominated by an existing
/// insertion point.
/// If true, the given use is added to the list of dominated uses for
/// the related existing point.
/// \param NewPt the insertion point to be checked
- /// \param UseIt the use to be added into the list of dominated uses
+ /// \param Use the use to be added into the list of dominated uses
/// \param InsertPts existing insertion points
/// \pre NewPt and all instructions in InsertPts belong to the same function
/// \return true if one of the insertion points in InsertPts dominates NewPt,
/// false otherwise
- bool isDominated(Instruction *NewPt, Value::user_iterator &UseIt,
- InsertionPoints &InsertPts);
+ bool isDominated(Instruction *NewPt, Use &Use, InsertionPoints &InsertPts);
/// Check if the given insertion point can be merged with an existing
/// insertion point in a common dominator.
/// If true, the given use is added to the use list of the created insertion
/// point.
/// \param NewPt the insertion point to be checked
- /// \param UseIt the use to be added into the list of dominated uses
+ /// \param Use the use to be added into the list of dominated uses
/// \param InsertPts existing insertion points
/// \pre NewPt and all instructions in InsertPts belong to the same function
/// \pre isDominated returns false for the exact same parameters.
/// \return true if there exists an insertion point in InsertPts that could
/// have been merged with NewPt in a common dominator,
/// false otherwise
- bool tryAndMerge(Instruction *NewPt, Value::user_iterator &UseIt,
- InsertionPoints &InsertPts);
+ bool tryAndMerge(Instruction *NewPt, Use &Use, InsertionPoints &InsertPts);
/// Compute the minimal insertion points that dominate all the interesting
/// uses of the value.
@@ -182,21 +181,19 @@ private:
bool promoteConstant(Constant *Cst);
/// Transfer the list of dominated uses of IPI to NewPt in InsertPts.
- /// Append UseIt to this list and delete the entry of IPI in InsertPts.
- static void appendAndTransferDominatedUses(Instruction *NewPt,
- Value::user_iterator &UseIt,
+ /// Append Use to this list and delete the entry of IPI in InsertPts.
+ static void appendAndTransferDominatedUses(Instruction *NewPt, Use &Use,
InsertionPoints::iterator &IPI,
InsertionPoints &InsertPts) {
// Record the dominated use.
- IPI->second.push_back(UseIt);
+ IPI->second.push_back(&Use);
// Transfer the dominated uses of IPI to NewPt
// Inserting into the DenseMap may invalidate existing iterator.
// Keep a copy of the key to find the iterator to erase.
Instruction *OldInstr = IPI->first;
- InsertPts.insert(InsertionPoints::value_type(NewPt, IPI->second));
+ InsertPts[NewPt] = std::move(IPI->second);
// Erase IPI.
- IPI = InsertPts.find(OldInstr);
- InsertPts.erase(IPI);
+ InsertPts.erase(OldInstr);
}
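// Hazard being sidestepped above (sketch): operator[] on a DenseMap may
// grow and rehash the table, invalidating IPI, so the key is copied first
// and the stale entry is erased by key rather than through the iterator.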
};
} // end anonymous namespace
@@ -328,23 +325,18 @@ static bool shouldConvert(const Constant *Cst) {
return isConstantUsingVectorTy(Cst->getType());
}
-Instruction *
-AArch64PromoteConstant::findInsertionPoint(Value::user_iterator &Use) {
+Instruction *AArch64PromoteConstant::findInsertionPoint(Use &Use) {
+ Instruction *User = cast<Instruction>(Use.getUser());
+
// If this user is a phi, the insertion point is in the related
// incoming basic block.
- PHINode *PhiInst = dyn_cast<PHINode>(*Use);
- Instruction *InsertionPoint;
- if (PhiInst)
- InsertionPoint =
- PhiInst->getIncomingBlock(Use.getOperandNo())->getTerminator();
- else
- InsertionPoint = dyn_cast<Instruction>(*Use);
- assert(InsertionPoint && "User is not an instruction!");
- return InsertionPoint;
+ if (PHINode *PhiInst = dyn_cast<PHINode>(User))
+ return PhiInst->getIncomingBlock(Use.getOperandNo())->getTerminator();
+
+ return User;
}
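// Why PHIs get special treatment (illustrative IR, hypothetical names): for
//   %p = phi <4 x i32> [ <i32 1, i32 1, i32 1, i32 1>, %bb1 ], [ %v, %bb2 ]
// the promoted load must be available at the end of %bb1, so the incoming
// block's terminator, not the phi itself, is the insertion point.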
-bool AArch64PromoteConstant::isDominated(Instruction *NewPt,
- Value::user_iterator &UseIt,
+bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Use &Use,
InsertionPoints &InsertPts) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
@@ -363,15 +355,14 @@ bool AArch64PromoteConstant::isDominated(Instruction *NewPt,
DEBUG(dbgs() << "Insertion point dominated by:\n");
DEBUG(IPI.first->print(dbgs()));
DEBUG(dbgs() << '\n');
- IPI.second.push_back(UseIt);
+ IPI.second.push_back(&Use);
return true;
}
}
return false;
}
-bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
- Value::user_iterator &UseIt,
+bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Use &Use,
InsertionPoints &InsertPts) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
*NewPt->getParent()->getParent()).getDomTree();
@@ -391,7 +382,7 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
DEBUG(dbgs() << "Merge insertion point with:\n");
DEBUG(IPI->first->print(dbgs()));
DEBUG(dbgs() << "\nat considered insertion point.\n");
- appendAndTransferDominatedUses(NewPt, UseIt, IPI, InsertPts);
+ appendAndTransferDominatedUses(NewPt, Use, IPI, InsertPts);
return true;
}
@@ -415,7 +406,7 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
DEBUG(dbgs() << '\n');
DEBUG(NewPt->print(dbgs()));
DEBUG(dbgs() << '\n');
- appendAndTransferDominatedUses(NewPt, UseIt, IPI, InsertPts);
+ appendAndTransferDominatedUses(NewPt, Use, IPI, InsertPts);
return true;
}
return false;
@@ -424,22 +415,22 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
void AArch64PromoteConstant::computeInsertionPoints(
Constant *Val, InsertionPointsPerFunc &InsPtsPerFunc) {
DEBUG(dbgs() << "** Compute insertion points **\n");
- for (Value::user_iterator UseIt = Val->user_begin(),
- EndUseIt = Val->user_end();
- UseIt != EndUseIt; ++UseIt) {
+ for (Use &Use : Val->uses()) {
+ Instruction *User = dyn_cast<Instruction>(Use.getUser());
+
// If the user is not an Instruction, we cannot modify it.
- if (!isa<Instruction>(*UseIt))
+ if (!User)
continue;
// Filter out uses that should not be converted.
- if (!shouldConvertUse(Val, cast<Instruction>(*UseIt), UseIt.getOperandNo()))
+ if (!shouldConvertUse(Val, User, Use.getOperandNo()))
continue;
- DEBUG(dbgs() << "Considered use, opidx " << UseIt.getOperandNo() << ":\n");
- DEBUG((*UseIt)->print(dbgs()));
+ DEBUG(dbgs() << "Considered use, opidx " << Use.getOperandNo() << ":\n");
+ DEBUG(User->print(dbgs()));
DEBUG(dbgs() << '\n');
- Instruction *InsertionPoint = findInsertionPoint(UseIt);
+ Instruction *InsertionPoint = findInsertionPoint(Use);
DEBUG(dbgs() << "Considered insertion point:\n");
DEBUG(InsertionPoint->print(dbgs()));
@@ -449,17 +440,17 @@ void AArch64PromoteConstant::computeInsertionPoints(
// by another one.
InsertionPoints &InsertPts =
InsPtsPerFunc[InsertionPoint->getParent()->getParent()];
- if (isDominated(InsertionPoint, UseIt, InsertPts))
+ if (isDominated(InsertionPoint, Use, InsertPts))
continue;
// This insertion point is useful, check if we can merge some insertion
// point in a common dominator or if NewPt dominates an existing one.
- if (tryAndMerge(InsertionPoint, UseIt, InsertPts))
+ if (tryAndMerge(InsertionPoint, Use, InsertPts))
continue;
DEBUG(dbgs() << "Keep considered insertion point\n");
// It is definitely useful on its own
- InsertPts[InsertionPoint].push_back(UseIt);
+ InsertPts[InsertionPoint].push_back(&Use);
}
}
@@ -470,41 +461,32 @@ bool AArch64PromoteConstant::insertDefinitions(
bool HasChanged = false;
// Traverse all insertion points in all the functions.
- for (InsertionPointsPerFunc::iterator FctToInstPtsIt = InsPtsPerFunc.begin(),
- EndIt = InsPtsPerFunc.end();
- FctToInstPtsIt != EndIt; ++FctToInstPtsIt) {
- InsertionPoints &InsertPts = FctToInstPtsIt->second;
+ for (const auto &FctToInstPtsIt : InsPtsPerFunc) {
+ const InsertionPoints &InsertPts = FctToInstPtsIt.second;
// Do more checking for debug purposes.
#ifndef NDEBUG
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
- *FctToInstPtsIt->first).getDomTree();
+ *FctToInstPtsIt.first).getDomTree();
#endif
- GlobalVariable *PromotedGV;
assert(!InsertPts.empty() && "Empty uses does not need a definition");
- Module *M = FctToInstPtsIt->first->getParent();
- DenseMap<Module *, GlobalVariable *>::iterator MapIt =
- ModuleToMergedGV.find(M);
- if (MapIt == ModuleToMergedGV.end()) {
+ Module *M = FctToInstPtsIt.first->getParent();
+ GlobalVariable *&PromotedGV = ModuleToMergedGV[M];
+ if (!PromotedGV) {
PromotedGV = new GlobalVariable(
*M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr,
"_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
PromotedGV->setInitializer(Cst);
- ModuleToMergedGV[M] = PromotedGV;
DEBUG(dbgs() << "Global replacement: ");
DEBUG(PromotedGV->print(dbgs()));
DEBUG(dbgs() << '\n');
++NumPromoted;
HasChanged = true;
- } else {
- PromotedGV = MapIt->second;
}
- for (InsertionPoints::iterator IPI = InsertPts.begin(),
- EndIPI = InsertPts.end();
- IPI != EndIPI; ++IPI) {
+ for (const auto &IPI : InsertPts) {
// Create the load of the global variable.
- IRBuilder<> Builder(IPI->first->getParent(), IPI->first);
+ IRBuilder<> Builder(IPI.first->getParent(), IPI.first);
LoadInst *LoadedCst = Builder.CreateLoad(PromotedGV);
DEBUG(dbgs() << "**********\n");
DEBUG(dbgs() << "New def: ");
@@ -512,18 +494,15 @@ bool AArch64PromoteConstant::insertDefinitions(
DEBUG(dbgs() << '\n');
// Update the dominated uses.
- Users &DominatedUsers = IPI->second;
- for (Value::user_iterator Use : DominatedUsers) {
+ for (Use *Use : IPI.second) {
#ifndef NDEBUG
- assert((DT.dominates(LoadedCst, cast<Instruction>(*Use)) ||
- (isa<PHINode>(*Use) &&
- DT.dominates(LoadedCst, findInsertionPoint(Use)))) &&
+ assert(DT.dominates(LoadedCst, findInsertionPoint(*Use)) &&
"Inserted definition does not dominate all its uses!");
#endif
- DEBUG(dbgs() << "Use to update " << Use.getOperandNo() << ":");
- DEBUG(Use->print(dbgs()));
+ DEBUG(dbgs() << "Use to update " << Use->getOperandNo() << ":");
+ DEBUG(Use->getUser()->print(dbgs()));
DEBUG(dbgs() << '\n');
- Use->setOperand(Use.getOperandNo(), LoadedCst);
+ Use->set(LoadedCst);
++NumPromotedUses;
}
}
@@ -556,22 +535,19 @@ bool AArch64PromoteConstant::runOnFunction(Function &F) {
// global variable. Create as few loads of this variable as possible and
// update the uses accordingly.
bool LocalChange = false;
- SmallSet<Constant *, 8> AlreadyChecked;
-
- for (auto &MBB : F) {
- for (auto &MI : MBB) {
- // Traverse the operand, looking for constant vectors. Replace them by a
- // load of a global variable of constant vector type.
- for (unsigned OpIdx = 0, EndOpIdx = MI.getNumOperands();
- OpIdx != EndOpIdx; ++OpIdx) {
- Constant *Cst = dyn_cast<Constant>(MI.getOperand(OpIdx));
- // There is no point in promoting global values as they are already
- // global. Do not promote constant expressions either, as they may
- // require some code expansion.
- if (Cst && !isa<GlobalValue>(Cst) && !isa<ConstantExpr>(Cst) &&
- AlreadyChecked.insert(Cst).second)
- LocalChange |= promoteConstant(Cst);
- }
+ SmallPtrSet<Constant *, 8> AlreadyChecked;
+
+ for (Instruction &I : inst_range(&F)) {
+ // Traverse the operands, looking for constant vectors. Replace them with a
+ // load of a global variable of constant vector type.
+ for (Value *Op : I.operand_values()) {
+ Constant *Cst = dyn_cast<Constant>(Op);
+ // There is no point in promoting global values as they are already
+ // global. Do not promote constant expressions either, as they may
+ // require some code expansion.
+ if (Cst && !isa<GlobalValue>(Cst) && !isa<ConstantExpr>(Cst) &&
+ AlreadyChecked.insert(Cst).second)
+ LocalChange |= promoteConstant(Cst);
}
}
return LocalChange;
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index d734d43..206cdbb 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -33,6 +33,10 @@ using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"
+static cl::opt<bool>
+ReserveX18("aarch64-reserve-x18", cl::Hidden,
+ cl::desc("Reserve X18, making it unavailable as GPR"));
+
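+// Usage sketch (hypothetical invocation): passing -aarch64-reserve-x18 to
+// llc removes X18/W18 from the allocatable GPRs on any target, mirroring
+// the Darwin platform-register reservation handled below.
+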
AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo *tii,
const AArch64Subtarget *sti)
: AArch64GenRegisterInfo(AArch64::LR), TII(tii), STI(sti) {}
@@ -40,6 +44,10 @@ AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo *tii,
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
+ if (MF->getFunction()->getCallingConv() == CallingConv::GHC)
+ // The GHC set of callee-saved regs is empty, as all those regs are
+ // used for passing STG regs around.
+ return CSR_AArch64_NoRegs_SaveList;
if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
return CSR_AArch64_AllRegs_SaveList;
else
@@ -48,6 +56,9 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ if (CC == CallingConv::GHC)
+ // This is academic because all GHC calls are (supposed to be) tail calls
+ return CSR_AArch64_NoRegs_RegMask;
if (CC == CallingConv::AnyReg)
return CSR_AArch64_AllRegs_RegMask;
else
@@ -63,7 +74,7 @@ const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
}
const uint32_t *
-AArch64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const {
+AArch64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const {
// This should return a register mask that is the same as that returned by
// getCallPreservedMask but that additionally preserves the register used for
// the first i64 argument (which must also be the register used to return a
@@ -71,6 +82,7 @@ AArch64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const {
//
// In case that the calling convention does not use the same register for
// both, the function should return NULL (does not currently apply)
+ assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
@@ -90,7 +102,7 @@ AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(AArch64::W29);
}
- if (STI->isTargetDarwin()) {
+ if (STI->isTargetDarwin() || ReserveX18) {
Reserved.set(AArch64::X18); // Platform register
Reserved.set(AArch64::W18);
}
@@ -117,7 +129,7 @@ bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
return true;
case AArch64::X18:
case AArch64::W18:
- return STI->isTargetDarwin();
+ return STI->isTargetDarwin() || ReserveX18;
case AArch64::FP:
case AArch64::W29:
return TFI->hasFP(MF) || STI->isTargetDarwin();
@@ -379,7 +391,7 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
case AArch64::GPR64commonRegClassID:
return 32 - 1 // XZR/SP
- (TFI->hasFP(MF) || STI->isTargetDarwin()) // FP
- - STI->isTargetDarwin() // X18 reserved as platform register
+ - (STI->isTargetDarwin() || ReserveX18) // X18 reserved as platform register
- hasBasePointer(MF); // X19
case AArch64::FPR8RegClassID:
case AArch64::FPR16RegClassID:
diff --git a/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 0cfd582..b9c5399 100644
--- a/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -28,15 +28,14 @@ SDValue AArch64SelectionDAGInfo::EmitTargetCodeForMemset(
// Check to see if there is a specialized entry-point for memory zeroing.
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
ConstantSDNode *SizeValue = dyn_cast<ConstantSDNode>(Size);
+ const AArch64Subtarget &STI =
+ DAG.getMachineFunction().getSubtarget<AArch64Subtarget>();
const char *bzeroEntry =
- (V && V->isNullValue())
- ? DAG.getTarget().getSubtarget<AArch64Subtarget>().getBZeroEntry()
- : nullptr;
+ (V && V->isNullValue()) ? STI.getBZeroEntry() : nullptr;
// For small sizes (< 256), it is not beneficial to use bzero
// instead of memset.
if (bzeroEntry && (!SizeValue || SizeValue->getZExtValue() > 256)) {
- const AArch64TargetLowering &TLI =
- *DAG.getTarget().getSubtarget<AArch64Subtarget>().getTargetLowering();
+ const AArch64TargetLowering &TLI = *STI.getTargetLowering();
EVT IntPtr = TLI.getPointerTy();
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 0c36e8f..85b44a2 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -30,7 +30,6 @@ class AArch64StorePairSuppress : public MachineFunctionPass {
const AArch64InstrInfo *TII;
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
- MachineFunction *MF;
TargetSchedModel SchedModel;
MachineTraceMetrics *Traces;
MachineTraceMetrics::Ensemble *MinInstr;
@@ -115,20 +114,16 @@ bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
}
}
-bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &mf) {
- MF = &mf;
- TII =
- static_cast<const AArch64InstrInfo *>(MF->getSubtarget().getInstrInfo());
- TRI = MF->getSubtarget().getRegisterInfo();
- MRI = &MF->getRegInfo();
- const TargetSubtargetInfo &ST =
- MF->getTarget().getSubtarget<TargetSubtargetInfo>();
+bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
+ const TargetSubtargetInfo &ST = MF.getSubtarget();
+ TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
+ TRI = ST.getRegisterInfo();
+ MRI = &MF.getRegInfo();
SchedModel.init(ST.getSchedModel(), &ST, TII);
-
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- DEBUG(dbgs() << "*** " << getPassName() << ": " << MF->getName() << '\n');
+ DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
if (!SchedModel.hasInstrSchedModel()) {
DEBUG(dbgs() << " Skipping pass: no machine model present.\n");
@@ -139,7 +134,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &mf) {
// precisely determine whether a store pair can be formed. But we do want to
// filter out most situations where we can't form store pairs to avoid
// computing trace metrics in those cases.
- for (auto &MBB : *MF) {
+ for (auto &MBB : MF) {
bool SuppressSTP = false;
unsigned PrevBaseReg = 0;
for (auto &MI : MBB) {
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index 47b5d54..c613025 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -48,17 +48,10 @@ AArch64Subtarget::AArch64Subtarget(const std::string &TT,
const TargetMachine &TM, bool LittleEndian)
: AArch64GenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
HasFPARMv8(false), HasNEON(false), HasCrypto(false), HasCRC(false),
- HasZeroCycleRegMove(false), HasZeroCycleZeroing(false), CPUString(CPU),
- TargetTriple(TT),
- // This nested ternary is horrible, but DL needs to be properly
- // initialized
- // before TLInfo is constructed.
- DL(isTargetMachO()
- ? "e-m:o-i64:64-i128:128-n32:64-S128"
- : (LittleEndian ? "e-m:e-i64:64-i128:128-n32:64-S128"
- : "E-m:e-i64:64-i128:128-n32:64-S128")),
- FrameLowering(), InstrInfo(initializeSubtargetDependencies(FS)),
- TSInfo(&DL), TLInfo(TM) {}
+ HasZeroCycleRegMove(false), HasZeroCycleZeroing(false),
+ IsLittle(LittleEndian), CPUString(CPU), TargetTriple(TT), FrameLowering(),
+ InstrInfo(initializeSubtargetDependencies(FS)),
+ TSInfo(TM.getDataLayout()), TLInfo(TM, *this) {}
/// ClassifyGlobalReference - Find the target operand flags that describe
/// how a global value should be referenced for the current subtarget.
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index e2740f1..d418cc5 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -48,13 +48,14 @@ protected:
// HasZeroCycleZeroing - Has zero-cycle zeroing instructions.
bool HasZeroCycleZeroing;
+ bool IsLittle;
+
/// CPUString - String name of used CPU.
std::string CPUString;
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
- const DataLayout DL;
AArch64FrameLowering FrameLowering;
AArch64InstrInfo InstrInfo;
AArch64SelectionDAGInfo TSInfo;
@@ -82,7 +83,6 @@ public:
return &TLInfo;
}
const AArch64InstrInfo *getInstrInfo() const override { return &InstrInfo; }
- const DataLayout *getDataLayout() const override { return &DL; }
const AArch64RegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
@@ -100,7 +100,7 @@ public:
bool hasCrypto() const { return HasCrypto; }
bool hasCRC() const { return HasCRC; }
- bool isLittleEndian() const { return DL.isLittleEndian(); }
+ bool isLittleEndian() const { return IsLittle; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetIOS() const { return TargetTriple.isiOS(); }
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index beed8e0..d73d0b3 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -13,10 +13,11 @@
#include "AArch64.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
+#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/Function.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
@@ -112,6 +113,13 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL,
bool LittleEndian)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ // This nested ternary is horrible, but DL needs to be properly
+ // initialized before TLInfo is constructed.
+ DL(Triple(TT).isOSBinFormatMachO()
+ ? "e-m:o-i64:64-i128:128-n32:64-S128"
+ : (LittleEndian ? "e-m:e-i64:64-i128:128-n32:64-S128"
+ : "E-m:e-i64:64-i128:128-n32:64-S128")),
TLOF(createTLOF(Triple(getTargetTriple()))),
Subtarget(TT, CPU, FS, *this, LittleEndian), isLittle(LittleEndian) {
initAsmInfo();
@@ -121,11 +129,8 @@ AArch64TargetMachine::~AArch64TargetMachine() {}
const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
@@ -181,19 +186,17 @@ public:
bool addPreISel() override;
bool addInstSelector() override;
bool addILPOpts() override;
- bool addPreRegAlloc() override;
- bool addPostRegAlloc() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
} // namespace
-void AArch64TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our AArch64 pass. This
- // allows the AArch64 pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createAArch64TargetTransformInfoPass(this));
+TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis([this](Function &F) {
+ return TargetTransformInfo(AArch64TTIImpl(this, F));
+ });
}
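// Rough sketch of how the hook is consumed (assumed call site, not part of
// this file): the pass manager asks the target for the analysis and runs it
// per function, e.g.
//   TargetIRAnalysis TIRA = TM.getTargetIRAnalysis();
//   TargetTransformInfo TTI = TIRA.run(F);
// so each queried Function gets an AArch64TTIImpl built by the lambda above.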
TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
@@ -233,8 +236,11 @@ bool AArch64PassConfig::addPreISel() {
// get a chance to be merged
if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
addPass(createAArch64PromoteConstantPass());
+ // FIXME: On AArch64, this depends on the type.
+ // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
+ // and the offset has to be a multiple of the related size in bytes.
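+ // Worked example of the bound (sketch): for i32 accesses the reachable
+ // immediate offsets are 0 .. 4095 * 4 in steps of 4, i.e. at most
+ //   ldr w0, [x8, #16380]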
if (TM->getOptLevel() != CodeGenOpt::None)
- addPass(createGlobalMergePass(TM));
+ addPass(createGlobalMergePass(TM, 4095));
if (TM->getOptLevel() != CodeGenOpt::None)
addPass(createAArch64AddressTypePromotionPass());
@@ -246,7 +252,7 @@ bool AArch64PassConfig::addInstSelector() {
// For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
// references to _TLS_MODULE_BASE_ as possible.
- if (TM->getSubtarget<AArch64Subtarget>().isTargetELF() &&
+ if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
getOptLevel() != CodeGenOpt::None)
addPass(createAArch64CleanupLocalDynamicTLSPass());
@@ -267,7 +273,7 @@ bool AArch64PassConfig::addILPOpts() {
return true;
}
-bool AArch64PassConfig::addPreRegAlloc() {
+void AArch64PassConfig::addPreRegAlloc() {
// Use AdvSIMD scalar instructions whenever profitable.
if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
addPass(createAArch64AdvSIMDScalar());
@@ -275,10 +281,9 @@ bool AArch64PassConfig::addPreRegAlloc() {
// be register coaleascer friendly.
addPass(&PeepholeOptimizerID);
}
- return true;
}
-bool AArch64PassConfig::addPostRegAlloc() {
+void AArch64PassConfig::addPostRegAlloc() {
// Change dead register definitions to refer to the zero register.
if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createAArch64DeadRegisterDefinitions());
@@ -288,26 +293,23 @@ bool AArch64PassConfig::addPostRegAlloc() {
usingDefaultRegAlloc())
// Improve performance for some FP/SIMD code for A57.
addPass(createAArch64A57FPLoadBalancing());
- return true;
}
-bool AArch64PassConfig::addPreSched2() {
+void AArch64PassConfig::addPreSched2() {
// Expand some pseudo instructions to allow proper scheduling.
addPass(createAArch64ExpandPseudoPass());
// Use load/store pair instructions when possible.
if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
addPass(createAArch64LoadStoreOptimizationPass());
- return true;
}
-bool AArch64PassConfig::addPreEmitPass() {
+void AArch64PassConfig::addPreEmitPass() {
if (EnableA53Fix835769)
addPass(createAArch64A53Fix835769());
// Relax conditional branch instructions if they're otherwise out of
// range of their destination.
addPass(createAArch64BranchRelaxation());
if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
- TM->getSubtarget<AArch64Subtarget>().isTargetMachO())
+ Triple(TM->getTargetTriple()).isOSBinFormatMachO())
addPass(createAArch64CollectLOHPass());
- return true;
}
diff --git a/lib/Target/AArch64/AArch64TargetMachine.h b/lib/Target/AArch64/AArch64TargetMachine.h
index 75c65c5..7143adf 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.h
+++ b/lib/Target/AArch64/AArch64TargetMachine.h
@@ -23,6 +23,7 @@ namespace llvm {
class AArch64TargetMachine : public LLVMTargetMachine {
protected:
+ const DataLayout DL;
std::unique_ptr<TargetLoweringObjectFile> TLOF;
AArch64Subtarget Subtarget;
mutable StringMap<std::unique_ptr<AArch64Subtarget>> SubtargetMap;
@@ -35,6 +36,7 @@ public:
~AArch64TargetMachine() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const AArch64Subtarget *getSubtargetImpl() const override {
return &Subtarget;
}
@@ -43,8 +45,8 @@ public:
// Pass Pipeline Configuration
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- /// \brief Register AArch64 analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
+ /// \brief Get the TargetIRAnalysis for this target.
+ TargetIRAnalysis getTargetIRAnalysis() override;
TargetLoweringObjectFile* getObjFileLowering() const override {
return TLOF.get();
diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index b1a2914..0646d85 100644
--- a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI pass --------===//
+//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,18 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements a TargetTransformInfo analysis pass specific to the
-/// AArch64 target machine. It uses the target's detailed information to provide
-/// more precise answers to certain TTI queries, while letting the target
-/// independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-#include "AArch64.h"
-#include "AArch64TargetMachine.h"
+#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -26,130 +19,10 @@ using namespace llvm;
#define DEBUG_TYPE "aarch64tti"
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeAArch64TTIPass(PassRegistry &);
-}
-
-namespace {
-
-class AArch64TTI final : public ImmutablePass, public TargetTransformInfo {
- const AArch64TargetMachine *TM;
- const AArch64Subtarget *ST;
- const AArch64TargetLowering *TLI;
-
- /// Estimate the overhead of scalarizing an instruction. Insert and Extract
- /// are set if the result needs to be inserted and/or extracted from vectors.
- unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
-
-public:
- AArch64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- AArch64TTI(const AArch64TargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
- TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializeAArch64TTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override { pushTTIStack(this); }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo *)this;
- return this;
- }
-
- /// \name Scalar TTI Implementations
- /// @{
- unsigned getIntImmCost(int64_t Val) const;
- unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
- unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
- unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
- PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
-
- /// @}
-
- /// \name Vector TTI Implementations
- /// @{
-
- unsigned getNumberOfRegisters(bool Vector) const override {
- if (Vector) {
- if (ST->hasNEON())
- return 32;
- return 0;
- }
- return 31;
- }
-
- unsigned getRegisterBitWidth(bool Vector) const override {
- if (Vector) {
- if (ST->hasNEON())
- return 128;
- return 0;
- }
- return 64;
- }
-
- unsigned getMaxInterleaveFactor() const override;
-
- unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const
- override;
-
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const
- override;
-
- unsigned getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
- OperandValueKind Opd2Info = OK_AnyValue,
- OperandValueProperties Opd1PropInfo = OP_None,
- OperandValueProperties Opd2PropInfo = OP_None) const override;
-
- unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
-
- unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) const
- override;
-
- unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const override;
-
- unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const override;
-
- void getUnrollingPreferences(const Function *F, Loop *L,
- UnrollingPreferences &UP) const override;
-
-
- /// @}
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(AArch64TTI, TargetTransformInfo, "aarch64tti",
- "AArch64 Target Transform Info", true, true, false)
-char AArch64TTI::ID = 0;
-
-ImmutablePass *
-llvm::createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM) {
- return new AArch64TTI(TM);
-}
-
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
-unsigned AArch64TTI::getIntImmCost(int64_t Val) const {
+unsigned AArch64TTIImpl::getIntImmCost(int64_t Val) {
// Check if the immediate can be encoded within an instruction.
if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
return 0;
@@ -163,7 +36,7 @@ unsigned AArch64TTI::getIntImmCost(int64_t Val) const {
}
/// \brief Calculate the cost of materializing the given constant.
-unsigned AArch64TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+unsigned AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -187,25 +60,25 @@ unsigned AArch64TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
return std::max(1U, Cost);
}
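// Worked example for the materialization cost above (sketch): an immediate
// such as 0x123456789ABCDEF0 needs a movz plus three movk instructions, so
// it costs 4, while a valid logical immediate like 0x00FF00FF00FF00FF
// encodes directly into the instruction and costs 0.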
-unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
- const APInt &Imm, Type *Ty) const {
+unsigned AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
// There is no cost model for constants with a bit size of 0. Return TCC_Free
// here, so that constant hoisting will ignore this constant.
if (BitSize == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
unsigned ImmIdx = ~0U;
switch (Opcode) {
default:
- return TCC_Free;
+ return TTI::TCC_Free;
case Instruction::GetElementPtr:
// Always hoist the base address of a GetElementPtr.
if (Idx == 0)
- return 2 * TCC_Basic;
- return TCC_Free;
+ return 2 * TTI::TCC_Basic;
+ return TTI::TCC_Free;
case Instruction::Store:
ImmIdx = 0;
break;
@@ -227,7 +100,7 @@ unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
case Instruction::LShr:
case Instruction::AShr:
if (Idx == 1)
- return TCC_Free;
+ return TTI::TCC_Free;
break;
case Instruction::Trunc:
case Instruction::ZExt:
@@ -245,26 +118,27 @@ unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
if (Idx == ImmIdx) {
unsigned NumConstants = (BitSize + 63) / 64;
- unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
- return (Cost <= NumConstants * TCC_Basic)
- ? static_cast<unsigned>(TCC_Free) : Cost;
+ unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TTI::TCC_Basic)
+ ? static_cast<unsigned>(TTI::TCC_Free)
+ : Cost;
}
- return AArch64TTI::getIntImmCost(Imm, Ty);
+ return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
-unsigned AArch64TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
- const APInt &Imm, Type *Ty) const {
+unsigned AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
// There is no cost model for constants with a bit size of 0. Return TCC_Free
// here, so that constant hoisting will ignore this constant.
if (BitSize == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
switch (IID) {
default:
- return TCC_Free;
+ return TTI::TCC_Free;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -273,35 +147,36 @@ unsigned AArch64TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
case Intrinsic::umul_with_overflow:
if (Idx == 1) {
unsigned NumConstants = (BitSize + 63) / 64;
- unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
- return (Cost <= NumConstants * TCC_Basic)
- ? static_cast<unsigned>(TCC_Free) : Cost;
+ unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TTI::TCC_Basic)
+ ? static_cast<unsigned>(TTI::TCC_Free)
+ : Cost;
}
break;
case Intrinsic::experimental_stackmap:
if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
- return TCC_Free;
+ return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
- return TCC_Free;
+ return TTI::TCC_Free;
break;
}
- return AArch64TTI::getIntImmCost(Imm, Ty);
+ return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
-AArch64TTI::PopcntSupportKind
-AArch64TTI::getPopcntSupport(unsigned TyWidth) const {
+TargetTransformInfo::PopcntSupportKind
+AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
if (TyWidth == 32 || TyWidth == 64)
- return PSK_FastHardware;
+ return TTI::PSK_FastHardware;
// TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
- return PSK_Software;
+ return TTI::PSK_Software;
}
-unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const {
+unsigned AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -309,7 +184,7 @@ unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
EVT DstTy = TLI->getValueType(Dst);
if (!SrcTy.isSimple() || !DstTy.isSimple())
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
// LowerVectorINT_TO_FP:
@@ -380,11 +255,11 @@ unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
if (Idx != -1)
return ConversionTbl[Idx].Cost;
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
-unsigned AArch64TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const {
+unsigned AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
if (Index != -1U) {
@@ -408,10 +283,10 @@ unsigned AArch64TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
return 2;
}
-unsigned AArch64TTI::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
- OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
- OperandValueProperties Opd2PropInfo) const {
+unsigned AArch64TTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
@@ -442,8 +317,8 @@ unsigned AArch64TTI::getArithmeticInstrCost(
switch (ISD) {
default:
- return TargetTransformInfo::getArithmeticInstrCost(
- Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
case ISD::ADD:
case ISD::MUL:
case ISD::XOR:
@@ -455,7 +330,7 @@ unsigned AArch64TTI::getArithmeticInstrCost(
}
}
-unsigned AArch64TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+unsigned AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@@ -470,8 +345,8 @@ unsigned AArch64TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
return 1;
}
-unsigned AArch64TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const {
+unsigned AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
// We don't lower vector selects well that are wider than the register width.
@@ -498,12 +373,12 @@ unsigned AArch64TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return VectorSelectTbl[Idx].Cost;
}
}
- return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+ return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-unsigned AArch64TTI::getMemoryOpCost(unsigned Opcode, Type *Src,
- unsigned Alignment,
- unsigned AddressSpace) const {
+unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
@@ -531,7 +406,7 @@ unsigned AArch64TTI::getMemoryOpCost(unsigned Opcode, Type *Src,
return LT.first;
}
-unsigned AArch64TTI::getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const {
+unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
unsigned Cost = 0;
for (auto *I : Tys) {
if (!I->isVectorTy())
@@ -543,14 +418,94 @@ unsigned AArch64TTI::getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const {
return Cost;
}
-unsigned AArch64TTI::getMaxInterleaveFactor() const {
+unsigned AArch64TTIImpl::getMaxInterleaveFactor() {
if (ST->isCortexA57())
return 4;
return 2;
}
-void AArch64TTI::getUnrollingPreferences(const Function *F, Loop *L,
- UnrollingPreferences &UP) const {
+void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
+ TTI::UnrollingPreferences &UP) {
// Disable partial & runtime unrolling on -Os.
UP.PartialOptSizeThreshold = 0;
}
+
+Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType) {
+ switch (Inst->getIntrinsicID()) {
+ default:
+ return nullptr;
+ case Intrinsic::aarch64_neon_st2:
+ case Intrinsic::aarch64_neon_st3:
+ case Intrinsic::aarch64_neon_st4: {
+ // Create a struct type
+ StructType *ST = dyn_cast<StructType>(ExpectedType);
+ if (!ST)
+ return nullptr;
+ unsigned NumElts = Inst->getNumArgOperands() - 1;
+ if (ST->getNumElements() != NumElts)
+ return nullptr;
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
+ return nullptr;
+ }
+ Value *Res = UndefValue::get(ExpectedType);
+ IRBuilder<> Builder(Inst);
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ Value *L = Inst->getArgOperand(i);
+ Res = Builder.CreateInsertValue(Res, L, i);
+ }
+ return Res;
+ }
+ case Intrinsic::aarch64_neon_ld2:
+ case Intrinsic::aarch64_neon_ld3:
+ case Intrinsic::aarch64_neon_ld4:
+ if (Inst->getType() == ExpectedType)
+ return Inst;
+ return nullptr;
+ }
+}
+
+bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
+ MemIntrinsicInfo &Info) {
+ switch (Inst->getIntrinsicID()) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_ld2:
+ case Intrinsic::aarch64_neon_ld3:
+ case Intrinsic::aarch64_neon_ld4:
+ Info.ReadMem = true;
+ Info.WriteMem = false;
+ Info.Vol = false;
+ Info.NumMemRefs = 1;
+ Info.PtrVal = Inst->getArgOperand(0);
+ break;
+ case Intrinsic::aarch64_neon_st2:
+ case Intrinsic::aarch64_neon_st3:
+ case Intrinsic::aarch64_neon_st4:
+ Info.ReadMem = false;
+ Info.WriteMem = true;
+ Info.Vol = false;
+ Info.NumMemRefs = 1;
+ Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
+ break;
+ }
+
+ switch (Inst->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::aarch64_neon_ld2:
+ case Intrinsic::aarch64_neon_st2:
+ Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
+ break;
+ case Intrinsic::aarch64_neon_ld3:
+ case Intrinsic::aarch64_neon_st3:
+ Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
+ break;
+ case Intrinsic::aarch64_neon_ld4:
+ case Intrinsic::aarch64_neon_st4:
+ Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
+ break;
+ }
+ return true;
+}
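
Taken together, the two hooks above let a pass such as EarlyCSE see through the NEON structured loads and stores: getTgtMemIntrinsic describes the memory behavior, and getOrCreateResultFromMemIntrinsic rebuilds the stored aggregate so a matching ldN can be forwarded. A minimal consumer-side sketch, assuming the TargetTransformInfo wrapper forwards both hooks; TTI, Inst, and LastStore are illustrative locals, not part of this patch:

    // Sketch: forward the value a prior stN wrote to a matching ldN.
    MemIntrinsicInfo Info;
    if (auto *II = dyn_cast<IntrinsicInst>(&Inst)) {
      if (TTI.getTgtMemIntrinsic(II, Info) && Info.ReadMem && !Info.Vol) {
        // LastStore: an earlier stN to Info.PtrVal with the same MatchingId.
        if (Value *V = TTI.getOrCreateResultFromMemIntrinsic(LastStore,
                                                             II->getType())) {
          II->replaceAllUsesWith(V); // reuse the stored aggregate
          II->eraseFromParent();     // the ldN is now dead
        }
      }
    }
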
diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.h b/lib/Target/AArch64/AArch64TargetTransformInfo.h
new file mode 100644
index 0000000..dd3fd1f
--- /dev/null
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -0,0 +1,147 @@
+//===-- AArch64TargetTransformInfo.h - AArch64 specific TTI -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object for
+/// the AArch64 target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target-independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
+
+#include "AArch64.h"
+#include "AArch64TargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+#include <algorithm>
+
+namespace llvm {
+
+class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
+ typedef BasicTTIImplBase<AArch64TTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const AArch64TargetMachine *TM;
+ const AArch64Subtarget *ST;
+ const AArch64TargetLowering *TLI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
+
+ const AArch64Subtarget *getST() const { return ST; }
+ const AArch64TargetLowering *getTLI() const { return TLI; }
+
+ enum MemIntrinsicType {
+ VECTOR_LDST_TWO_ELEMENTS,
+ VECTOR_LDST_THREE_ELEMENTS,
+ VECTOR_LDST_FOUR_ELEMENTS
+ };
+
+public:
+ explicit AArch64TTIImpl(const AArch64TargetMachine *TM, Function &F)
+ : BaseT(TM), TM(TM), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ AArch64TTIImpl(const AArch64TTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), TM(Arg.TM), ST(Arg.ST),
+ TLI(Arg.TLI) {}
+ AArch64TTIImpl(AArch64TTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), TM(std::move(Arg.TM)),
+ ST(std::move(Arg.ST)), TLI(std::move(Arg.TLI)) {}
+ AArch64TTIImpl &operator=(const AArch64TTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ TM = RHS.TM;
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ AArch64TTIImpl &operator=(AArch64TTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ TM = std::move(RHS.TM);
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ using BaseT::getIntImmCost;
+ unsigned getIntImmCost(int64_t Val);
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty);
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector) {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 32;
+ return 0;
+ }
+ return 31;
+ }
+
+ unsigned getRegisterBitWidth(bool Vector) {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 128;
+ return 0;
+ }
+ return 64;
+ }
+
+ unsigned getMaxInterleaveFactor();
+
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+
+ unsigned getAddressComputationCost(Type *Ty, bool IsComplex);
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace);
+
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
+
+ void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
+
+ Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType);
+
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
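
The friend BaseT declaration and the getST()/getTLI() accessors are what BasicTTIImplBase needs for its CRTP dispatch: the base statically casts this to AArch64TTIImpl, so overrides resolve at compile time with no virtual calls. A toy model of the shape, independent of LLVM (all names below are illustrative):

    #include <iostream>

    // BaseTTI<T> stands in for BasicTTIImplBase<T>: defaults live in the
    // base, and shared logic reaches the derived override via static_cast.
    template <typename T> struct BaseTTI {
      unsigned getMaxInterleaveFactor() { return 1; } // conservative default
      unsigned unrollFactorHint() {
        return 2 * static_cast<T *>(this)->getMaxInterleaveFactor();
      }
    };

    struct ToyAArch64TTI : BaseTTI<ToyAArch64TTI> {
      unsigned getMaxInterleaveFactor() { return 4; } // e.g. Cortex-A57
    };

    int main() {
      ToyAArch64TTI TTI;
      std::cout << TTI.unrollFactorHint() << "\n"; // prints 8, not 2
    }
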
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 98e0ea8..1960c99 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -210,9 +210,9 @@ private:
struct SysRegOp {
const char *Data;
unsigned Length;
- uint64_t FeatureBits; // We need to pass through information about which
- // core we are compiling for so that the SysReg
- // Mappers can appropriately conditionalize.
+ uint32_t MRSReg;
+ uint32_t MSRReg;
+ uint32_t PStateField;
};
struct SysCRImmOp {
@@ -374,11 +374,6 @@ public:
return StringRef(SysReg.Data, SysReg.Length);
}
- uint64_t getSysRegFeatureBits() const {
- assert(Kind == k_SysReg && "Invalid access!");
- return SysReg.FeatureBits;
- }
-
unsigned getSysCR() const {
assert(Kind == k_SysCR && "Invalid access!");
return SysCRImm.Val;
@@ -855,28 +850,17 @@ public:
bool isMRSSystemRegister() const {
if (!isSysReg()) return false;
- bool IsKnownRegister;
- auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
- Mapper.fromString(getSysReg(), IsKnownRegister);
-
- return IsKnownRegister;
+ return SysReg.MRSReg != -1U;
}
bool isMSRSystemRegister() const {
if (!isSysReg()) return false;
- bool IsKnownRegister;
- auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
- Mapper.fromString(getSysReg(), IsKnownRegister);
-
- return IsKnownRegister;
+ return SysReg.MSRReg != -1U;
}
bool isSystemPStateField() const {
if (!isSysReg()) return false;
- bool IsKnownRegister;
- AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
-
- return IsKnownRegister;
+ return SysReg.PStateField != -1U;
}
bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
@@ -1454,31 +1438,19 @@ public:
void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- bool Valid;
- auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
- uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
-
- Inst.addOperand(MCOperand::CreateImm(Bits));
+ Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
}
void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- bool Valid;
- auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
- uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
-
- Inst.addOperand(MCOperand::CreateImm(Bits));
+ Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
}
void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- bool Valid;
- uint32_t Bits =
- AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
-
- Inst.addOperand(MCOperand::CreateImm(Bits));
+ Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
}
void addSysCROperands(MCInst &Inst, unsigned N) const {
@@ -1645,12 +1617,17 @@ public:
return Op;
}
- static std::unique_ptr<AArch64Operand>
- CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
+ static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
+ uint32_t MRSReg,
+ uint32_t MSRReg,
+ uint32_t PStateField,
+ MCContext &Ctx) {
auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
Op->SysReg.Data = Str.data();
Op->SysReg.Length = Str.size();
- Op->SysReg.FeatureBits = FeatureBits;
+ Op->SysReg.MRSReg = MRSReg;
+ Op->SysReg.MSRReg = MSRReg;
+ Op->SysReg.PStateField = PStateField;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
@@ -2643,8 +2620,24 @@ AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
- STI.getFeatureBits(), getContext()));
+ bool IsKnown;
+ auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
+ uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
+ assert(IsKnown == (MRSReg != -1U) &&
+ "register should be -1 if and only if it's unknown");
+
+ auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
+ uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
+ assert(IsKnown == (MSRReg != -1U) &&
+ "register should be -1 if and only if it's unknown");
+
+ uint32_t PStateField =
+ AArch64PState::PStateMapper().fromString(Tok.getString(), IsKnown);
+ assert(IsKnown == (PStateField != -1U) &&
+ "register should be -1 if and only if it's unknown");
+
+ Operands.push_back(AArch64Operand::CreateSysReg(
+ Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
Parser.Lex(); // Eat identifier
return MatchOperand_Success;
@@ -3927,7 +3920,6 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
llvm_unreachable("Implement any new match types added!");
- return true;
}
/// ParseDirective parses the arm specific directives
@@ -4140,7 +4132,7 @@ bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
Parser.Lex(); // Consume the EndOfStatement
auto pair = std::make_pair(IsVector, RegNum);
- if (!RegisterReqs.insert(std::make_pair(Name, pair)).second)
+ if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
Warning(L, "ignoring redefinition of register alias '" + Name + "'");
return true;
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 878e29c..fb25089 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -221,13 +221,11 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
static MCSymbolizer *
createAArch64ExternalSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
- LLVMSymbolLookupCallback SymbolLookUp,
- void *DisInfo, MCContext *Ctx,
- MCRelocationInfo *RelInfo) {
- return new llvm::AArch64ExternalSymbolizer(
- *Ctx,
- std::unique_ptr<MCRelocationInfo>(RelInfo),
- GetOpInfo, SymbolLookUp, DisInfo);
+ LLVMSymbolLookupCallback SymbolLookUp,
+ void *DisInfo, MCContext *Ctx,
+ std::unique_ptr<MCRelocationInfo> &&RelInfo) {
+  return new llvm::AArch64ExternalSymbolizer(*Ctx, std::move(RelInfo),
+                                             GetOpInfo, SymbolLookUp, DisInfo);
}
extern "C" void LLVMInitializeAArch64Disassembler() {
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 1dc506a..ed24343 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -51,7 +51,7 @@ enum ShiftExtendType {
/// getShiftExtendName - Get the string encoding for the shift or extend type.
static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
switch (ST) {
- default: assert(false && "unhandled shift type!");
+ default: llvm_unreachable("unhandled shift type!");
case AArch64_AM::LSL: return "lsl";
case AArch64_AM::LSR: return "lsr";
case AArch64_AM::ASR: return "asr";
@@ -236,21 +236,22 @@ static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
if (isShiftedMask_64(Imm)) {
I = countTrailingZeros(Imm);
- CTO = CountTrailingOnes_64(Imm >> I);
+ assert(I < 64 && "undefined behavior");
+ CTO = countTrailingOnes(Imm >> I);
} else {
Imm |= ~Mask;
if (!isShiftedMask_64(~Imm))
return false;
- unsigned CLO = CountLeadingOnes_64(Imm);
+ unsigned CLO = countLeadingOnes(Imm);
I = 64 - CLO;
- CTO = CLO + CountTrailingOnes_64(Imm) - (64 - Size);
+ CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
}
// Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
- // to our target value, where i is the number of RORs to go the opposite
+ // to our target value, where I is the number of RORs to go the opposite
// direction.
- assert(Size > I && "I should be smaller than element Size");
+ assert(Size > I && "I should be smaller than element size");
unsigned Immr = (Size - I) & (Size - 1);
// If size has a 1 in the n'th bit, create a value that has zeroes in
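
For reference, the shifted-mask arm above splits Imm = 0...0 1...1 0...0 into a start position I and a run length CTO; the new assert documents that Imm != 0 is exactly what keeps the shift defined. A portable sketch, with compiler builtins standing in for llvm::countTrailingZeros/countTrailingOnes:

    #include <cassert>
    #include <cstdint>

    // Decompose a shifted mask, e.g. Imm = 0x0ff0 -> I = 4, CTO = 8.
    static void decomposeShiftedMask(uint64_t Imm, unsigned &I, unsigned &CTO) {
      assert(Imm != 0 && ~Imm != 0 && "degenerate masks are handled elsewhere");
      I = __builtin_ctzll(Imm);           // trailing zeros; < 64 since Imm != 0
      CTO = __builtin_ctzll(~(Imm >> I)); // trailing ones of the shifted value
    }
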
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 0bc2f77..423da65 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -13,8 +13,8 @@
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCDirectives.h"
-#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
@@ -132,7 +132,7 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
int64_t SignedValue = static_cast<int64_t>(Value);
switch (Kind) {
default:
- assert(false && "Unknown fixup kind!");
+ llvm_unreachable("Unknown fixup kind!");
case AArch64::fixup_aarch64_pcrel_adr_imm21:
if (SignedValue > 2097151 || SignedValue < -2097152)
report_fatal_error("fixup value out of range");
@@ -239,7 +239,7 @@ bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
MCInst &Res) const {
- assert(false && "AArch64AsmBackend::relaxInstruction() unimplemented");
+ llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}
bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
@@ -317,42 +317,6 @@ public:
MachO::CPU_SUBTYPE_ARM64_ALL);
}
- bool doesSectionRequireSymbols(const MCSection &Section) const override {
- // Any section for which the linker breaks things into atoms needs to
- // preserve symbols, including assembler local symbols, to identify
- // those atoms. These sections are:
- // Sections of type:
- //
- // S_CSTRING_LITERALS (e.g. __cstring)
- // S_LITERAL_POINTERS (e.g. objc selector pointers)
- // S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
- //
- // Sections named:
- //
- // __TEXT,__eh_frame
- // __TEXT,__ustring
- // __DATA,__cfstring
- // __DATA,__objc_classrefs
- // __DATA,__objc_catlist
- //
- // FIXME: It would be better if the compiler used actual linker local
- // symbols for each of these sections rather than preserving what
- // are ostensibly assembler local symbols.
- const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
- return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
- SMO.getType() == MachO::S_4BYTE_LITERALS ||
- SMO.getType() == MachO::S_8BYTE_LITERALS ||
- SMO.getType() == MachO::S_16BYTE_LITERALS ||
- SMO.getType() == MachO::S_LITERAL_POINTERS ||
- (SMO.getSegmentName() == "__TEXT" &&
- (SMO.getSectionName() == "__eh_frame" ||
- SMO.getSectionName() == "__ustring")) ||
- (SMO.getSegmentName() == "__DATA" &&
- (SMO.getSectionName() == "__cfstring" ||
- SMO.getSectionName() == "__objc_classrefs" ||
- SMO.getSectionName() == "__objc_catlist")));
- }
-
/// \brief Generate the compact unwind encoding from the CFI directives.
uint32_t generateCompactUnwindEncoding(
ArrayRef<MCCFIInstruction> Instrs) const override {
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index e05191e..5ea49c3 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -78,7 +78,7 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
return ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
- return ELF::R_AARCH64_TLSDESC_ADR_PAGE;
+ return ELF::R_AARCH64_TLSDESC_ADR_PAGE21;
llvm_unreachable("invalid symbol kind for ADRP relocation");
case AArch64::fixup_aarch64_pcrel_branch26:
return ELF::R_AARCH64_JUMP26;
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 60e9c19..8dc6c30 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -177,7 +177,9 @@ private:
MCELF::SetType(SD, ELF::STT_NOTYPE);
MCELF::SetBinding(SD, ELF::STB_LOCAL);
SD.setExternal(false);
- Symbol->setSection(*getCurrentSection().first);
+ auto Sec = getCurrentSection().first;
+ assert(Sec && "need a section");
+ Symbol->setSection(*Sec);
const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
Symbol->setVariableValue(Value);
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index 70b9329..f048474 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -37,6 +37,7 @@ AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
AssemblerDialect = AsmWriterVariant == Default ? 1 : AsmWriterVariant;
PrivateGlobalPrefix = "L";
+ PrivateLabelPrefix = "L";
SeparatorString = "%%";
CommentString = ";";
PointerSize = CalleeSaveStackSlotSize = 8;
@@ -79,6 +80,7 @@ AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(StringRef TT) {
CommentString = "//";
PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
Code32Directive = ".code\t32";
Data16bitsDirective = "\t.hword\t";
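
PrivateLabelPrefix, newly set here, controls compiler-generated local labels (e.g. .Ltmp0), while PrivateGlobalPrefix covers private global names; both AArch64 variants keep the two identical. An illustrative check, assuming MAI points at the ELF flavor constructed above:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/MC/MCAsmInfo.h"
    #include <cassert>

    // Both prefixes come back as ".L" for ELF ("L" for the Darwin variant).
    void checkPrefixes(const llvm::MCAsmInfo *MAI) {
      assert(llvm::StringRef(MAI->getPrivateGlobalPrefix()) == ".L");
      assert(llvm::StringRef(MAI->getPrivateLabelPrefix()) == ".L");
    }
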
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
index 5d03c21..9b88de7 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -15,6 +15,7 @@
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCASMINFO_H
#include "llvm/MC/MCAsmInfoDarwin.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class Target;
@@ -27,7 +28,7 @@ struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
MCStreamer &Streamer) const override;
};
-struct AArch64MCAsmInfoELF : public MCAsmInfo {
+struct AArch64MCAsmInfoELF : public MCAsmInfoELF {
explicit AArch64MCAsmInfoELF(StringRef TT);
};
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index c306b11..4756a19 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -437,8 +437,7 @@ AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
return 3;
}
- assert(false && "Invalid value for vector shift amount!");
- return 0;
+ llvm_unreachable("Invalid value for vector shift amount!");
}
uint32_t
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
index e12a24b..0d9385d 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -10,6 +10,7 @@
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
@@ -33,7 +34,7 @@ public:
: MCMachObjectTargetWriter(true /* is64Bit */, CPUType, CPUSubtype,
/*UseAggressiveSymbolFolding=*/true) {}
- void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ void RecordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
const MCAsmLayout &Layout, const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) override;
@@ -112,8 +113,36 @@ bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
}
}
+static bool canUseLocalRelocation(const MCSectionMachO &Section,
+ const MCSymbol &Symbol, unsigned Log2Size) {
+ // Debug info sections can use local relocations.
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ return true;
+
+  // Otherwise, only pointer-sized relocations are supported.
+ if (Log2Size != 3)
+ return false;
+
+ // But only if they don't point to a few forbidden sections.
+ if (!Symbol.isInSection())
+ return true;
+ const MCSectionMachO &RefSec = cast<MCSectionMachO>(Symbol.getSection());
+ if (RefSec.getType() == MachO::S_CSTRING_LITERALS)
+ return false;
+
+ if (RefSec.getSegmentName() == "__DATA" &&
+ RefSec.getSectionName() == "__cfstring")
+ return false;
+
+ if (RefSec.getSegmentName() == "__DATA" &&
+ RefSec.getSectionName() == "__objc_classrefs")
+ return false;
+
+ return true;
+}
+
void AArch64MachObjectWriter::RecordRelocation(
- MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ MachObjectWriter *Writer, MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
@@ -123,9 +152,9 @@ void AArch64MachObjectWriter::RecordRelocation(
unsigned Log2Size = 0;
int64_t Value = 0;
unsigned Index = 0;
- unsigned IsExtern = 0;
unsigned Type = 0;
unsigned Kind = Fixup.getKind();
+ const MCSymbolData *RelSymbol = nullptr;
FixupOffset += Fixup.getOffset();
@@ -171,10 +200,8 @@ void AArch64MachObjectWriter::RecordRelocation(
// FIXME: Should this always be extern?
// SymbolNum of 0 indicates the absolute section.
Type = MachO::ARM64_RELOC_UNSIGNED;
- Index = 0;
if (IsPCRel) {
- IsExtern = 1;
Asm.getContext().FatalError(Fixup.getLoc(),
"PC relative absolute relocation!");
@@ -198,15 +225,12 @@ void AArch64MachObjectWriter::RecordRelocation(
Layout.getSymbolOffset(&B_SD) ==
Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) {
// SymB is the PC, so use a PC-rel pointer-to-GOT relocation.
- Index = A_Base->getIndex();
- IsExtern = 1;
Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
IsPCRel = 1;
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
- (IsExtern << 27) | (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
return;
} else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
@@ -252,26 +276,31 @@ void AArch64MachObjectWriter::RecordRelocation(
? 0
: Writer->getSymbolAddress(B_Base, Layout));
- Index = A_Base->getIndex();
- IsExtern = 1;
Type = MachO::ARM64_RELOC_UNSIGNED;
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
- (IsExtern << 27) | (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
- Index = B_Base->getIndex();
- IsExtern = 1;
+ RelSymbol = B_Base;
Type = MachO::ARM64_RELOC_SUBTRACTOR;
} else { // A + constant
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
- const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
- const MCSymbolData *Base = Asm.getAtom(&SD);
const MCSectionMachO &Section = static_cast<const MCSectionMachO &>(
Fragment->getParent()->getSection());
+ bool CanUseLocalRelocation =
+ canUseLocalRelocation(Section, *Symbol, Log2Size);
+ if (Symbol->isTemporary() && (Value || !CanUseLocalRelocation)) {
+ const MCSection &Sec = Symbol->getSection();
+ if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
+ Asm.addLocalUsedInReloc(*Symbol);
+ }
+
+ const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData *Base = Asm.getAtom(&SD);
+
// If the symbol is a variable and we weren't able to get a Base for it
// (i.e., it's not in the symbol table associated with a section) resolve
// the relocation based its expansion instead.
@@ -310,16 +339,13 @@ void AArch64MachObjectWriter::RecordRelocation(
// sections, and for pointer-sized relocations (.quad), we allow section
// relocations. It's code sections that run into trouble.
if (Base) {
- Index = Base->getIndex();
- IsExtern = 1;
+ RelSymbol = Base;
// Add the local offset, if needed.
if (Base != &SD)
Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
} else if (Symbol->isInSection()) {
- // Pointer-sized relocations can use a local relocation. Otherwise,
- // we have to be in a debug info section.
- if (!Section.hasAttribute(MachO::S_ATTR_DEBUG) && Log2Size != 3)
+ if (!CanUseLocalRelocation)
Asm.getContext().FatalError(
Fixup.getLoc(),
"unsupported relocation of local symbol '" + Symbol->getName() +
@@ -329,7 +355,6 @@ void AArch64MachObjectWriter::RecordRelocation(
const MCSectionData &SymSD =
Asm.getSectionData(SD.getSymbol().getSection());
Index = SymSD.getOrdinal() + 1;
- IsExtern = 0;
Value += Writer->getSymbolAddress(&SD, Layout);
if (IsPCRel)
@@ -362,16 +387,16 @@ void AArch64MachObjectWriter::RecordRelocation(
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
- (IsExtern << 27) | (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
// Now set up the Addend relocation.
Type = MachO::ARM64_RELOC_ADDEND;
Index = Value;
+ RelSymbol = nullptr;
IsPCRel = 0;
Log2Size = 2;
- IsExtern = 0;
// Put zero into the instruction itself. The addend is in the relocation.
Value = 0;
@@ -383,9 +408,9 @@ void AArch64MachObjectWriter::RecordRelocation(
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
- (IsExtern << 27) | (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
MCObjectWriter *llvm::createAArch64MachObjectWriter(raw_ostream &OS,
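
A recurring detail in this file: with the IsExtern flag gone, whether a relocation is extern is now implied by passing a non-null symbol (RelSymbol) to addRelocation() rather than by packing bit 27. For reference, a sketch of the arm64 relocation_info word being assembled above:

    #include <cstdint>

    // r_word1 layout: symbolnum[0:23] pcrel[24] length[25:26] extern[27] type[28:31]
    static uint32_t packRWord1(uint32_t SymbolNum, bool IsPCRel,
                               unsigned Log2Size, unsigned Type) {
      return (SymbolNum << 0) | (uint32_t(IsPCRel) << 24) | (Log2Size << 25) |
             (Type << 28); // the extern bit (1u << 27) is no longer set here
    }
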
diff --git a/lib/Target/ARM/ARM.h b/lib/Target/ARM/ARM.h
index 02db53a..d3cc068 100644
--- a/lib/Target/ARM/ARM.h
+++ b/lib/Target/ARM/ARM.h
@@ -34,16 +34,12 @@ FunctionPass *createA15SDOptimizerPass();
FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);
FunctionPass *createARMExpandPseudoPass();
FunctionPass *createARMGlobalBaseRegPass();
-FunctionPass *createARMGlobalMergePass(const TargetLowering* tli);
FunctionPass *createARMConstantIslandPass();
FunctionPass *createMLxExpansionPass();
FunctionPass *createThumb2ITBlockPass();
FunctionPass *createARMOptimizeBarriersPass();
FunctionPass *createThumb2SizeReductionPass();
-/// \brief Creates an ARM-specific Target Transformation Info pass.
-ImmutablePass *createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM);
-
void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
ARMAsmPrinter &AP);
diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td
index 80b976b..f080c60 100644
--- a/lib/Target/ARM/ARM.td
+++ b/lib/Target/ARM/ARM.td
@@ -147,6 +147,11 @@ def FeatureAClass : SubtargetFeature<"aclass", "ARMProcClass", "AClass",
def FeatureNaClTrap : SubtargetFeature<"nacl-trap", "UseNaClTrap", "true",
"NaCl trap">;
+// RenderScript-specific support for 64-bit long types on all targets
+def FeatureLong64 : SubtargetFeature<"long64", "UseLong64",
+ "true",
+ "long type is forced to be 64-bit">;
+
// ARM ISAs.
def HasV4TOps : SubtargetFeature<"v4t", "HasV4TOps", "true",
"Support ARM v4T instructions">;
@@ -270,17 +275,6 @@ def ProcKrait : SubtargetFeature<"krait", "ARMProcFamily", "Krait",
FeatureHWDivARM]>;
-def FeatureAPCS : SubtargetFeature<"apcs", "TargetABI", "ARM_ABI_APCS",
- "Use the APCS ABI">;
-
-def FeatureAAPCS : SubtargetFeature<"aapcs", "TargetABI", "ARM_ABI_AAPCS",
- "Use the AAPCS ABI">;
-
-// RenderScript-specific support for 64-bit long types on all targets
-def FeatureLong64 : SubtargetFeature<"long64", "UseLong64",
- "true",
- "long type is forced to be 64-bit">;
-
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
@@ -336,6 +330,12 @@ def : Processor<"mpcore", ARMV6Itineraries, [HasV6Ops, FeatureVFP2,
// V6M Processors.
def : Processor<"cortex-m0", ARMV6Itineraries, [HasV6MOps, FeatureNoARM,
FeatureDB, FeatureMClass]>;
+def : Processor<"cortex-m0plus", ARMV6Itineraries, [HasV6MOps, FeatureNoARM,
+ FeatureDB, FeatureMClass]>;
+def : Processor<"cortex-m1", ARMV6Itineraries, [HasV6MOps, FeatureNoARM,
+ FeatureDB, FeatureMClass]>;
+def : Processor<"sc000", ARMV6Itineraries, [HasV6MOps, FeatureNoARM,
+ FeatureDB, FeatureMClass]>;
// V6T2 Processors.
def : Processor<"arm1156t2-s", ARMV6Itineraries, [HasV6T2Ops,
@@ -395,10 +395,20 @@ def : ProcessorModel<"cortex-r5", CortexA8Model,
FeatureHasRAS, FeatureVFPOnlySP,
FeatureD16, FeatureRClass]>;
+// FIXME: R7 currently has the same ProcessorModel as A8 and is modelled as an R5.
+def : ProcessorModel<"cortex-r7", CortexA8Model,
+ [ProcR5, HasV7Ops, FeatureDB,
+ FeatureVFP3, FeatureDSPThumb2,
+ FeatureHasRAS, FeatureVFPOnlySP,
+ FeatureD16, FeatureMP, FeatureRClass]>;
+
// V7M Processors.
def : ProcNoItin<"cortex-m3", [HasV7Ops,
FeatureThumb2, FeatureNoARM, FeatureDB,
FeatureHWDiv, FeatureMClass]>;
+def : ProcNoItin<"sc300", [HasV7Ops,
+ FeatureThumb2, FeatureNoARM, FeatureDB,
+ FeatureHWDiv, FeatureMClass]>;
// V7EM Processors.
def : ProcNoItin<"cortex-m4", [HasV7Ops,
@@ -427,6 +437,10 @@ def : ProcNoItin<"cortex-a53", [ProcA53, HasV8Ops, FeatureAClass,
def : ProcNoItin<"cortex-a57", [ProcA57, HasV8Ops, FeatureAClass,
FeatureDB, FeatureFPARMv8,
FeatureNEON, FeatureDSPThumb2]>;
+// FIXME: Cortex-A72 is currently modelled as a Cortex-A57.
+def : ProcNoItin<"cortex-a72", [ProcA57, HasV8Ops, FeatureAClass,
+ FeatureDB, FeatureFPARMv8,
+ FeatureNEON, FeatureDSPThumb2]>;
// Cyclone is very similar to swift
def : ProcessorModel<"cyclone", SwiftModel,
diff --git a/lib/Target/ARM/ARMArchExtName.def b/lib/Target/ARM/ARMArchExtName.def
new file mode 100644
index 0000000..d6da50c
--- /dev/null
+++ b/lib/Target/ARM/ARMArchExtName.def
@@ -0,0 +1,30 @@
+//===-- ARMArchExtName.def - List of the ARM Extension names ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the list of the supported ARM Architecture Extension
+// names. These can be used to enable an extension through the
+// .arch_extension directive.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef ARM_ARCHEXT_NAME
+#error "You must define ARM_ARCHEXT_NAME(NAME, ID) before including ARMArchExtName.h"
+#endif
+
+ARM_ARCHEXT_NAME("crc", CRC)
+ARM_ARCHEXT_NAME("crypto", CRYPTO)
+ARM_ARCHEXT_NAME("fp", FP)
+ARM_ARCHEXT_NAME("idiv", HWDIV)
+ARM_ARCHEXT_NAME("mp", MP)
+ARM_ARCHEXT_NAME("sec", SEC)
+ARM_ARCHEXT_NAME("virt", VIRT)
+
+#undef ARM_ARCHEXT_NAME
diff --git a/lib/Target/ARM/ARMArchExtName.h b/lib/Target/ARM/ARMArchExtName.h
new file mode 100644
index 0000000..bc1157a
--- /dev/null
+++ b/lib/Target/ARM/ARMArchExtName.h
@@ -0,0 +1,26 @@
+//===-- ARMArchExtName.h - List of the ARM Extension names ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_ARM_ARMARCHEXTNAME_H
+#define LLVM_LIB_TARGET_ARM_ARMARCHEXTNAME_H
+
+namespace llvm {
+namespace ARM {
+
+enum ArchExtKind {
+ INVALID_ARCHEXT = 0
+
+#define ARM_ARCHEXT_NAME(NAME, ID) , ID
+#include "ARMArchExtName.def"
+};
+
+} // namespace ARM
+} // namespace llvm
+
+#endif
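
A typical consumer stamps out a table by redefining the X-macro before including the .def file; a sketch of the assumed usage (the in-tree consumers are the ARM asm parser and target streamer):

    #include "ARMArchExtName.h"
    #include "llvm/ADT/StringRef.h"

    namespace {
    struct ArchExtEntry {
      const char *Name;
      llvm::ARM::ArchExtKind ID;
    };
    // ARM_ARCHEXT_NAME("crc", CRC) expands to {"crc", llvm::ARM::CRC}, etc.
    const ArchExtEntry ArchExtNames[] = {
    #define ARM_ARCHEXT_NAME(NAME, ID) {NAME, llvm::ARM::ID},
    #include "ARMArchExtName.def"
    };
    } // end anonymous namespace

    // Linear lookup mirroring how an .arch_extension name would be resolved.
    llvm::ARM::ArchExtKind lookupArchExt(llvm::StringRef Name) {
      for (const ArchExtEntry &E : ArchExtNames)
        if (Name == E.Name)
          return E.ID;
      return llvm::ARM::INVALID_ARCHEXT;
    }
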
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 695fd4d..2544a01 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -16,6 +16,7 @@
#include "ARM.h"
#include "ARMConstantPoolValue.h"
#include "ARMFPUName.h"
+#include "ARMArchExtName.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
@@ -57,6 +58,11 @@ using namespace llvm;
#define DEBUG_TYPE "asm-printer"
+ARMAsmPrinter::ARMAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), AFI(nullptr), MCP(nullptr),
+ InConstantPool(false) {}
+
void ARMAsmPrinter::EmitFunctionBodyEnd() {
// Make sure to terminate any constant pools that were at the end
// of the function.
@@ -76,8 +82,7 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() {
}
void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
- uint64_t Size =
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(CV->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
assert(Size && "C++ constructor pointer had zero size!");
const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
@@ -99,6 +104,7 @@ void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
AFI = MF.getInfo<ARMFunctionInfo>();
MCP = MF.getConstantPool();
+ Subtarget = &MF.getSubtarget<ARMSubtarget>();
SetupMachineFunction(MF);
@@ -120,6 +126,23 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Emit the rest of the function body.
EmitFunctionBody();
+ // If we need V4T thumb mode Register Indirect Jump pads, emit them.
+ // These are created per function, rather than per TU, since it's
+ // relatively easy to exceed the thumb branch range within a TU.
+  if (!ThumbIndirectPads.empty()) {
+ OutStreamer.EmitAssemblerFlag(MCAF_Code16);
+ EmitAlignment(1);
+ for (unsigned i = 0, e = ThumbIndirectPads.size(); i < e; i++) {
+ OutStreamer.EmitLabel(ThumbIndirectPads[i].second);
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tBX)
+ .addReg(ThumbIndirectPads[i].first)
+ // Add predicate operands.
+ .addImm(ARMCC::AL)
+ .addReg(0));
+ }
+ ThumbIndirectPads.clear();
+ }
+
// We didn't modify anything.
return false;
}
@@ -183,7 +206,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
MCSymbol *ARMAsmPrinter::
GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
SmallString<60> Name;
raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "JTI"
<< getFunctionNumber() << '_' << uid << '_' << uid2;
@@ -192,7 +215,7 @@ GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
MCSymbol *ARMAsmPrinter::GetARMSJLJEHLabel() const {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
SmallString<60> Name;
raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "SJLJEH"
<< getFunctionNumber();
@@ -414,7 +437,8 @@ void ARMAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
}
void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetMachO()) {
+ Triple TT(TM.getTargetTriple());
+ if (TT.isOSBinFormatMachO()) {
Reloc::Model RelocM = TM.getRelocationModel();
if (RelocM == Reloc::PIC_ || RelocM == Reloc::DynamicNoPIC) {
// Declare all the text sections up front (before the DWARF sections
@@ -477,10 +501,17 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
OutStreamer.EmitAssemblerFlag(MCAF_SyntaxUnified);
// Emit ARM Build Attributes
- if (Subtarget->isTargetELF())
+ if (TT.isOSBinFormatELF())
emitAttributes();
- if (!M.getModuleInlineAsm().empty() && Subtarget->isThumb())
+ // Use the triple's architecture and subarchitecture to determine
+ // if we're thumb for the purposes of the top level code16 assembler
+ // flag.
+ bool isThumb = TT.getArch() == Triple::thumb ||
+ TT.getArch() == Triple::thumbeb ||
+ TT.getSubArch() == Triple::ARMSubArch_v7m ||
+ TT.getSubArch() == Triple::ARMSubArch_v6m;
+ if (!M.getModuleInlineAsm().empty() && isThumb)
OutStreamer.EmitAssemblerFlag(MCAF_Code16);
}
@@ -509,7 +540,8 @@ emitNonLazySymbolPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel,
void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetMachO()) {
+ Triple TT(TM.getTargetTriple());
+ if (TT.isOSBinFormatMachO()) {
// All darwin targets use mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -552,7 +584,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
}
// Emit a .data.rel section containing any stubs that were created.
- if (Subtarget->isTargetELF()) {
+ if (TT.isOSBinFormatELF()) {
const TargetLoweringObjectFileELF &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
@@ -562,7 +594,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
for (auto &stub: Stubs) {
OutStreamer.EmitLabel(stub.first);
@@ -612,69 +644,96 @@ void ARMAsmPrinter::emitAttributes() {
MCTargetStreamer &TS = *OutStreamer.getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
- ATS.switchVendor("aeabi");
+ ATS.emitTextAttribute(ARMBuildAttrs::conformance, "2.09");
- std::string CPUString = Subtarget->getCPUString();
+ ATS.switchVendor("aeabi");
- // FIXME: remove krait check when GNU tools support krait cpu
- if (CPUString != "generic" && CPUString != "krait")
- ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
+ // Compute ARM ELF Attributes based on the default subtarget that
+ // we'd have constructed. The existing ARM behavior isn't LTO clean
+ // anyhow.
+ // FIXME: For ifunc related functions we could iterate over and look
+ // for a feature string that doesn't match the default one.
+ StringRef TT = TM.getTargetTriple();
+ StringRef CPU = TM.getTargetCPU();
+ StringRef FS = TM.getTargetFeatureString();
+ std::string ArchFS = ARM_MC::ParseARMTriple(TT, CPU);
+ if (!FS.empty()) {
+ if (!ArchFS.empty())
+ ArchFS = ArchFS + "," + FS.str();
+ else
+ ArchFS = FS;
+ }
+ const ARMBaseTargetMachine &ATM =
+ static_cast<const ARMBaseTargetMachine &>(TM);
+ const ARMSubtarget STI(TT, CPU, ArchFS, ATM, ATM.isLittleEndian());
+
+ std::string CPUString = STI.getCPUString();
+
+ if (CPUString != "generic") {
+ // FIXME: remove krait check when GNU tools support krait cpu
+ if (STI.isKrait()) {
+ ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
+ // We consider krait as a "cortex-a9" + hwdiv CPU
+ // Enable hwdiv through ".arch_extension idiv"
+ if (STI.hasDivide() || STI.hasDivideInARMMode())
+ ATS.emitArchExtension(ARM::HWDIV);
+ } else
+ ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
+ }
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch,
- getArchForCPU(CPUString, Subtarget));
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(CPUString, &STI));
// Tag_CPU_arch_profile must have the default value of 0 when "Architecture
// profile is not applicable (e.g. pre v7, or cross-profile code)".
- if (Subtarget->hasV7Ops()) {
- if (Subtarget->isAClass()) {
+ if (STI.hasV7Ops()) {
+ if (STI.isAClass()) {
ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::ApplicationProfile);
- } else if (Subtarget->isRClass()) {
+ } else if (STI.isRClass()) {
ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::RealTimeProfile);
- } else if (Subtarget->isMClass()) {
+ } else if (STI.isMClass()) {
ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::MicroControllerProfile);
}
}
- ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use, Subtarget->hasARMOps() ?
- ARMBuildAttrs::Allowed : ARMBuildAttrs::Not_Allowed);
- if (Subtarget->isThumb1Only()) {
- ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::Allowed);
- } else if (Subtarget->hasThumb2()) {
+ ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use,
+ STI.hasARMOps() ? ARMBuildAttrs::Allowed
+ : ARMBuildAttrs::Not_Allowed);
+ if (STI.isThumb1Only()) {
+ ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
+ } else if (STI.hasThumb2()) {
ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
ARMBuildAttrs::AllowThumb32);
}
- if (Subtarget->hasNEON()) {
+ if (STI.hasNEON()) {
/* NEON is not exactly a VFP architecture, but GAS emits one of
 * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
- if (Subtarget->hasFPARMv8()) {
- if (Subtarget->hasCrypto())
+ if (STI.hasFPARMv8()) {
+ if (STI.hasCrypto())
ATS.emitFPU(ARM::CRYPTO_NEON_FP_ARMV8);
else
ATS.emitFPU(ARM::NEON_FP_ARMV8);
- }
- else if (Subtarget->hasVFP4())
+ } else if (STI.hasVFP4())
ATS.emitFPU(ARM::NEON_VFPV4);
else
ATS.emitFPU(ARM::NEON);
// Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
- if (Subtarget->hasV8Ops())
+ if (STI.hasV8Ops())
ATS.emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
ARMBuildAttrs::AllowNeonARMv8);
} else {
- if (Subtarget->hasFPARMv8())
+ if (STI.hasFPARMv8())
// FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
// FPU, but there are two different names for it depending on the CPU.
- ATS.emitFPU(Subtarget->hasD16() ? ARM::FPV5_D16 : ARM::FP_ARMV8);
- else if (Subtarget->hasVFP4())
- ATS.emitFPU(Subtarget->hasD16() ? ARM::VFPV4_D16 : ARM::VFPV4);
- else if (Subtarget->hasVFP3())
- ATS.emitFPU(Subtarget->hasD16() ? ARM::VFPV3_D16 : ARM::VFPV3);
- else if (Subtarget->hasVFP2())
+ ATS.emitFPU(STI.hasD16() ? ARM::FPV5_D16 : ARM::FP_ARMV8);
+ else if (STI.hasVFP4())
+ ATS.emitFPU(STI.hasD16() ? ARM::VFPV4_D16 : ARM::VFPV4);
+ else if (STI.hasVFP3())
+ ATS.emitFPU(STI.hasD16() ? ARM::VFPV3_D16 : ARM::VFPV3);
+ else if (STI.hasVFP2())
ATS.emitFPU(ARM::VFPV2);
}
@@ -694,11 +753,42 @@ void ARMAsmPrinter::emitAttributes() {
// Signal various FP modes.
if (!TM.Options.UnsafeFPMath) {
- ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::Allowed);
- ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
- ARMBuildAttrs::Allowed);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal,
+ ARMBuildAttrs::IEEEDenormals);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Allowed);
+
+ // If the user has permitted this code to choose the IEEE 754
+ // rounding at run-time, emit the rounding attribute.
+ if (TM.Options.HonorSignDependentRoundingFPMathOption)
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_rounding, ARMBuildAttrs::Allowed);
+ } else {
+ if (!STI.hasVFP2()) {
+ // When the target doesn't have an FPU (by design or
+ // intention), the assumptions made on the software support
+ // mirror that of the equivalent hardware support *if it
+ // existed*. For v7 and better we indicate that denormals are
+ // flushed preserving sign, and for V6 we indicate that
+ // denormals are flushed to positive zero.
+ if (STI.hasV7Ops())
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal,
+ ARMBuildAttrs::PreserveFPSign);
+ } else if (STI.hasVFP3()) {
+ // In VFPv4, VFPv4U, VFPv3, or VFPv3U, it is preserved. That is,
+ // the sign bit of the zero matches the sign bit of the input or
+ // result that is being flushed to zero.
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal,
+ ARMBuildAttrs::PreserveFPSign);
+ }
+ // For VFPv2 implementations it is implementation defined as
+ // to whether denormals are flushed to positive zero or to
+ // whatever the sign of zero is (ARM v7AR ARM 2.7.5). Historically
+ // LLVM has chosen to flush this to positive zero (most likely for
+ // GCC compatibility), so that's the chosen value here (the
+ // absence of its emission implies zero).
}
+ // TM.Options.NoInfsFPMath && TM.Options.NoNaNsFPMath is the
+ // equivalent of GCC's -ffinite-math-only flag.
if (TM.Options.NoInfsFPMath && TM.Options.NoNaNsFPMath)
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
ARMBuildAttrs::Allowed);
@@ -706,7 +796,7 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
ARMBuildAttrs::AllowIEE754);
- if (Subtarget->allowsUnalignedMem())
+ if (STI.allowsUnalignedMem())
ATS.emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
ARMBuildAttrs::Allowed);
else
@@ -719,21 +809,28 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ABI_align_preserved, 1);
// ABI_HardFP_use attribute to indicate single precision FP.
- if (Subtarget->isFPOnlySP())
+ if (STI.isFPOnlySP())
ATS.emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
ARMBuildAttrs::HardFPSinglePrecision);
// Hard float. Use both S and D registers and conform to AAPCS-VFP.
- if (Subtarget->isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard)
+ if (STI.isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard)
ATS.emitAttribute(ARMBuildAttrs::ABI_VFP_args, ARMBuildAttrs::HardFPAAPCS);
// FIXME: Should we signal R9 usage?
- if (Subtarget->hasFP16())
- ATS.emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
+ if (STI.hasFP16())
+ ATS.emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
+
+ // FIXME: To support emitting this build attribute as GCC does, the
+ // -mfp16-format option and associated plumbing must be
+ // supported. For now the __fp16 type is exposed by default, so this
+ // attribute should be emitted with value 1.
+ ATS.emitAttribute(ARMBuildAttrs::ABI_FP_16bit_format,
+ ARMBuildAttrs::FP16FormatIEEE);
- if (Subtarget->hasMPExtension())
- ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
+ if (STI.hasMPExtension())
+ ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
// Hardware divide in ARM mode is part of base arch, starting from ARMv8.
// If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
@@ -741,14 +838,14 @@ void ARMAsmPrinter::emitAttributes() {
// arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
// AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
// otherwise, the default value (AllowDIVIfExists) applies.
- if (Subtarget->hasDivideInARMMode() && !Subtarget->hasV8Ops())
- ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
+ if (STI.hasDivideInARMMode() && !STI.hasV8Ops())
+ ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
if (MMI) {
if (const Module *SourceModule = MMI->getModule()) {
// ABI_PCS_wchar_t to indicate wchar_t width
// FIXME: There is no way to emit value 0 (wchar_t prohibited).
- if (auto WCharWidthValue = cast_or_null<ConstantInt>(
+ if (auto WCharWidthValue = mdconst::extract_or_null<ConstantInt>(
SourceModule->getModuleFlag("wchar_size"))) {
int WCharWidth = WCharWidthValue->getZExtValue();
assert((WCharWidth == 2 || WCharWidth == 4) &&
@@ -759,7 +856,7 @@ void ARMAsmPrinter::emitAttributes() {
// ABI_enum_size to indicate enum width
// FIXME: There is no way to emit value 0 (enums prohibited) or value 3
// (all enums contain a value needing 32 bits to encode).
- if (auto EnumWidthValue = cast_or_null<ConstantInt>(
+ if (auto EnumWidthValue = mdconst::extract_or_null<ConstantInt>(
SourceModule->getModuleFlag("min_enum_size"))) {
int EnumWidth = EnumWidthValue->getZExtValue();
assert((EnumWidth == 1 || EnumWidth == 4) &&
@@ -774,22 +871,20 @@ void ARMAsmPrinter::emitAttributes() {
// it as another callee-saved register, but not as SB or a TLS pointer; it
// would instead be nicer to push this from the frontend as metadata, as we do
// for the wchar and enum size tags
- if (Subtarget->isR9Reserved())
- ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use,
- ARMBuildAttrs::R9Reserved);
+ if (STI.isR9Reserved())
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use, ARMBuildAttrs::R9Reserved);
else
- ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use,
- ARMBuildAttrs::R9IsGPR);
-
- if (Subtarget->hasTrustZone() && Subtarget->hasVirtualization())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowTZVirtualization);
- else if (Subtarget->hasTrustZone())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowTZ);
- else if (Subtarget->hasVirtualization())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowVirtualization);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use, ARMBuildAttrs::R9IsGPR);
+
+ if (STI.hasTrustZone() && STI.hasVirtualization())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowTZVirtualization);
+ else if (STI.hasTrustZone())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowTZ);
+ else if (STI.hasVirtualization())
+ ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowVirtualization);
ATS.finishAttributeSection();
}
@@ -858,9 +953,8 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV,
void ARMAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
- int Size =
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(MCPV->getType());
+ const DataLayout *DL = TM.getDataLayout();
+ int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
@@ -1176,7 +1270,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
#include "ARMGenMCPseudoLowering.inc"
void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
// If we just ended a constant pool, mark it as such.
if (InConstantPool && MI->getOpcode() != ARM::CONSTPOOL_ENTRY) {
@@ -1251,18 +1345,34 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
case ARM::tBX_CALL: {
- EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVr)
- .addReg(ARM::LR)
- .addReg(ARM::PC)
- // Add predicate operands.
- .addImm(ARMCC::AL)
- .addReg(0));
+ if (Subtarget->hasV5TOps())
+ llvm_unreachable("Expected BLX to be selected for v5t+");
+
+ // On ARM v4t, when doing a call from thumb mode, we need to ensure
+ // that the saved lr has its LSB set correctly (the arch doesn't
+ // have blx).
+ // So here we generate a bl to a small jump pad that does bx rN.
+ // The jump pads are emitted after the function body.
+
+ unsigned TReg = MI->getOperand(0).getReg();
+ MCSymbol *TRegSym = nullptr;
+ for (unsigned i = 0, e = ThumbIndirectPads.size(); i < e; i++) {
+ if (ThumbIndirectPads[i].first == TReg) {
+ TRegSym = ThumbIndirectPads[i].second;
+ break;
+ }
+ }
- EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tBX)
- .addReg(MI->getOperand(0).getReg())
- // Add predicate operands.
- .addImm(ARMCC::AL)
- .addReg(0));
+ if (!TRegSym) {
+ TRegSym = OutContext.CreateTempSymbol();
+ ThumbIndirectPads.push_back(std::make_pair(TReg, TRegSym));
+ }
+
+ // Create a link-saving branch to the Reg Indirect Jump Pad.
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tBL)
+ // Predicate comes first here.
+ .addImm(ARMCC::AL).addReg(0)
+ .addExpr(MCSymbolRefExpr::Create(TRegSym, OutContext)));
return;
}
case ARM::BMOVPCRX_CALL: {
diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h
index 5ff20ce..50cb954 100644
--- a/lib/Target/ARM/ARMAsmPrinter.h
+++ b/lib/Target/ARM/ARMAsmPrinter.h
@@ -20,6 +20,7 @@ class ARMFunctionInfo;
class MCOperand;
class MachineConstantPool;
class MachineOperand;
+class MCSymbol;
namespace ARM {
enum DW_ISA {
@@ -45,12 +46,14 @@ class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter {
/// InConstantPool - Maintain state when emitting a sequence of constant
/// pool entries so we can properly mark them as data regions.
bool InConstantPool;
+
+ /// ThumbIndirectPads - These maintain a per-function list of jump pad
+ /// labels used for ARMv4t thumb code to make register indirect calls.
+ SmallVector<std::pair<unsigned, MCSymbol*>, 4> ThumbIndirectPads;
+
public:
- explicit ARMAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), AFI(nullptr), MCP(nullptr),
- InConstantPool(false) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- }
+ explicit ARMAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer);
const char *getPassName() const override {
return "ARM Assembly / Object Emitter";
@@ -100,12 +103,13 @@ private:
const MachineInstr *MI);
public:
- unsigned getISAEncoding() override {
+ unsigned getISAEncoding(const Function *F) override {
// ARM/Darwin adds ISA to the DWARF info for each function.
- if (!Subtarget->isTargetMachO())
+ Triple TT(TM.getTargetTriple());
+ if (!TT.isOSBinFormatMachO())
return 0;
- return Subtarget->isThumb() ?
- ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm;
+ const ARMSubtarget &STI = TM.getSubtarget<ARMSubtarget>(*F);
+ return STI.isThumb() ? ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm;
}
private:
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 7a315c4..29ee22e 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1836,8 +1836,10 @@ bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
return false;
}
-MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
- bool PreferFalse) const {
+MachineInstr *
+ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
+ SmallPtrSetImpl<MachineInstr *> &SeenMIs,
+ bool PreferFalse) const {
assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
"Unknown select instruction");
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
@@ -1885,6 +1887,10 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
NewMI.addOperand(FalseReg);
NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
+ // Update SeenMIs set: register newly created MI and erase removed DefMI.
+ SeenMIs.insert(NewMI);
+ SeenMIs.erase(DefMI);
+
// The caller will erase MI, but not DefMI.
DefMI->eraseFromParent();
return NewMI;
@@ -1985,8 +1991,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations, so it's really only a benefit to code size.
- if (!MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize))
+ if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -2394,7 +2399,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
// Conservatively refuse to convert an instruction which isn't in the same
// BB as the comparison.
- // For CMPri, we need to check Sub, thus we can't return here.
+ // For CMPri w/ CmpValue != 0, a Sub may still be a candidate.
+ // Thus we cannot return here.
if (CmpInstr->getOpcode() == ARM::CMPri ||
CmpInstr->getOpcode() == ARM::t2CMPri)
MI = nullptr;
@@ -2473,8 +2479,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case ARM::t2EORrr:
case ARM::t2EORri: {
// Scan forward for the use of CPSR
- // When checking against MI: if it's a conditional code requires
- // checking of V bit, then this is not safe to do.
+ // When checking against MI: if it's a conditional code that requires
+ // checking of the V bit or C bit, then this is not safe to do.
// It is safe to remove CmpInstr if CPSR is redefined or killed.
// If we are done with the basic block, we need to check whether CPSR is
// live-out.
@@ -2541,19 +2547,30 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
OperandsToUpdate.push_back(
std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
}
- } else
+ } else {
+ // No Sub, so this is x = <op> y, z; cmp x, 0.
switch (CC) {
- default:
+ case ARMCC::EQ: // Z
+ case ARMCC::NE: // Z
+ case ARMCC::MI: // N
+ case ARMCC::PL: // N
+ case ARMCC::AL: // none
// CPSR can be used multiple times, we should continue.
break;
- case ARMCC::VS:
- case ARMCC::VC:
- case ARMCC::GE:
- case ARMCC::LT:
- case ARMCC::GT:
- case ARMCC::LE:
+ case ARMCC::HS: // C
+ case ARMCC::LO: // C
+ case ARMCC::VS: // V
+ case ARMCC::VC: // V
+ case ARMCC::HI: // C Z
+ case ARMCC::LS: // C Z
+ case ARMCC::GE: // N V
+ case ARMCC::LT: // N V
+ case ARMCC::GT: // Z N V
+ case ARMCC::LE: // Z N V
+ // The instruction uses the V bit or C bit which is not safe.
return false;
}
+ }
}
}
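
The expanded switch spells out which NZCV flags each ARM condition code reads; the compare against zero can only be folded into the defining instruction when neither C nor V is consulted. A standalone classifier mirroring the table above (the enum is a local stand-in for ARMCC, flag sets per the ARM ARM):

    #include <cstdio>

    enum CondCode { EQ, NE, HS, LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL };

    enum Flag : unsigned { N = 1, Z = 2, C = 4, V = 8 };

    // Bitmask of NZCV flags a condition code reads.
    unsigned flagsRead(CondCode CC) {
      switch (CC) {
      case EQ: case NE: return Z;
      case MI: case PL: return N;
      case HS: case LO: return C;
      case VS: case VC: return V;
      case HI: case LS: return C | Z;
      case GE: case LT: return N | V;
      case GT: case LE: return Z | N | V;
      case AL:          return 0;
      }
      return 0;
    }

    // The fold in the hunk is only safe when neither C nor V is consulted.
    bool safeToFoldCmpZero(CondCode CC) {
      return (flagsRead(CC) & (C | V)) == 0;
    }

    int main() { std::printf("%d %d\n", safeToFoldCmpZero(NE), safeToFoldCmpZero(GT)); }
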
@@ -3647,9 +3664,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent();
- if (MF->getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize))
+ if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
}
return Latency;
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h
index 0ae291b..ecbcf5c 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -261,7 +261,9 @@ public:
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const override;
- MachineInstr *optimizeSelect(MachineInstr *MI, bool) const override;
+ MachineInstr *optimizeSelect(MachineInstr *MI,
+ SmallPtrSetImpl<MachineInstr *> &SeenMIs,
+ bool) const override;
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 6dc0493..7574727 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -60,9 +60,8 @@ ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMSubtarget &sti)
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- const MCPhysReg *RegList = (STI.isTargetIOS() && !STI.isAAPCS_ABI())
- ? CSR_iOS_SaveList
- : CSR_AAPCS_SaveList;
+ const MCPhysReg *RegList =
+ STI.isTargetDarwin() ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
if (!MF) return RegList;
@@ -95,8 +94,7 @@ ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
if (CC == CallingConv::GHC)
// This is academic because all GHC calls are (supposed to be) tail calls
return CSR_NoRegs_RegMask;
- return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
- ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
+ return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
const uint32_t*
@@ -117,8 +115,8 @@ ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const {
if (CC == CallingConv::GHC)
// This is academic because all GHC calls are (supposed to be) tail calls
return nullptr;
- return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
- ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask;
+ return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
+ : CSR_AAPCS_ThisReturn_RegMask;
}
BitVector ARMBaseRegisterInfo::
@@ -266,7 +264,7 @@ ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
}
void
-ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
+ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
MachineFunction &MF) const {
MachineRegisterInfo *MRI = &MF.getRegInfo();
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
@@ -356,10 +354,7 @@ bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
return false;
// We may also need a base pointer if there are dynamic allocas or stack
// pointer adjustments around calls.
- if (MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->hasReservedCallFrame(MF))
+ if (MF.getSubtarget().getFrameLowering()->hasReservedCallFrame(MF))
return true;
// A base pointer is required and allowed. Check that it isn't too late to
// reserve it.
@@ -370,14 +365,10 @@ bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
- unsigned StackAlign = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
- bool requiresRealignment =
- ((MFI->getMaxAlignment() > StackAlign) ||
- F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::StackAlignment));
+ unsigned StackAlign =
+ MF.getSubtarget().getFrameLowering()->getStackAlignment();
+ bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+ F->hasFnAttribute(Attribute::StackAlignment));
return requiresRealignment && canRealignStack(MF);
}
@@ -555,12 +546,13 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// and pick a real one.
Offset += 128; // 128 bytes of spill slots
- // If there is a frame pointer, try using it.
+ // If there's a frame pointer and the addressing mode allows it, try using it.
// The FP is only available if there is no dynamic realignment. We
// don't know for sure yet whether we'll need that, so we guess based
// on whether there are any local variables that would trigger it.
unsigned StackAlign = TFI->getStackAlignment();
- if (TFI->hasFP(MF) &&
+ if (TFI->hasFP(MF) &&
+ (MI->getDesc().TSFlags & ARMII::AddrModeMask) != ARMII::AddrModeT1_s &&
!((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
if (isFrameOffsetLegal(MI, FPOffset))
return false;
@@ -677,7 +669,7 @@ bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
NumBits = 8;
break;
case ARMII::AddrModeT1_s:
- NumBits = 5;
+ NumBits = 8;
Scale = 4;
isSigned = false;
break;
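
The NumBits correction reflects that Thumb1 SP-relative loads and stores (AddrModeT1_s) encode an unsigned 8-bit immediate scaled by 4, for byte offsets 0 through 1020; the old value of 5 under-reported the reachable range. A standalone legality check under those encoding parameters:

    #include <cstdio>

    // Offset legality for an addressing mode with an unsigned, scaled
    // immediate, matching the AddrModeT1_s values in the hunk.
    bool isOffsetLegal(long Offset, unsigned NumBits = 8, unsigned Scale = 4) {
      if (Offset < 0 || Offset % Scale != 0)
        return false; // unsigned, and must be a multiple of the scale
      long Max = ((1L << NumBits) - 1) * Scale; // 255 * 4 = 1020 bytes
      return Offset <= Max;
    }

    int main() {
      std::printf("%d %d %d\n", isOffsetLegal(1020), isOffsetLegal(1024),
                  isOffsetLegal(6));
    }
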
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h
index e9bc412..17027c2 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -135,7 +135,7 @@ public:
const MachineFunction &MF,
const VirtRegMap *VRM) const override;
- void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
+ void updateRegAllocHint(unsigned Reg, unsigned NewReg,
MachineFunction &MF) const override;
bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const override;
diff --git a/lib/Target/ARM/ARMCallingConv.h b/lib/Target/ARM/ARMCallingConv.h
index bd07236..d687568 100644
--- a/lib/Target/ARM/ARMCallingConv.h
+++ b/lib/Target/ARM/ARMCallingConv.h
@@ -31,7 +31,7 @@ static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
// Try to get the first register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
+ if (unsigned Reg = State.AllocateReg(RegList))
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
else {
// For the 2nd half of a v2f64, do not fail.
@@ -46,7 +46,7 @@ static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
}
// Try to get the second register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
+ if (unsigned Reg = State.AllocateReg(RegList))
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
else
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
@@ -76,11 +76,11 @@ static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
- unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
+ unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList);
if (Reg == 0) {
+ // If we had only R3 unallocated, we still must waste it.
- Reg = State.AllocateReg(GPRArgRegs, 4);
+ Reg = State.AllocateReg(GPRArgRegs);
assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64");
// For the 2nd half of a v2f64, do not just fail.
@@ -126,7 +126,7 @@ static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
- unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
+ unsigned Reg = State.AllocateReg(HiRegList, LoRegList);
if (Reg == 0)
return false; // we didn't handle it
@@ -160,6 +160,8 @@ static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
State);
}
+static const uint16_t RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+
static const uint16_t SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3,
ARM::S4, ARM::S5, ARM::S6, ARM::S7,
ARM::S8, ARM::S9, ARM::S10, ARM::S11,
@@ -168,85 +170,114 @@ static const uint16_t DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
static const uint16_t QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };
+
// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
// has InConsecutiveRegs set, and that the last member also has
// InConsecutiveRegsLast set. We must process all members of the HA before
// we can allocate it, as we need to know the total number of registers that
// will be needed in order to (attempt to) allocate a contiguous block.
-static bool CC_ARM_AAPCS_Custom_HA(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
- SmallVectorImpl<CCValAssign> &PendingHAMembers = State.getPendingLocs();
+static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
// AAPCS HFAs must have 1-4 elements, all of the same type
- assert(PendingHAMembers.size() < 4);
- if (PendingHAMembers.size() > 0)
- assert(PendingHAMembers[0].getLocVT() == LocVT);
+ if (PendingMembers.size() > 0)
+ assert(PendingMembers[0].getLocVT() == LocVT);
// Add the argument to the list to be allocated once we know the size of the
- // HA
- PendingHAMembers.push_back(
- CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
-
- if (ArgFlags.isInConsecutiveRegsLast()) {
- assert(PendingHAMembers.size() > 0 && PendingHAMembers.size() <= 4 &&
- "Homogeneous aggregates must have between 1 and 4 members");
-
- // Try to allocate a contiguous block of registers, each of the correct
- // size to hold one member.
- const uint16_t *RegList;
- unsigned NumRegs;
- switch (LocVT.SimpleTy) {
- case MVT::f32:
- RegList = SRegList;
- NumRegs = 16;
- break;
- case MVT::f64:
- RegList = DRegList;
- NumRegs = 8;
- break;
- case MVT::v2f64:
- RegList = QRegList;
- NumRegs = 4;
- break;
- default:
- llvm_unreachable("Unexpected member type for HA");
- break;
- }
+ // aggregate. Store the type's required alignment as extra info for later: in
+ // the [N x i64] case all trace has been removed by the time we actually get
+ // to do allocation.
+ PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo,
+ ArgFlags.getOrigAlign()));
- unsigned RegResult =
- State.AllocateRegBlock(RegList, NumRegs, PendingHAMembers.size());
-
- if (RegResult) {
- for (SmallVectorImpl<CCValAssign>::iterator It = PendingHAMembers.begin();
- It != PendingHAMembers.end(); ++It) {
- It->convertToReg(RegResult);
- State.addLoc(*It);
- ++RegResult;
- }
- PendingHAMembers.clear();
- return true;
- }
+ if (!ArgFlags.isInConsecutiveRegsLast())
+ return true;
+
+ // Try to allocate a contiguous block of registers, each of the correct
+ // size to hold one member.
+ unsigned Align = std::min(PendingMembers[0].getExtraInfo(), 8U);
- // Register allocation failed, fall back to the stack
+ ArrayRef<uint16_t> RegList;
+ switch (LocVT.SimpleTy) {
+ case MVT::i32: {
+ RegList = RRegList;
+ unsigned RegIdx = State.getFirstUnallocated(RegList);
- // Mark all VFP regs as unavailable (AAPCS rule C.2.vfp)
- for (unsigned regNo = 0; regNo < 16; ++regNo)
- State.AllocateReg(SRegList[regNo]);
+ // First consume all registers that would give an unaligned object. Whether
+ // we go on stack or in regs, no one will be using them in the future.
+ unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+ while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
+ State.AllocateReg(RegList[RegIdx++]);
- unsigned Size = LocVT.getSizeInBits() / 8;
- unsigned Align = std::min(Size, 8U);
+ break;
+ }
+ case MVT::f32:
+ RegList = SRegList;
+ break;
+ case MVT::f64:
+ RegList = DRegList;
+ break;
+ case MVT::v2f64:
+ RegList = QRegList;
+ break;
+ default:
+ llvm_unreachable("Unexpected member type for block aggregate");
+ break;
+ }
+
+ unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
+ if (RegResult) {
+ for (SmallVectorImpl<CCValAssign>::iterator It = PendingMembers.begin();
+ It != PendingMembers.end(); ++It) {
+ It->convertToReg(RegResult);
+ State.addLoc(*It);
+ ++RegResult;
+ }
+ PendingMembers.clear();
+ return true;
+ }
+
+ // Register allocation failed: we'll need the stack
+ unsigned Size = LocVT.getSizeInBits() / 8;
+ if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) {
+ // If nothing else has used the stack until this point, a non-HFA aggregate
+ // can be split between regs and stack.
+ unsigned RegIdx = State.getFirstUnallocated(RegList);
+ for (auto &It : PendingMembers) {
+ if (RegIdx >= RegList.size())
+ It.convertToMem(State.AllocateStack(Size, Size));
+ else
+ It.convertToReg(State.AllocateReg(RegList[RegIdx++]));
- for (auto It : PendingHAMembers) {
- It.convertToMem(State.AllocateStack(Size, Align));
State.addLoc(It);
}
+ PendingMembers.clear();
+ return true;
+ } else if (LocVT != MVT::i32)
+ RegList = SRegList;
+
+ // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core)
+ for (auto Reg : RegList)
+ State.AllocateReg(Reg);
- // All pending members have now been allocated
- PendingHAMembers.clear();
+ for (auto &It : PendingMembers) {
+ It.convertToMem(State.AllocateStack(Size, Align));
+ State.addLoc(It);
+
+ // After the first item has been allocated, the rest are packed as tightly
+ // as possible. (E.g. an incoming i64 would have starting Align of 8, but
+ // we'll be allocating a bunch of i32 slots).
+ Align = Size;
}
- // This will be allocated by the last member of the HA
+ // All pending members have now been allocated
+ PendingMembers.clear();
+
+ // This will be allocated by the last member of the aggregate
return true;
}
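
The rewritten hook tries to place an aggregate in one contiguous block of registers and, if that fails, burns the remaining registers of that class before falling back to the stack (AAPCS rules C.2.vfp and C.6, as the comments above note). A minimal standalone model of the block-allocation step, using a boolean register file instead of CCState:

    #include <cstdio>
    #include <vector>

    // Toy register file: Used[i] marks register i as allocated.
    struct RegFile {
      std::vector<bool> Used;
      explicit RegFile(unsigned N) : Used(N, false) {}

      // Find NumRegs consecutive free registers; mark them and return the
      // first index + 1 (0 means failure), loosely mirroring AllocateRegBlock.
      unsigned allocBlock(unsigned NumRegs) {
        for (unsigned I = 0; I + NumRegs <= Used.size(); ++I) {
          bool Free = true;
          for (unsigned J = 0; J < NumRegs; ++J)
            Free &= !Used[I + J];
          if (!Free)
            continue;
          for (unsigned J = 0; J < NumRegs; ++J)
            Used[I + J] = true;
          return I + 1;
        }
        // Failure: per AAPCS, mark every remaining register unavailable so
        // later arguments of this class also go on the stack.
        Used.assign(Used.size(), true);
        return 0;
      }
    };

    int main() {
      RegFile D(8);     // model of D0-D7
      D.Used[0] = true; // D0 taken by an earlier argument
      std::printf("%u\n", D.allocBlock(4)); // expect 2: block at D1-D4
    }
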
diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td
index 526089b..7dd21ecbe 100644
--- a/lib/Target/ARM/ARMCallingConv.td
+++ b/lib/Target/ARM/ARMCallingConv.td
@@ -175,7 +175,7 @@ def CC_ARM_AAPCS_VFP : CallingConv<[
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
// HFAs are passed in a contiguous block of registers, or on the stack
- CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_HA">>,
+ CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_Aggregate">>,
CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index 29405eb..9966cd7 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -383,11 +383,9 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
<< MCP->getConstants().size() << " CP entries, aligned to "
<< MCP->getConstantPoolAlignment() << " bytes *****\n");
- TII = (const ARMBaseInstrInfo *)MF->getTarget()
- .getSubtargetImpl()
- ->getInstrInfo();
+ STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
+ TII = STI->getInstrInfo();
AFI = MF->getInfo<ARMFunctionInfo>();
- STI = &MF->getTarget().getSubtarget<ARMSubtarget>();
isThumb = AFI->isThumbFunction();
isThumb1 = AFI->isThumb1OnlyFunction();
@@ -532,7 +530,7 @@ ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// identity mapping of CPI's to CPE's.
const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
- const DataLayout &TD = *MF->getSubtarget().getDataLayout();
+ const DataLayout &TD = *MF->getTarget().getDataLayout();
for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
assert(Size >= 4 && "Too small constant pool entry");
@@ -1270,7 +1268,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
ImmBranches.push_back(ImmBranch(&UserMBB->back(),
MaxDisp, false, UncondBr));
- BBInfo[UserMBB->getNumber()].Size += Delta;
+ computeBlockSize(UserMBB);
adjustBBOffsetsAfter(UserMBB);
return;
}
@@ -1952,7 +1950,9 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
DEBUG(dbgs() << "Shrink JT: " << *MI << " addr: " << *AddrMI
<< " lea: " << *LeaMI);
unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
- MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
+ MachineBasicBlock::iterator MI_JT = MI;
+ MachineInstr *NewJTMI =
+ BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
.addReg(IdxReg, getKillRegState(IdxRegKill))
.addJumpTableIndex(JTI, JTOP.getTargetFlags())
.addImm(MI->getOperand(JTOpIdx+1).getImm());
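
t2TBB and t2TBH store half the forward branch offset per table entry, so a byte table reaches 510 bytes past its base and a halfword table 131070. A standalone sketch of the ByteOk-style feasibility test, assuming offsets are measured from a hypothetical TableBase:

    #include <cstdio>
    #include <vector>

    // Each TBB/TBH entry is (TargetAddr - TableBase) / 2 and must fit the
    // entry width. Offsets are forward-only and even.
    bool byteTableOk(unsigned TableBase, const std::vector<unsigned> &Targets) {
      for (unsigned T : Targets) {
        if (T < TableBase || (T - TableBase) % 2 != 0)
          return false;
        if ((T - TableBase) / 2 > 255) // would need TBH (16-bit entries)
          return false;
      }
      return true;
    }

    int main() {
      std::printf("%d\n", byteTableOk(0x1000, {0x1004, 0x11FE})); // 1
      std::printf("%d\n", byteTableOk(0x1000, {0x1400}));         // 0
    }
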
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 2d80518..4438f50 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -22,8 +22,8 @@
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h" // FIXME: for debug only. remove!
@@ -887,6 +887,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
unsigned MaxAlign = MFI->getMaxAlignment();
assert (!AFI->isThumb1OnlyFunction());
// Emit bic r6, r6, MaxAlign
+ assert(MaxAlign <= 256 && "The BIC instruction cannot encode "
+ "immediates larger than 256 with all lower "
+ "bits set.");
unsigned bicOpc = AFI->isThumbFunction() ?
ARM::t2BICri : ARM::BICri;
AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
@@ -980,7 +983,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
unsigned LDRLITOpc = IsARM ? ARM::LDRi12 : ARM::tLDRpci;
unsigned PICAddOpc =
IsARM
- ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICADD : ARM::PICLDR)
+ ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
: ARM::tPICADD;
// We need a new const-pool entry to load from.
@@ -1129,7 +1132,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Add the source operands (D subregs).
unsigned D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
unsigned D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
- MIB.addReg(D0).addReg(D1);
+ MIB.addReg(D0, SrcIsKill ? RegState::Kill : 0)
+ .addReg(D1, SrcIsKill ? RegState::Kill : 0);
if (SrcIsKill) // Add an implicit kill for the Q register.
MIB->addRegisterKilled(SrcReg, TRI, true);
@@ -1342,11 +1346,9 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
}
bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- TII = static_cast<const ARMBaseInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- TRI = TM.getSubtargetImpl()->getRegisterInfo();
- STI = &TM.getSubtarget<ARMSubtarget>();
+ STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
AFI = MF.getInfo<ARMFunctionInfo>();
bool Modified = false;
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index a5f635e..375d394 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -93,11 +93,11 @@ class ARMFastISel final : public FastISel {
explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
: FastISel(funcInfo, libInfo),
+ Subtarget(
+ &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
M(const_cast<Module &>(*funcInfo.Fn->getParent())),
- TM(funcInfo.MF->getTarget()),
- TII(*TM.getSubtargetImpl()->getInstrInfo()),
- TLI(*TM.getSubtargetImpl()->getTargetLowering()) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
+ TLI(*Subtarget->getTargetLowering()) {
AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
isThumb2 = AFI->isThumbFunction();
Context = &funcInfo.Fn->getContext();
@@ -189,9 +189,7 @@ class ARMFastISel final : public FastISel {
unsigned ARMSelectCallOp(bool UseReg);
unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);
- const TargetLowering *getTargetLowering() {
- return TM.getSubtargetImpl()->getTargetLowering();
- }
+ const TargetLowering *getTargetLowering() { return &TLI; }
// Call handling routines.
private:
@@ -586,9 +584,8 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
Reloc::Model RelocM = TM.getRelocationModel();
bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
- const TargetRegisterClass *RC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
+ const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
+ : &ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
// FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
@@ -893,9 +890,8 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
// put the alloca address into a register, set the base type back to
// register and continue. This should almost never happen.
if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
- const TargetRegisterClass *RC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
+ const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
+ : &ARM::GPRRegClass;
unsigned ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1094,9 +1090,8 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
- unsigned Res = createResultReg(isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass);
+ unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
+ : &ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1500,9 +1495,8 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
// Now set a register based on the comparison. Explicitly set the predicates
// here.
unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
- const TargetRegisterClass *RC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
+ const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
+ : &ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = fastMaterializeConstant(Zero);
@@ -2490,19 +2484,12 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- unsigned LdrOpc;
- const TargetRegisterClass *RC;
- if (isThumb2) {
- LdrOpc = ARM::t2LDRi12;
- RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
- } else {
- LdrOpc = ARM::LDRi12;
- RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
- }
+ unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
+ const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
+ : &ARM::GPRRegClass;
const ARMBaseRegisterInfo *RegInfo =
- static_cast<const ARMBaseRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
unsigned SrcReg = FramePtr;
@@ -3075,13 +3062,13 @@ namespace llvm {
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) {
const TargetMachine &TM = funcInfo.MF->getTarget();
-
- const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ const ARMSubtarget &STI =
+ static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget());
// Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
bool UseFastISel = false;
- UseFastISel |= Subtarget->isTargetMachO() && !Subtarget->isThumb1Only();
- UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
- UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
+ UseFastISel |= STI.isTargetMachO() && !STI.isThumb1Only();
+ UseFastISel |= STI.isTargetLinux() && !STI.isThumb();
+ UseFastISel |= STI.isTargetNaCl() && !STI.isThumb();
if (UseFastISel) {
// iOS always has a FP for backtracking, force other targets
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index 80add7a..5a5bd57 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -164,9 +164,13 @@ static int sizeOfSPAdjustment(const MachineInstr *MI) {
static bool WindowsRequiresStackProbe(const MachineFunction &MF,
size_t StackSizeInBytes) {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- if (MFI->getStackProtectorIndex() > 0)
- return StackSizeInBytes >= 4080;
- return StackSizeInBytes >= 4096;
+ const Function *F = MF.getFunction();
+ unsigned StackProbeSize = (MFI->getStackProtectorIndex() > 0) ? 4080 : 4096;
+ if (F->hasFnAttribute("stack-probe-size"))
+ F->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+ return StackSizeInBytes >= StackProbeSize;
}
namespace {
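
WindowsRequiresStackProbe now lets a string function attribute override the probe threshold, which otherwise defaults to 4096 bytes, or 4080 when a stack protector slot occupies part of the first page. A standalone sketch of the same decision, with a plain map standing in for the function's attribute list:

    #include <cstdio>
    #include <cstdlib>
    #include <map>
    #include <string>

    bool requiresStackProbe(size_t StackSizeInBytes, bool HasStackProtector,
                            const std::map<std::string, std::string> &FnAttrs) {
      unsigned StackProbeSize = HasStackProtector ? 4080 : 4096;
      auto It = FnAttrs.find("stack-probe-size");
      if (It != FnAttrs.end()) // mirror of getAsInteger(0, StackProbeSize)
        StackProbeSize = std::strtoul(It->second.c_str(), nullptr, 0);
      return StackSizeInBytes >= StackProbeSize;
    }

    int main() {
      std::map<std::string, std::string> Attrs{{"stack-probe-size", "8192"}};
      std::printf("%d\n", requiresStackProbe(5000, false, Attrs)); // 0
      std::printf("%d\n", requiresStackProbe(5000, false, {}));    // 1
    }
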
@@ -203,12 +207,77 @@ struct StackAdjustingInsts {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
BuildMI(MBB, std::next(Info.I), dl,
- TII.get(TargetOpcode::CFI_INSTRUCTION)).addCFIIndex(CFIIndex);
+ TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
}
};
}
+/// Emit an instruction sequence that will align the address in
+/// register Reg by zeroing out the lower bits. On architecture versions
+/// that support Neon this must be done in a single instruction, since
+/// skipAlignedDPRCS2Spills assumes as much. That function only gets
+/// called when optimizing
+/// spilling of D registers on a core with the Neon instruction set
+/// present.
+static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
+ const TargetInstrInfo &TII,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, const unsigned Reg,
+ const unsigned Alignment,
+ const bool MustBeSingleInstruction) {
+ const ARMSubtarget &AST =
+ static_cast<const ARMSubtarget &>(MF.getSubtarget());
+ const bool CanUseBFC = AST.hasV6T2Ops() || AST.hasV7Ops();
+ const unsigned AlignMask = Alignment - 1;
+ const unsigned NrBitsToZero = countTrailingZeros(Alignment);
+ assert(!AFI->isThumb1OnlyFunction() && "Thumb1 not supported");
+ if (!AFI->isThumbFunction()) {
+ // if the BFC instruction is available, use that to zero the lower
+ // bits:
+ // bfc Reg, #0, log2(Alignment)
+ // otherwise use BIC, if the mask to zero the required number of bits
+ // can be encoded in the bic immediate field
+ // bic Reg, Reg, Alignment-1
+ // otherwise, emit
+ // lsr Reg, Reg, log2(Alignment)
+ // lsl Reg, Reg, log2(Alignment)
+ if (CanUseBFC) {
+ AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::BFC), Reg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(~AlignMask));
+ } else if (AlignMask <= 255) {
+ AddDefaultCC(
+ AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(AlignMask)));
+ } else {
+ assert(!MustBeSingleInstruction &&
+ "Shouldn't call emitAligningInstructions demanding a single "
+ "instruction to be emitted for large stack alignment for a target "
+ "without BFC.");
+ AddDefaultCC(AddDefaultPred(
+ BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero))));
+ AddDefaultCC(AddDefaultPred(
+ BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero))));
+ }
+ } else {
+ // Since this is only reached for Thumb-2 targets, the BFC instruction
+ // should always be available.
+ assert(CanUseBFC);
+ AddDefaultPred(BuildMI(MBB, MBBI, DL, TII.get(ARM::t2BFC), Reg)
+ .addReg(Reg, RegState::Kill)
+ .addImm(~AlignMask));
+ }
+}
+
void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator MBBI = MBB.begin();
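
emitAligningInstructions, added above, picks between three equivalent sequences for clearing the low bits of a register: BFC where ARMv6T2/v7 bitfield ops exist, BIC when the mask fits the 8-bit immediate range, and an LSR/LSL pair otherwise. A standalone check that the strategies agree, assuming Alignment is a nonzero power of two:

    #include <cassert>
    #include <cstdint>

    // Count trailing zero bits (Alignment must be a nonzero power of two).
    static unsigned log2u(uint32_t X) {
      unsigned N = 0;
      while (!(X & 1)) { X >>= 1; ++N; }
      return N;
    }

    uint32_t alignViaBfcOrBic(uint32_t Reg, uint32_t Alignment) {
      // bfc Reg, #0, log2(Align)  /  bic Reg, Reg, Align-1
      return Reg & ~(Alignment - 1);
    }

    uint32_t alignViaShifts(uint32_t Reg, uint32_t Alignment) {
      unsigned N = log2u(Alignment);
      // lsr Reg, Reg, N ; lsl Reg, Reg, N
      return (Reg >> N) << N;
    }

    int main() {
      for (uint32_t A : {4u, 16u, 512u})
        for (uint32_t R : {0u, 123u, 0xFFFFFFFFu})
          assert(alignViaBfcOrBic(R, A) == alignViaShifts(R, A));
    }
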
@@ -218,15 +287,12 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
MCContext &Context = MMI.getContext();
const TargetMachine &TM = MF.getTarget();
const MCRegisterInfo *MRI = Context.getRegisterInfo();
- const ARMBaseRegisterInfo *RegInfo = static_cast<const ARMBaseRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
- const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const ARMBaseRegisterInfo *RegInfo = STI.getRegisterInfo();
+ const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
assert(!AFI->isThumb1OnlyFunction() &&
"This emitPrologue does not support Thumb1!");
bool isARM = !AFI->isThumbFunction();
- unsigned Align =
- TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
+ unsigned Align = STI.getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -451,13 +517,15 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
nullptr, MRI->getDwarfRegNum(FramePtr, true),
-(ArgRegsSaveSize - FramePtrOffsetInPush)));
BuildMI(MBB, AfterPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
} else {
unsigned CFIIndex =
MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
nullptr, MRI->getDwarfRegNum(FramePtr, true)));
BuildMI(MBB, AfterPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
}
@@ -491,7 +559,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
nullptr, MRI->getDwarfRegNum(Reg, true), MFI->getObjectOffset(FI)));
BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
break;
}
}
@@ -514,7 +583,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
break;
}
@@ -535,7 +605,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
}
}
@@ -561,28 +632,24 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
// realigned.
if (!AFI->getNumAlignedDPRCS2Regs() && RegInfo->needsStackRealignment(MF)) {
unsigned MaxAlign = MFI->getMaxAlignment();
- assert (!AFI->isThumb1OnlyFunction());
+ assert(!AFI->isThumb1OnlyFunction());
if (!AFI->isThumbFunction()) {
- // Emit bic sp, sp, MaxAlign
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::BICri), ARM::SP)
- .addReg(ARM::SP, RegState::Kill)
- .addImm(MaxAlign-1)));
+ emitAligningInstructions(MF, AFI, TII, MBB, MBBI, dl, ARM::SP, MaxAlign,
+ false);
} else {
- // We cannot use sp as source/dest register here, thus we're emitting the
- // following sequence:
+ // We cannot use sp as source/dest register here, thus we're using r4 to
+ // perform the calculations. We're emitting the following sequence:
// mov r4, sp
- // bic r4, r4, MaxAlign
+ // -- use emitAligningInstructions to produce best sequence to zero
+ // -- out lower bits in r4
// mov sp, r4
// FIXME: It would be better just to find a spare register here.
AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4)
- .addReg(ARM::SP, RegState::Kill));
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::t2BICri), ARM::R4)
- .addReg(ARM::R4, RegState::Kill)
- .addImm(MaxAlign-1)));
+ .addReg(ARM::SP, RegState::Kill));
+ emitAligningInstructions(MF, AFI, TII, MBB, MBBI, dl, ARM::R4, MaxAlign,
+ false);
AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
- .addReg(ARM::R4, RegState::Kill));
+ .addReg(ARM::R4, RegState::Kill));
}
AFI->setShouldRestoreSPFromFP(true);
@@ -612,11 +679,59 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
+// Resolve TCReturn pseudo-instruction
+void ARMFrameLowering::fixTCReturn(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc dl = MBBI->getDebugLoc();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
+
+ if (!(RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri))
+ return;
+
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+
+ // Jump to label or value in register.
+ if (RetOpcode == ARM::TCRETURNdi) {
+ unsigned TCOpcode = STI.isThumb() ?
+ (STI.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
+ ARM::TAILJMPd;
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
+ if (JumpTarget.isGlobal())
+ MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ else {
+ assert(JumpTarget.isSymbol());
+ MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+ JumpTarget.getTargetFlags());
+ }
+
+ // Add the default predicate in Thumb mode.
+ if (STI.isThumb()) MIB.addImm(ARMCC::AL).addReg(0);
+ } else if (RetOpcode == ARM::TCRETURNri) {
+ BuildMI(MBB, MBBI, dl,
+ TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = std::prev(MBBI);
+ for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
+ NewMI->addOperand(MBBI->getOperand(i));
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ MBBI = NewMI;
+}
+
void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
- unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl = MBBI->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -627,18 +742,17 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
"This emitEpilogue does not support Thumb1!");
bool isARM = !AFI->isThumbFunction();
- unsigned Align = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned Align = STI.getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
int NumBytes = (int)MFI->getStackSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction()->getCallingConv() == CallingConv::GHC) {
+ fixTCReturn(MF, MBB);
return;
+ }
if (!AFI->hasStackFrame()) {
if (NumBytes - ArgRegsSaveSize != 0)
@@ -717,42 +831,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
}
- if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri) {
- // Tail call return: adjust the stack pointer and jump to callee.
- MBBI = MBB.getLastNonDebugInstr();
- MachineOperand &JumpTarget = MBBI->getOperand(0);
-
- // Jump to label or value in register.
- if (RetOpcode == ARM::TCRETURNdi) {
- unsigned TCOpcode = STI.isThumb() ?
- (STI.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
- ARM::TAILJMPd;
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
- if (JumpTarget.isGlobal())
- MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
- JumpTarget.getTargetFlags());
- else {
- assert(JumpTarget.isSymbol());
- MIB.addExternalSymbol(JumpTarget.getSymbolName(),
- JumpTarget.getTargetFlags());
- }
-
- // Add the default predicate in Thumb mode.
- if (STI.isThumb()) MIB.addImm(ARMCC::AL).addReg(0);
- } else if (RetOpcode == ARM::TCRETURNri) {
- BuildMI(MBB, MBBI, dl,
- TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
- addReg(JumpTarget.getReg(), RegState::Kill);
- }
-
- MachineInstr *NewMI = std::prev(MBBI);
- for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
- NewMI->addOperand(MBBI->getOperand(i));
-
- // Delete the pseudo instruction TCRETURN.
- MBB.erase(MBBI);
- MBBI = NewMI;
- }
+ fixTCReturn(MF, MBB);
if (ArgRegsSaveSize)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, ArgRegsSaveSize);
@@ -1062,15 +1141,16 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
// The immediate is <= 64, so it doesn't need any special encoding.
unsigned Opc = isThumb ? ARM::t2SUBri : ARM::SUBri;
AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
- .addReg(ARM::SP)
- .addImm(8 * NumAlignedDPRCS2Regs)));
+ .addReg(ARM::SP)
+ .addImm(8 * NumAlignedDPRCS2Regs)));
- // bic r4, r4, #align-1
- Opc = isThumb ? ARM::t2BICri : ARM::BICri;
unsigned MaxAlign = MF.getFrameInfo()->getMaxAlignment();
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
- .addReg(ARM::R4, RegState::Kill)
- .addImm(MaxAlign - 1)));
+ // We must set parameter MustBeSingleInstruction to true, since
+ // skipAlignedDPRCS2Spills expects exactly 3 instructions to perform
+ // stack alignment. Luckily, this can always be done since all ARM
+ // architecture versions that support Neon also support the BFC
+ // instruction.
+ emitAligningInstructions(MF, AFI, TII, MBB, MI, DL, ARM::R4, MaxAlign, true);
// mov sp, r4
// The stack pointer must be adjusted before spilling anything, otherwise
@@ -1387,25 +1467,20 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::Naked))
+ if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
- if (!MF.getTarget().getSubtarget<ARMSubtarget>().hasNEON())
+ if (!static_cast<const ARMSubtarget &>(MF.getSubtarget()).hasNEON())
return;
// Don't bother if the default stack alignment is sufficiently high.
- if (MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment() >= 8)
+ if (MF.getSubtarget().getFrameLowering()->getStackAlignment() >= 8)
return;
// Aligned spills require stack realignment.
- const ARMBaseRegisterInfo *RegInfo = static_cast<const ARMBaseRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
- if (!RegInfo->canRealignStack(MF))
+ if (!static_cast<const ARMBaseRegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo())->canRealignStack(MF))
return;
// We always spill contiguous d-registers starting from d8. Count how many
@@ -1789,7 +1864,7 @@ static const uint64_t kSplitStackAvailable = 256;
void ARMFrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
unsigned Opcode;
unsigned CFIIndex;
- const ARMSubtarget *ST = &MF.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget *ST = &MF.getSubtarget<ARMSubtarget>();
bool Thumb = ST->isThumb();
// Sadly, this currently doesn't support varargs, platforms other than
diff --git a/lib/Target/ARM/ARMFrameLowering.h b/lib/Target/ARM/ARMFrameLowering.h
index a83b773..b7be436 100644
--- a/lib/Target/ARM/ARMFrameLowering.h
+++ b/lib/Target/ARM/ARMFrameLowering.h
@@ -31,6 +31,8 @@ public:
void emitPrologue(MachineFunction &MF) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void fixTCReturn(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp
index 0e4f81c..a84603b 100644
--- a/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -44,10 +44,9 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
if (LastMI && (MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
MachineInstr *DefMI = LastMI;
const MCInstrDesc &LastMCID = LastMI->getDesc();
- const TargetMachine &TM =
- MI->getParent()->getParent()->getTarget();
+ const MachineFunction *MF = MI->getParent()->getParent();
const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ MF->getSubtarget().getInstrInfo());
// Skip over one non-VFP / NEON instruction.
if (!LastMI->isBarrier() &&
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 6941579..6ebf640 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -70,7 +70,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override {
// Reset the subtarget each time through.
- Subtarget = &MF.getTarget().getSubtarget<ARMSubtarget>();
+ Subtarget = &MF.getSubtarget<ARMSubtarget>();
SelectionDAGISel::runOnMachineFunction(MF);
return true;
}
@@ -992,18 +992,24 @@ bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
Addr = N;
unsigned Alignment = 0;
- if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
+
+ MemSDNode *MemN = cast<MemSDNode>(Parent);
+
+ if (isa<LSBaseSDNode>(MemN) ||
+ ((MemN->getOpcode() == ARMISD::VST1_UPD ||
+ MemN->getOpcode() == ARMISD::VLD1_UPD) &&
+ MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
// This case occurs only for VLD1-lane/dup and VST1-lane instructions.
// The maximum alignment is equal to the memory size being referenced.
- unsigned LSNAlign = LSN->getAlignment();
- unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
- if (LSNAlign >= MemSize && MemSize > 1)
+ unsigned MMOAlign = MemN->getAlignment();
+ unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
+ if (MMOAlign >= MemSize && MemSize > 1)
Alignment = MemSize;
} else {
// All other uses of addrmode6 are for intrinsics. For now just record
// the raw alignment value; it will be refined later based on the legal
// alignment operands for the intrinsic.
- Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
+ Alignment = MemN->getAlignment();
}
Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
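
For the VLD1-lane/dup and VST1-lane cases the encodable alignment hint is capped by the access size, so an alignment operand is only recorded when the memory operand's alignment covers the whole access. The cap in isolation:

    #include <cstdio>

    // Alignment operand for addrmode6: the memory size if fully aligned,
    // otherwise 0, meaning no alignment guarantee is encoded.
    unsigned addrMode6Alignment(unsigned MMOAlign, unsigned MemSizeBytes) {
      return (MMOAlign >= MemSizeBytes && MemSizeBytes > 1) ? MemSizeBytes : 0;
    }

    int main() {
      std::printf("%u\n", addrMode6Alignment(8, 8)); // 8
      std::printf("%u\n", addrMode6Alignment(4, 8)); // 0
    }
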
@@ -1191,6 +1197,11 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
SDValue &Base, SDValue &OffImm) {
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
+ // Only multiples of 4 are allowed for the offset, so the frame object
+ // alignment must be at least 4.
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ if (MFI->getObjectAlignment(FI) < 4)
+ MFI->setObjectAlignment(FI, 4);
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
@@ -1208,6 +1219,11 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ // For LHS+RHS to result in an offset that's a multiple of 4, the object
+ // indexed by the LHS must be 4-byte aligned.
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ if (MFI->getObjectAlignment(FI) < 4)
+ MFI->setObjectAlignment(FI, 4);
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
@@ -1784,6 +1800,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
case MVT::v8i16: OpcodeIndex = 1; break;
case MVT::v4f32:
case MVT::v4i32: OpcodeIndex = 2; break;
+ case MVT::v2f64:
case MVT::v2i64: OpcodeIndex = 3;
assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
break;
@@ -1920,6 +1937,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
case MVT::v8i16: OpcodeIndex = 1; break;
case MVT::v4f32:
case MVT::v4i32: OpcodeIndex = 2; break;
+ case MVT::v2f64:
case MVT::v2i64: OpcodeIndex = 3;
assert(NumVecs == 1 && "v2i64 type only supported for VST1");
break;
@@ -2290,7 +2308,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
// Note: The width operand is encoded as width-1.
- unsigned Width = CountTrailingOnes_32(And_imm) - 1;
+ unsigned Width = countTrailingOnes(And_imm) - 1;
unsigned LSB = Srl_imm;
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
@@ -2494,6 +2512,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
if (Subtarget->isThumb1Only()) {
+ // Set the alignment of the frame object to 4, to avoid having to generate
+ // more than one ADD
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ if (MFI->getObjectAlignment(FI) < 4)
+ MFI->setObjectAlignment(FI, 4);
return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
CurDAG->getTargetConstant(0, MVT::i32));
} else {
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 0d0d81f..56290aa 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -156,11 +156,11 @@ void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
-ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
- : TargetLowering(TM) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- RegInfo = TM.getSubtargetImpl()->getRegisterInfo();
- Itins = TM.getSubtargetImpl()->getInstrItineraryData();
+ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
+ const ARMSubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
+ RegInfo = Subtarget->getRegisterInfo();
+ Itins = Subtarget->getInstrItineraryData();
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
@@ -404,22 +404,20 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
addRegisterClass(MVT::f64, &ARM::DPRRegClass);
}
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
- for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
- setTruncStoreAction((MVT::SimpleValueType)VT,
- (MVT::SimpleValueType)InnerVT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
- setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ for (MVT VT : MVT::vector_valuetypes()) {
+ for (MVT InnerVT : MVT::vector_valuetypes()) {
+ setTruncStoreAction(VT, InnerVT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
+ }
- setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::MULHS, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::BSWAP, VT, Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
@@ -567,15 +565,18 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::FP_TO_UINT);
setTargetDAGCombine(ISD::FDIV);
+ setTargetDAGCombine(ISD::LOAD);
// It is legal to extload from v4i8 to v4i16 or v4i32.
MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
MVT::v4i16, MVT::v2i16,
MVT::v2i32};
for (unsigned i = 0; i < 6; ++i) {
- setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
- setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
- setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, Tys[i], Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, Tys[i], Legal);
+ setLoadExtAction(ISD::SEXTLOAD, VT, Tys[i], Legal);
+ }
}
}
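
setLoadExtAction now takes both the result type and the memory type, keying an extending load's legality on the (extension kind, result VT, memory VT) triple instead of the memory type alone. A toy action table with that shape, using strings in place of MVTs:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <tuple>

    enum Action { Legal, Expand, Promote };
    enum ExtKind { SEXTLOAD, ZEXTLOAD, EXTLOAD };

    using Key = std::tuple<ExtKind, std::string, std::string>;

    struct ActionTable {
      std::map<Key, Action> M;
      void set(ExtKind K, const std::string &ResVT, const std::string &MemVT,
               Action A) { M[{K, ResVT, MemVT}] = A; }
      Action get(ExtKind K, const std::string &ResVT,
                 const std::string &MemVT) const {
        auto It = M.find({K, ResVT, MemVT});
        return It == M.end() ? Expand : It->second;
      }
    };

    int main() {
      ActionTable T;
      // Mirrors the hunk: extloads from v4i8 are legal into wider vectors.
      T.set(EXTLOAD, "v4i16", "v4i8", Legal);
      std::printf("%d\n", T.get(EXTLOAD, "v4i16", "v4i8") == Legal);  // 1
      std::printf("%d\n", T.get(EXTLOAD, "v4i32", "v4i16") == Legal); // 0
    }
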
@@ -617,11 +618,13 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
}
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget->getRegisterInfo());
// ARM does not have floating-point extending loads.
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ for (MVT VT : MVT::fp_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
+ }
// ... or truncating stores
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -629,7 +632,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
// ARM does not have i1 sign extending load.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// ARM supports all 4 flavors of integer indexed load / store.
if (!Subtarget->isThumb1Only()) {
@@ -963,13 +967,14 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
-std::pair<const TargetRegisterClass*, uint8_t>
-ARMTargetLowering::findRepresentativeClass(MVT VT) const{
+std::pair<const TargetRegisterClass *, uint8_t>
+ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
+ MVT VT) const {
const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
- return TargetLowering::findRepresentativeClass(VT);
+ return TargetLowering::findRepresentativeClass(TRI, VT);
// Use DPR as representative register class for all floating point
// and vector types. Since there are 32 SPR registers and 32 DPR registers so
// the cost is 1 for both f32 and f64.
@@ -1166,12 +1171,6 @@ ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
return ARM::createFastISel(funcInfo, libInfo);
}
-/// getMaximalGlobalOffset - Returns the maximal possible offset which can
-/// be used for loads / stores from the global.
-unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
- return (Subtarget->isThumb1Only() ? 127 : 4095);
-}
-
Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
unsigned NumVals = N->getNumValues();
if (!NumVals)
@@ -1190,8 +1189,7 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
// Loads are scheduled for latency even if the instruction itinerary
// is not available.
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
if (MCID.getNumDefs() == 0)
@@ -1783,8 +1781,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// FIXME: handle tail calls differently.
unsigned CallOpc;
- bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize);
+ bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
@@ -1815,9 +1812,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Add a register mask operand representing the call-preserved registers.
if (!isTailCall) {
const uint32_t *Mask;
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
+ const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
if (isThisReturn) {
// For 'this' returns, use the R0-preserving mask if applicable
Mask = ARI->getThisReturnPreservedMask(CallConv);
@@ -1865,7 +1860,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
void
ARMTargetLowering::HandleByVal(
CCState *State, unsigned &size, unsigned Align) const {
- unsigned reg = State->AllocateReg(GPRArgRegs, 4);
+ unsigned reg = State->AllocateReg(GPRArgRegs);
assert((State->getCallOrPrologue() == Prologue ||
State->getCallOrPrologue() == Call) &&
"unhandled ParmContext");
@@ -1875,7 +1870,7 @@ ARMTargetLowering::HandleByVal(
unsigned AlignInRegs = Align / 4;
unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
for (unsigned i = 0; i < Waste; ++i)
- reg = State->AllocateReg(GPRArgRegs, 4);
+ reg = State->AllocateReg(GPRArgRegs);
}
if (reg != 0) {
unsigned excess = 4 * (ARM::R4 - reg);
@@ -1886,7 +1881,7 @@ ARMTargetLowering::HandleByVal(
// remained registers.
const unsigned NSAAOffset = State->getNextStackOffset();
if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
- while (State->AllocateReg(GPRArgRegs, 4))
+ while (State->AllocateReg(GPRArgRegs))
;
return;
}
@@ -1903,7 +1898,7 @@ ARMTargetLowering::HandleByVal(
// Note, first register is allocated in the beginning of function already,
// allocate remained amount of registers we need.
for (unsigned i = reg+1; i != ByValRegEnd; ++i)
- State->AllocateReg(GPRArgRegs, 4);
+ State->AllocateReg(GPRArgRegs);
// A byval parameter that is split between registers and memory needs its
// size truncated here.
// In the case where the entire structure fits in registers, we set the
@@ -2025,7 +2020,9 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// cannot rely on the linker replacing the tail call with a return.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
- if (GV->hasExternalWeakLinkage())
+ const Triple TT(getTargetMachine().getTargetTriple());
+ if (GV->hasExternalWeakLinkage() &&
+ (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
return false;
}
@@ -2084,8 +2081,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
i != e;
++i, ++realArgIdx) {
@@ -2837,16 +2833,11 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
NumGPRs = REnd - RBegin;
} else {
unsigned int firstUnalloced;
- firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
- sizeof(GPRArgRegs) /
- sizeof(GPRArgRegs[0]));
+ firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs);
NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
}
- unsigned Align = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned Align = Subtarget->getFrameLowering()->getStackAlignment();
ArgRegsSize = NumGPRs * 4;
// If parameter is split between stack and GPRs...
@@ -2913,8 +2904,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
firstRegToSaveIndex = RBegin - ARM::R0;
lastRegToSaveIndex = REnd - ARM::R0;
} else {
- firstRegToSaveIndex = CCInfo.getFirstUnallocated
- (GPRArgRegs, array_lengthof(GPRArgRegs));
+ firstRegToSaveIndex = CCInfo.getFirstUnallocated(GPRArgRegs);
lastRegToSaveIndex = 4;
}
@@ -3087,8 +3077,11 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
+ if (Ins[VA.getValNo()].isOrigArg()) {
+ std::advance(CurOrigArg,
+ Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
+ }
// Arguments stored in registers.
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
@@ -3129,9 +3122,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
else if (RegVT == MVT::v2f64)
RC = &ARM::QPRRegClass;
else if (RegVT == MVT::i32)
- RC = AFI->isThumb1OnlyFunction() ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
+ RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
+ : &ARM::GPRRegClass;
else
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
@@ -3169,7 +3161,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
assert(VA.isMemLoc());
assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
- int index = ArgLocs[i].getValNo();
+ int index = VA.getValNo();
// Some Ins[] entries become multiple ArgLoc[] entries.
// Process them only once.
@@ -3182,6 +3174,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// Since they could be overwritten by lowering of arguments in case of
// a tail call.
if (Flags.isByVal()) {
+ assert(Ins[index].isOrigArg() &&
+ "Byval arguments cannot be implicit");
unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
@@ -3596,8 +3590,8 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
// inverting the compare condition, swapping 'less' and 'greater') and
// sometimes need to swap the operands to the VSEL (which inverts the
// condition in the sense of firing whenever the previous condition didn't)
- if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
- TrueVal.getValueType() == MVT::f64)) {
+ if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
+ TrueVal.getValueType() == MVT::f64)) {
ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
@@ -3616,8 +3610,8 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
FPCCToARMCC(CC, CondCode, CondCode2);
// Try to generate VSEL on ARMv8.
- if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
- TrueVal.getValueType() == MVT::f64)) {
+ if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
+ TrueVal.getValueType() == MVT::f64)) {
// We can select VMAXNM/VMINNM from a compare followed by a select with the
// same operands, as follows:
// c = fcmp [ogt, olt, ugt, ult] a, b
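
A scalar model of the pattern that comment sketches (illustrative, not the lowering itself): when the compare's operands also feed the select, the sequence maps onto VMAXNM/VMINNM on ARMv8 FP instead of a compare plus VSEL.

float fmax_pattern(float a, float b) {
  bool c = a > b;    // c = fcmp ogt a, b
  return c ? a : b;  // select c, a, b  -> VMAXNM.F32 (modulo NaN semantics)
}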
@@ -4483,6 +4477,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
+ EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
EVT VT = Op.getValueType();
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
SDLoc dl(Op);
@@ -4512,8 +4507,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
TmpOp0 = Op0;
TmpOp1 = Op1;
Opc = ISD::OR;
- Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
- Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
+ Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
+ Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
break;
case ISD::SETUO: Invert = true; // Fallthrough
case ISD::SETO:
@@ -4521,8 +4516,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
TmpOp0 = Op0;
TmpOp1 = Op1;
Opc = ISD::OR;
- Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
- Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
+ Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
+ Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
break;
}
} else {
@@ -4556,8 +4551,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
Opc = ARMISD::VTST;
- Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
- Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
+ Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
+ Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
Invert = !Invert;
}
}
@@ -4583,22 +4578,24 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (SingleOp.getNode()) {
switch (Opc) {
case ARMISD::VCEQ:
- Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
+ Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCGE:
- Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
+ Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCLEZ:
- Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
+ Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCGT:
- Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
+ Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCLTZ:
- Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
+ Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
default:
- Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
+ Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
}
} else {
- Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
+ Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
}
+ Result = DAG.getSExtOrTrunc(Result, dl, VT);
+
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
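
Per lane, the CmpVT change above amounts to the following sketch (illustrative): the NEON compare produces an all-ones/all-zeros lane in an integer type of the source element width (CmpVT), which is then sign-extended or truncated to the node's result type VT.

#include <cstdint>

int16_t vsetcc_lane(float a, float b) {
  int32_t cmp = (a > b) ? -1 : 0;    // VCGT lane result in CmpVT (i32)
  return static_cast<int16_t>(cmp);  // models getSExtOrTrunc to VT's lane
}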
@@ -6497,8 +6494,7 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
void ARMTargetLowering::
SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB, int FI) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
@@ -6515,9 +6511,8 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
- const TargetRegisterClass *TRC = isThumb ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
+ const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
+ : &ARM::GPRRegClass;
// Grab constant pool and fixed stack memory operands.
MachineMemOperand *CPMMO =
@@ -6613,8 +6608,7 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
MachineBasicBlock *ARMTargetLowering::
EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
@@ -6622,9 +6616,8 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineFrameInfo *MFI = MF->getFrameInfo();
int FI = MFI->getFunctionContextIndex();
- const TargetRegisterClass *TRC = Subtarget->isThumb() ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRnopcRegClass;
+ const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
+ : &ARM::GPRnopcRegClass;
// Get a mapping of the call site numbers to all of the landing pads they're
// associated with.
@@ -7129,8 +7122,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
// This pseudo instruction has 3 operands: dst, src, size
// We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
// Otherwise, we will generate unrolled scalar copies.
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = BB;
++It;
@@ -7156,9 +7148,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoImplicitFloat) &&
+ if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16)
UnitSize = 16;
@@ -7172,14 +7162,11 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
// Select the correct opcode and register class for unit size load/store
bool IsNeon = UnitSize >= 8;
- TRC = (IsThumb1 || IsThumb2) ? (const TargetRegisterClass *)&ARM::tGPRRegClass
- : (const TargetRegisterClass *)&ARM::GPRRegClass;
+ TRC = (IsThumb1 || IsThumb2) ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
if (IsNeon)
- VecTRC = UnitSize == 16
- ? (const TargetRegisterClass *)&ARM::DPairRegClass
- : UnitSize == 8
- ? (const TargetRegisterClass *)&ARM::DPRRegClass
- : nullptr;
+ VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
+ : UnitSize == 8 ? &ARM::DPRRegClass
+ : nullptr;
unsigned BytesLeft = SizeVal % UnitSize;
unsigned LoopSize = SizeVal - BytesLeft;
@@ -7364,7 +7351,7 @@ MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
MachineBasicBlock *MBB) const {
const TargetMachine &TM = getTargetMachine();
- const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
assert(Subtarget->isTargetWindows() &&
@@ -7429,8 +7416,7 @@ ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
switch (MI->getOpcode()) {
@@ -7627,9 +7613,8 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineRegisterInfo &MRI = Fn->getRegInfo();
  // In Thumb mode, S must not be specified if the source register is the SP
  // or PC, nor if the destination register is the SP, so restrict the register class
- unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass);
+ unsigned NewRsbDstReg =
+ MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
// Transfer the remainder of BB and its successor edges to sinkMBB.
SinkBB->splice(SinkBB->begin(), BB,
@@ -7694,8 +7679,7 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// Rename pseudo opcodes.
unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
if (NewOpc) {
- const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
+ const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
MCID = &TII->get(NewOpc);
assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
@@ -8059,29 +8043,35 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
else
IsLeftOperandMUL = true;
if (MULOp == SDValue())
- return SDValue();
+ return SDValue();
// Figure out the right opcode.
unsigned Opc = MULOp->getOpcode();
unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
// Figure out the high and low input values to the MLAL node.
- SDValue* HiMul = &MULOp;
SDValue* HiAdd = nullptr;
SDValue* LoMul = nullptr;
SDValue* LowAdd = nullptr;
+ // Ensure that ADDE is from high result of ISD::SMUL_LOHI.
+ if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
+ return SDValue();
+
if (IsLeftOperandMUL)
HiAdd = &AddeOp1;
else
HiAdd = &AddeOp0;
- if (AddcOp0->getOpcode() == Opc) {
+ // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node
+ // whose low result is fed to the ADDC we are checking.
+
+ if (AddcOp0 == MULOp.getValue(0)) {
LoMul = &AddcOp0;
LowAdd = &AddcOp1;
}
- if (AddcOp1->getOpcode() == Opc) {
+ if (AddcOp1 == MULOp.getValue(0)) {
LoMul = &AddcOp1;
LowAdd = &AddcOp0;
}
@@ -8089,9 +8079,6 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
if (!LoMul)
return SDValue();
- if (LoMul->getNode() != HiMul->getNode())
- return SDValue();
-
// Create the merged node.
SelectionDAG &DAG = DCI.DAG;
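
The source-level pattern this combine targets looks like the following (illustrative): a widening 32x32->64 multiply accumulated into a 64-bit value, which becomes a single SMLAL once the ADDC/ADDE pair is matched to the low and high halves of one SMUL_LOHI.

#include <cstdint>

int64_t smlal(int64_t acc, int32_t a, int32_t b) {
  return acc + (int64_t)a * (int64_t)b;  // -> SMLAL on ARM
}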
@@ -8583,7 +8570,10 @@ static SDValue PerformBFICombine(SDNode *N,
unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
unsigned LSB = countTrailingZeros(~InvMask);
unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
- unsigned Mask = (1 << Width)-1;
+ assert(Width <
+ static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
+ "undefined behavior");
+ unsigned Mask = (1u << Width) - 1;
unsigned Mask2 = N11C->getZExtValue();
if ((Mask & (~Mask2)) == 0)
return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
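
The reason for the new assert and the 1u literal, as a standalone sketch (illustrative): for a 32-bit unsigned, a shift count equal to the bit width is undefined behavior in C++, so Width must stay strictly below 32 when forming the low-bits mask.

#include <cassert>

unsigned lowBitsMask(unsigned Width) {
  assert(Width < 32 && "shift by >= bit width is undefined behavior");
  return (1u << Width) - 1;  // e.g. Width = 8 -> 0xFF
}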
@@ -8655,147 +8645,6 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-/// PerformSTORECombine - Target-specific dag combine xforms for
-/// ISD::STORE.
-static SDValue PerformSTORECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- StoreSDNode *St = cast<StoreSDNode>(N);
- if (St->isVolatile())
- return SDValue();
-
- // Optimize trunc store (of multiple scalars) to shuffle and store. First,
- // pack all of the elements in one place. Next, store to memory in fewer
- // chunks.
- SDValue StVal = St->getValue();
- EVT VT = StVal.getValueType();
- if (St->isTruncatingStore() && VT.isVector()) {
- SelectionDAG &DAG = DCI.DAG;
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT StVT = St->getMemoryVT();
- unsigned NumElems = VT.getVectorNumElements();
- assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
-
- // From, To sizes and ElemCount must be pow of two
- if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
-
- // We are going to use the original vector elt for storing.
- // Accumulated smaller vector elements must be a multiple of the store size.
- if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
-
- unsigned SizeRatio = FromEltSz / ToEltSz;
- assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
-
- // Create a type on which we perform the shuffle.
- EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
- NumElems*SizeRatio);
- assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
-
- SDLoc DL(St);
- SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
- SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
- for (unsigned i = 0; i < NumElems; ++i)
- ShuffleVec[i] = TLI.isBigEndian() ? (i+1) * SizeRatio - 1 : i * SizeRatio;
-
- // Can't shuffle using an illegal type.
- if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
-
- SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
- DAG.getUNDEF(WideVec.getValueType()),
- ShuffleVec.data());
- // At this point all of the data is stored at the bottom of the
- // register. We now need to save it to mem.
-
- // Find the largest store unit
- MVT StoreType = MVT::i8;
- for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
- tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
- MVT Tp = (MVT::SimpleValueType)tp;
- if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
- StoreType = Tp;
- }
- // Didn't find a legal store type.
- if (!TLI.isTypeLegal(StoreType))
- return SDValue();
-
- // Bitcast the original vector into a vector of store-size units
- EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
- StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
- assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
- SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
- SmallVector<SDValue, 8> Chains;
- SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
- TLI.getPointerTy());
- SDValue BasePtr = St->getBasePtr();
-
- // Perform one or more big stores into memory.
- unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
- for (unsigned I = 0; I < E; I++) {
- SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- StoreType, ShuffWide,
- DAG.getIntPtrConstant(I));
- SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
- BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
- Increment);
- Chains.push_back(Ch);
- }
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
- }
-
- if (!ISD::isNormalStore(St))
- return SDValue();
-
- // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
- // ARM stores of arguments in the same cache line.
- if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
- StVal.getNode()->hasOneUse()) {
- SelectionDAG &DAG = DCI.DAG;
- bool isBigEndian = DAG.getTargetLoweringInfo().isBigEndian();
- SDLoc DL(St);
- SDValue BasePtr = St->getBasePtr();
- SDValue NewST1 = DAG.getStore(St->getChain(), DL,
- StVal.getNode()->getOperand(isBigEndian ? 1 : 0 ),
- BasePtr, St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
-
- SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
- DAG.getConstant(4, MVT::i32));
- return DAG.getStore(NewST1.getValue(0), DL,
- StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
- OffsetPtr, St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(),
- std::min(4U, St->getAlignment() / 2));
- }
-
- if (StVal.getValueType() != MVT::i64 ||
- StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return SDValue();
-
- // Bitcast an i64 store extracted from a vector to f64.
- // Otherwise, the i64 value will be legalized to a pair of i32 values.
- SelectionDAG &DAG = DCI.DAG;
- SDLoc dl(StVal);
- SDValue IntVec = StVal.getOperand(0);
- EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
- IntVec.getValueType().getVectorNumElements());
- SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
- SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
- Vec, StVal.getOperand(1));
- dl = SDLoc(N);
- SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
- // Make the DAGCombiner fold the bitcasts.
- DCI.AddToWorklist(Vec.getNode());
- DCI.AddToWorklist(ExtElt.getNode());
- DCI.AddToWorklist(V.getNode());
- return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment(),
- St->getAAInfo());
-}
-
/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads. If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
@@ -9016,18 +8865,20 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
DAG.getUNDEF(VT), NewMask.data());
}
-/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and
-/// NEON load/store intrinsics to merge base address updates.
+/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
+/// NEON load/store intrinsics, and generic vector load/stores, to merge
+/// base address updates.
+/// For generic load/stores, the memory type is assumed to be a vector.
+/// The caller is assumed to have checked legality.
static SDValue CombineBaseUpdate(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
- if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
- return SDValue();
-
SelectionDAG &DAG = DCI.DAG;
- bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
- N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
- unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
+ const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
+ const bool isStore = N->getOpcode() == ISD::STORE;
+ const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
SDValue Addr = N->getOperand(AddrOpIdx);
+ MemSDNode *MemN = cast<MemSDNode>(N);
// Search for a use of the address operand that is an increment.
for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
@@ -9043,7 +8894,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
continue;
// Find the new opcode for the updating load/store.
- bool isLoad = true;
+ bool isLoadOp = true;
bool isLaneOp = false;
unsigned NewOpc = 0;
unsigned NumVecs = 0;
@@ -9066,19 +8917,19 @@ static SDValue CombineBaseUpdate(SDNode *N,
case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
NumVecs = 4; isLaneOp = true; break;
case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
- NumVecs = 1; isLoad = false; break;
+ NumVecs = 1; isLoadOp = false; break;
case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
- NumVecs = 2; isLoad = false; break;
+ NumVecs = 2; isLoadOp = false; break;
case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
- NumVecs = 3; isLoad = false; break;
+ NumVecs = 3; isLoadOp = false; break;
case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
- NumVecs = 4; isLoad = false; break;
+ NumVecs = 4; isLoadOp = false; break;
case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
- NumVecs = 2; isLoad = false; isLaneOp = true; break;
+ NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
- NumVecs = 3; isLoad = false; isLaneOp = true; break;
+ NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
- NumVecs = 4; isLoad = false; isLaneOp = true; break;
+ NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
}
} else {
isLaneOp = true;
@@ -9087,15 +8938,24 @@ static SDValue CombineBaseUpdate(SDNode *N,
case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
+ case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
+ NumVecs = 1; isLaneOp = false; break;
+ case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
+ NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
}
}
// Find the size of memory referenced by the load/store.
EVT VecTy;
- if (isLoad)
+ if (isLoadOp) {
VecTy = N->getValueType(0);
- else
+ } else if (isIntrinsic) {
VecTy = N->getOperand(AddrOpIdx+1).getValueType();
+ } else {
+ assert(isStore && "Node has to be a load, a store, or an intrinsic!");
+ VecTy = N->getOperand(1).getValueType();
+ }
+
unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
if (isLaneOp)
NumBytes /= VecTy.getVectorNumElements();
@@ -9112,32 +8972,99 @@ static SDValue CombineBaseUpdate(SDNode *N,
continue;
}
+ // OK, we found an ADD we can fold into the base update.
+ // Now, create a _UPD node, taking care of not breaking alignment.
+
+ EVT AlignedVecTy = VecTy;
+ unsigned Alignment = MemN->getAlignment();
+
+ // If this is a less-than-standard-aligned load/store, change the type to
+ // match the standard alignment.
+ // The alignment is overlooked when selecting _UPD variants; and it's
+ // easier to introduce bitcasts here than fix that.
+ // There are 3 ways to get to this base-update combine:
+ // - intrinsics: they are assumed to be properly aligned (to the standard
+ // alignment of the memory type), so we don't need to do anything.
+ // - ARMISD::VLDx nodes: they are only generated from the aforementioned
+ // intrinsics, so, likewise, there's nothing to do.
+ // - generic load/store instructions: the alignment is specified as an
+ // explicit operand, rather than implicitly as the standard alignment
+    //   of the memory type (like the intrinsics).  We need to change the
+ // memory type to match the explicit alignment. That way, we don't
+ // generate non-standard-aligned ARMISD::VLDx nodes.
+ if (isa<LSBaseSDNode>(N)) {
+ if (Alignment == 0)
+ Alignment = 1;
+ if (Alignment < VecTy.getScalarSizeInBits() / 8) {
+ MVT EltTy = MVT::getIntegerVT(Alignment * 8);
+ assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
+ assert(!isLaneOp && "Unexpected generic load/store lane.");
+ unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
+ AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
+ }
+ // Don't set an explicit alignment on regular load/stores that we want
+ // to transform to VLD/VST 1_UPD nodes.
+ // This matches the behavior of regular load/stores, which only get an
+ // explicit alignment if the MMO alignment is larger than the standard
+ // alignment of the memory type.
+ // Intrinsics, however, always get an explicit alignment, set to the
+ // alignment of the MMO.
+ Alignment = 1;
+ }
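
The retyping above can be illustrated with standalone arithmetic (a sketch; VecTy here is a hypothetical stand-in, not LLVM's EVT): a 4-byte-aligned v2i64 access (16 bytes) is rewritten as v4i32, so the _UPD node's standard alignment matches the access's explicit alignment.

#include <cassert>

struct VecTy { unsigned EltBits, NumElts; };

VecTy realign(VecTy V, unsigned AlignBytes) {
  unsigned NumBytes = V.EltBits * V.NumElts / 8;
  if (AlignBytes >= V.EltBits / 8)
    return V;                         // already standard-aligned
  unsigned EltBits = AlignBytes * 8;  // MVT::getIntegerVT(Alignment * 8)
  assert(NumBytes % AlignBytes == 0 && "vector size must divide evenly");
  return VecTy{EltBits, NumBytes / AlignBytes};  // e.g. v2i64 @ 4 -> v4i32
}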
+
// Create the new updating load/store node.
+ // First, create an SDVTList for the new updating node's results.
EVT Tys[6];
- unsigned NumResultVecs = (isLoad ? NumVecs : 0);
+ unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
unsigned n;
for (n = 0; n < NumResultVecs; ++n)
- Tys[n] = VecTy;
+ Tys[n] = AlignedVecTy;
Tys[n++] = MVT::i32;
Tys[n] = MVT::Other;
SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
+
+ // Then, gather the new node's operands.
SmallVector<SDValue, 8> Ops;
Ops.push_back(N->getOperand(0)); // incoming chain
Ops.push_back(N->getOperand(AddrOpIdx));
Ops.push_back(Inc);
- for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
- Ops.push_back(N->getOperand(i));
+
+ if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
+ // Try to match the intrinsic's signature
+ Ops.push_back(StN->getValue());
+ } else {
+ // Loads (and of course intrinsics) match the intrinsics' signature,
+ // so just add all but the alignment operand.
+ for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
+ Ops.push_back(N->getOperand(i));
+ }
+
+ // For all node types, the alignment operand is always the last one.
+ Ops.push_back(DAG.getConstant(Alignment, MVT::i32));
+
+ // If this is a non-standard-aligned STORE, the penultimate operand is the
+ // stored value. Bitcast it to the aligned type.
+ if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
+ SDValue &StVal = Ops[Ops.size()-2];
+ StVal = DAG.getNode(ISD::BITCAST, SDLoc(N), AlignedVecTy, StVal);
}
- MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
+
SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
- Ops, MemInt->getMemoryVT(),
- MemInt->getMemOperand());
+ Ops, AlignedVecTy,
+ MemN->getMemOperand());
// Update the uses.
- std::vector<SDValue> NewResults;
- for (unsigned i = 0; i < NumResultVecs; ++i) {
+ SmallVector<SDValue, 5> NewResults;
+ for (unsigned i = 0; i < NumResultVecs; ++i)
NewResults.push_back(SDValue(UpdN.getNode(), i));
+
+  // If this is a non-standard-aligned LOAD, the first result is the loaded
+ // value. Bitcast it to the expected result type.
+ if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
+ SDValue &LdVal = NewResults[0];
+ LdVal = DAG.getNode(ISD::BITCAST, SDLoc(N), VecTy, LdVal);
}
+
NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
DCI.CombineTo(N, NewResults);
DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
@@ -9147,6 +9074,14 @@ static SDValue CombineBaseUpdate(SDNode *N,
return SDValue();
}
+static SDValue PerformVLDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+ return SDValue();
+
+ return CombineBaseUpdate(N, DCI);
+}
+
/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
@@ -9260,6 +9195,164 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
+static SDValue PerformLOADCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT VT = N->getValueType(0);
+
+ // If this is a legal vector load, try to combine it into a VLD1_UPD.
+ if (ISD::isNormalLoad(N) && VT.isVector() &&
+ DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return CombineBaseUpdate(N, DCI);
+
+ return SDValue();
+}
+
+/// PerformSTORECombine - Target-specific dag combine xforms for
+/// ISD::STORE.
+static SDValue PerformSTORECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ StoreSDNode *St = cast<StoreSDNode>(N);
+ if (St->isVolatile())
+ return SDValue();
+
+ // Optimize trunc store (of multiple scalars) to shuffle and store. First,
+ // pack all of the elements in one place. Next, store to memory in fewer
+ // chunks.
+ SDValue StVal = St->getValue();
+ EVT VT = StVal.getValueType();
+ if (St->isTruncatingStore() && VT.isVector()) {
+ SelectionDAG &DAG = DCI.DAG;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT StVT = St->getMemoryVT();
+ unsigned NumElems = VT.getVectorNumElements();
+ assert(StVT != VT && "Cannot truncate to the same type");
+ unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
+
+ // From, To sizes and ElemCount must be pow of two
+ if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
+
+ // We are going to use the original vector elt for storing.
+ // Accumulated smaller vector elements must be a multiple of the store size.
+ if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
+
+ unsigned SizeRatio = FromEltSz / ToEltSz;
+ assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
+
+ // Create a type on which we perform the shuffle.
+ EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
+ NumElems*SizeRatio);
+ assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
+
+ SDLoc DL(St);
+ SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
+ SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
+ for (unsigned i = 0; i < NumElems; ++i)
+ ShuffleVec[i] = TLI.isBigEndian() ? (i+1) * SizeRatio - 1 : i * SizeRatio;
+
+ // Can't shuffle using an illegal type.
+ if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
+
+ SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
+ DAG.getUNDEF(WideVec.getValueType()),
+ ShuffleVec.data());
+ // At this point all of the data is stored at the bottom of the
+ // register. We now need to save it to mem.
+
+ // Find the largest store unit
+ MVT StoreType = MVT::i8;
+ for (MVT Tp : MVT::integer_valuetypes()) {
+ if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
+ StoreType = Tp;
+ }
+ // Didn't find a legal store type.
+ if (!TLI.isTypeLegal(StoreType))
+ return SDValue();
+
+ // Bitcast the original vector into a vector of store-size units
+ EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
+ StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
+ assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
+ SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
+ SmallVector<SDValue, 8> Chains;
+ SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
+ TLI.getPointerTy());
+ SDValue BasePtr = St->getBasePtr();
+
+ // Perform one or more big stores into memory.
+ unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
+ for (unsigned I = 0; I < E; I++) {
+ SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
+ StoreType, ShuffWide,
+ DAG.getIntPtrConstant(I));
+ SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
+ St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), St->getAlignment());
+ BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
+ Increment);
+ Chains.push_back(Ch);
+ }
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ }
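
The shuffle mask built above can be checked in isolation (illustrative): truncating v4i32 to v4i8 views the source as v16i8 (SizeRatio = 4) and gathers byte 0 of each i32 lane on little-endian targets, leaving the remaining lanes undef.

#include <cstdio>
#include <vector>

int main() {
  const unsigned NumElems = 4, SizeRatio = 4;
  const bool BigEndian = false;
  std::vector<int> Mask(NumElems * SizeRatio, -1);  // -1 == undef lane
  for (unsigned i = 0; i < NumElems; ++i)
    Mask[i] = BigEndian ? (i + 1) * SizeRatio - 1 : i * SizeRatio;
  for (int m : Mask)
    std::printf("%d ", m);  // prints: 0 4 8 12 -1 -1 ...
  std::printf("\n");
  return 0;
}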
+
+ if (!ISD::isNormalStore(St))
+ return SDValue();
+
+ // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
+ // ARM stores of arguments in the same cache line.
+ if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
+ StVal.getNode()->hasOneUse()) {
+ SelectionDAG &DAG = DCI.DAG;
+ bool isBigEndian = DAG.getTargetLoweringInfo().isBigEndian();
+ SDLoc DL(St);
+ SDValue BasePtr = St->getBasePtr();
+ SDValue NewST1 = DAG.getStore(St->getChain(), DL,
+ StVal.getNode()->getOperand(isBigEndian ? 1 : 0 ),
+ BasePtr, St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), St->getAlignment());
+
+ SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
+ DAG.getConstant(4, MVT::i32));
+ return DAG.getStore(NewST1.getValue(0), DL,
+ StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
+ OffsetPtr, St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(),
+ std::min(4U, St->getAlignment() / 2));
+ }
+
+ if (StVal.getValueType() == MVT::i64 &&
+ StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+
+ // Bitcast an i64 store extracted from a vector to f64.
+ // Otherwise, the i64 value will be legalized to a pair of i32 values.
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(StVal);
+ SDValue IntVec = StVal.getOperand(0);
+ EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
+ IntVec.getValueType().getVectorNumElements());
+ SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
+ SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
+ Vec, StVal.getOperand(1));
+ dl = SDLoc(N);
+ SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(Vec.getNode());
+ DCI.AddToWorklist(ExtElt.getNode());
+ DCI.AddToWorklist(V.getNode());
+ return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
+ St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), St->getAlignment(),
+ St->getAAInfo());
+ }
+
+ // If this is a legal vector store, try to combine it into a VST1_UPD.
+ if (ISD::isNormalStore(N) && VT.isVector() &&
+ DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return CombineBaseUpdate(N, DCI);
+
+ return SDValue();
+}
+
// isConstVecPow2 - Return true if each vector element is a power of 2, all
// elements are the same constant, C, and Log2(C) ranges from 1 to 32.
static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C)
@@ -9316,16 +9409,18 @@ static SDValue PerformVCVTCombine(SDNode *N,
MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
- if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) {
+ unsigned NumLanes = Op.getValueType().getVectorNumElements();
+ if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32 ||
+ NumLanes > 4) {
// These instructions only exist converting from f32 to i32. We can handle
// smaller integers by generating an extra truncate, but larger ones would
- // be lossy.
+    // be lossy. We also can't handle more than 4 lanes, since these instructions
+ // only support v2i32/v4i32 types.
return SDValue();
}
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
Intrinsic::arm_neon_vcvtfp2fxu;
- unsigned NumLanes = Op.getValueType().getVectorNumElements();
SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
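
Per lane, the fixed-point conversion the intrinsic performs behaves like this scalar sketch (illustrative): VCVT.S32.F32 with #fbits multiplies by 2^fbits and truncates, and the guard added above restricts the combine to the v2i32/v4i32 forms that actually exist.

#include <cstdint>

int32_t cvtFpToFixed(float x, unsigned fbits) {
  // fbits is assumed to be in [1, 32), as the instruction requires.
  return (int32_t)(x * (float)(1u << fbits));
}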
@@ -9848,10 +9943,11 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
+ case ISD::LOAD: return PerformLOADCombine(N, DCI);
case ARMISD::VLD2DUP:
case ARMISD::VLD3DUP:
case ARMISD::VLD4DUP:
- return CombineBaseUpdate(N, DCI);
+ return PerformVLDCombine(N, DCI);
case ARMISD::BUILD_VECTOR:
return PerformARMBUILD_VECTORCombine(N, DCI);
case ISD::INTRINSIC_VOID:
@@ -9871,7 +9967,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane:
- return CombineBaseUpdate(N, DCI);
+ return PerformVLDCombine(N, DCI);
default: break;
}
break;
@@ -9934,10 +10030,8 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
const Function *F = MF.getFunction();
// See if we can use NEON instructions for this...
- if ((!IsMemset || ZeroMemset) &&
- Subtarget->hasNEON() &&
- !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoImplicitFloat)) {
+ if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
+ !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
@@ -10535,7 +10629,8 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
RCPair
-ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+ARMTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
// GCC ARM Constraint Letters
@@ -10581,7 +10676,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
if (StringRef("{cc}").equals_lower(Constraint))
return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
@@ -10861,11 +10956,7 @@ bool ARM::isBitFieldInvertedMask(unsigned v) {
// there can be 1's on either or both "outsides", all the "inside"
// bits must be 0's
- unsigned TO = CountTrailingOnes_32(v);
- unsigned LO = CountLeadingOnes_32(v);
- v = (v >> TO) << TO;
- v = (v << LO) >> LO;
- return v == 0;
+ return isShiftedMask_32(~v);
}
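
The rewrite relies on the identity that v is a bit-field-inverted mask exactly when ~v is one contiguous run of set bits. An equivalent standalone check (illustrative; isShiftedMask_32 in MathExtras.h is the real helper):

#include <cstdint>

bool isShiftedMask32(uint32_t x) {
  // x | (x - 1) fills in the low zeros; adding 1 must then clear everything.
  return x != 0 && (((x | (x - 1)) + 1) & (x | (x - 1))) == 0;
}

bool isBitFieldInvertedMaskRef(uint32_t v) {
  return isShiftedMask32(~v);  // e.g. v = 0xF00000FF -> ~v = 0x0FFFFF00
}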
/// isFPImmLegal - Returns true if the target can instruction select the
@@ -11114,7 +11205,7 @@ bool ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
- return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO;
+ return Subtarget->isTargetMachO();
}
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
@@ -11274,7 +11365,9 @@ static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
return (Members > 0 && Members <= 4);
}
-/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate.
+/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
+/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
+/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
if (getEffectiveCallingConv(CallConv, isVarArg) !=
@@ -11283,7 +11376,9 @@ bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
HABaseType Base = HA_UNKNOWN;
uint64_t Members = 0;
- bool result = isHomogeneousAggregate(Ty, Base, Members);
- DEBUG(dbgs() << "isHA: " << result << " "; Ty->dump());
- return result;
+ bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
+ DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
+
+ bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
+ return IsHA || IsIntArray;
}
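
Illustrative C++ types (assumptions, not taken from the source) for what the widened predicate now accepts: a classic homogeneous aggregate still qualifies, and plain integer arrays such as [3 x i32] now also request consecutive registers, letting front-ends skip explicit padding under AAPCS.

struct FloatHA { float f[4]; };  // homogeneous aggregate: 4 x float
struct IntArr  { int   v[3]; };  // lowered as [3 x i32]: now also accepted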
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 89b0c31..ec1407d 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -232,7 +232,8 @@ namespace llvm {
class ARMTargetLowering : public TargetLowering {
public:
- explicit ARMTargetLowering(const TargetMachine &TM);
+ explicit ARMTargetLowering(const TargetMachine &TM,
+ const ARMSubtarget &STI);
unsigned getJumpTableEncoding() const override;
@@ -332,9 +333,10 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
@@ -352,10 +354,6 @@ namespace llvm {
/// specified value type.
const TargetRegisterClass *getRegClassFor(MVT VT) const override;
- /// getMaximalGlobalOffset - Returns the maximal possible offset which can
- /// be used for loads / stores from the global.
- unsigned getMaximalGlobalOffset() const override;
-
/// Returns true if a cast between SrcAS and DestAS is a noop.
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
// Addrspacecasts are always noops.
@@ -414,8 +412,9 @@ namespace llvm {
unsigned &Cost) const override;
protected:
- std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const override;
+ std::pair<const TargetRegisterClass *, uint8_t>
+ findRepresentativeClass(const TargetRegisterInfo *TRI,
+ MVT VT) const override;
private:
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index 17d1ffa..bc617f0 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -93,7 +93,7 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const {
MachineFunction &MF = *MI->getParent()->getParent();
- const ARMSubtarget &Subtarget = MF.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget &Subtarget = MF.getSubtarget<ARMSubtarget>();
if (!Subtarget.useMovt(MF)) {
if (RM == Reloc::PIC_)
@@ -144,21 +144,20 @@ namespace {
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->getGlobalBaseReg() == 0)
return false;
-
- const ARMTargetMachine *TM =
- static_cast<const ARMTargetMachine *>(&MF.getTarget());
- if (TM->getRelocationModel() != Reloc::PIC_)
+ const ARMSubtarget &STI =
+ static_cast<const ARMSubtarget &>(MF.getSubtarget());
+ const TargetMachine &TM = MF.getTarget();
+ if (TM.getRelocationModel() != Reloc::PIC_)
return false;
LLVMContext *Context = &MF.getFunction()->getContext();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
- unsigned PCAdj = TM->getSubtarget<ARMSubtarget>().isThumb() ? 4 : 8;
+ unsigned PCAdj = STI.isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(
*Context, "_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, PCAdj);
- unsigned Align =
- TM->getSubtargetImpl()->getDataLayout()->getPrefTypeAlignment(
- Type::getInt32PtrTy(*Context));
+ unsigned Align = TM.getDataLayout()->getPrefTypeAlignment(
+ Type::getInt32PtrTy(*Context));
unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align);
MachineBasicBlock &FirstMBB = MF.front();
@@ -166,9 +165,8 @@ namespace {
DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
unsigned TempReg =
MF.getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
- unsigned Opc = TM->getSubtarget<ARMSubtarget>().isThumb2() ?
- ARM::t2LDRpci : ARM::LDRcp;
- const TargetInstrInfo &TII = *TM->getSubtargetImpl()->getInstrInfo();
+ unsigned Opc = STI.isThumb2() ? ARM::t2LDRpci : ARM::LDRcp;
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineInstrBuilder MIB = BuildMI(FirstMBB, MBBI, DL,
TII.get(Opc), TempReg)
.addConstantPoolIndex(Idx);
@@ -178,15 +176,13 @@ namespace {
// Fix the GOT address by adding pc.
unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
- Opc = TM->getSubtarget<ARMSubtarget>().isThumb2() ? ARM::tPICADD
- : ARM::PICADD;
+ Opc = STI.isThumb2() ? ARM::tPICADD : ARM::PICADD;
MIB = BuildMI(FirstMBB, MBBI, DL, TII.get(Opc), GlobalBaseReg)
.addReg(TempReg)
.addImm(ARMPCLabelIndex);
if (Opc == ARM::PICADD)
AddDefaultPred(MIB);
-
return true;
}
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 3177114..126c552 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -263,8 +263,6 @@ def IsNotMClass : Predicate<"!Subtarget->isMClass()">,
"!armv*m">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb", "arm-mode">;
-def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
-def IsNotIOS : Predicate<"!Subtarget->isTargetIOS()">;
def IsMachO : Predicate<"Subtarget->isTargetMachO()">;
def IsNotMachO : Predicate<"!Subtarget->isTargetMachO()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
@@ -333,24 +331,6 @@ def imm16_31 : ImmLeaf<i32, [{
return (int32_t)Imm >= 16 && (int32_t)Imm < 32;
}]>;
-def so_imm_neg_asmoperand : AsmOperandClass { let Name = "ARMSOImmNeg"; }
-def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
- unsigned Value = -(unsigned)N->getZExtValue();
- return Value && ARM_AM::getSOImmVal(Value) != -1;
- }], imm_neg_XFORM> {
- let ParserMatchClass = so_imm_neg_asmoperand;
-}
-
-// Note: this pattern doesn't require an encoder method and such, as it's
-// only used on aliases (Pat<> and InstAlias<>). The actual encoding
-// is handled by the destination instructions, which use so_imm.
-def so_imm_not_asmoperand : AsmOperandClass { let Name = "ARMSOImmNot"; }
-def so_imm_not : Operand<i32>, PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
- }], imm_not_XFORM> {
- let ParserMatchClass = so_imm_not_asmoperand;
-}
-
// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
def sext_16_node : PatLeaf<(i32 GPR:$a), [{
return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
@@ -530,7 +510,7 @@ def shift_imm : Operand<i32> {
let ParserMatchClass = ShifterImmAsmOperand;
}
-// shifter_operand operands: so_reg_reg, so_reg_imm, and so_imm.
+// shifter_operand operands: so_reg_reg, so_reg_imm, and mod_imm.
def ShiftedRegAsmOperand : AsmOperandClass { let Name = "RegShiftedReg"; }
def so_reg_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectRegShifterOperand",
@@ -575,27 +555,43 @@ def shift_so_reg_imm : Operand<i32>, // reg reg imm
let MIOperandInfo = (ops GPR, i32imm);
}
-
-// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
-// 8-bit immediate rotated by an arbitrary number of bits.
-def SOImmAsmOperand: ImmAsmOperand { let Name = "ARMSOImm"; }
-def so_imm : Operand<i32>, ImmLeaf<i32, [{
+// mod_imm: match a 32-bit immediate operand, which can be encoded into
+// a 12-bit immediate; an 8-bit integer and a 4-bit rotator (See ARMARM
+// - "Modified Immediate Constants"). Within the MC layer we keep this
+// immediate in its encoded form.
+def ModImmAsmOperand: AsmOperandClass {
+ let Name = "ModImm";
+ let ParserMethod = "parseModImm";
+}
+def mod_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
- let EncoderMethod = "getSOImmOpValue";
- let ParserMatchClass = SOImmAsmOperand;
- let DecoderMethod = "DecodeSOImmOperand";
+ let EncoderMethod = "getModImmOpValue";
+ let PrintMethod = "printModImmOperand";
+ let ParserMatchClass = ModImmAsmOperand;
}
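
The encoding rule the comment cites can be made concrete with a small reference encoder (a sketch, not the MC implementation; getModImmVal is a hypothetical name): a value is representable if some even right-rotation of an 8-bit constant produces it, and the 12-bit field packs the 4-bit rotation with the 8-bit value.

#include <cstdint>

int getModImmVal(uint32_t V) {
  for (unsigned rot = 0; rot < 32; rot += 2) {
    // Rotate left by rot to undo "imm8 ROR rot".
    uint32_t r = rot ? ((V << rot) | (V >> (32 - rot))) : V;
    if (r <= 0xFF)
      return (int)(((rot / 2) << 8) | r);  // e.g. 0xFF000000 -> 0x4FF
  }
  return -1;  // not encodable as a modified immediate
}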
-// Break so_imm's up into two pieces. This handles immediates with up to 16
-// bits set in them. This uses so_imm2part to match and so_imm2part_[12] to
-// get the first/second pieces.
-def so_imm2part : PatLeaf<(imm), [{
- return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
-}]>;
+// Note: the patterns mod_imm_not and mod_imm_neg do not require an encoder
+// method or the like, as they are only used on aliases (Pat<> and InstAlias<>).
+// The actual parsing, encoding, and decoding are handled by the destination
+// instructions, which use mod_imm.
-/// arm_i32imm - True for +V6T2, or true only if so_imm2part is true.
-///
+def ModImmNotAsmOperand : AsmOperandClass { let Name = "ModImmNot"; }
+def mod_imm_not : Operand<i32>, PatLeaf<(imm), [{
+ return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
+ }], imm_not_XFORM> {
+ let ParserMatchClass = ModImmNotAsmOperand;
+}
+
+def ModImmNegAsmOperand : AsmOperandClass { let Name = "ModImmNeg"; }
+def mod_imm_neg : Operand<i32>, PatLeaf<(imm), [{
+ unsigned Value = -(unsigned)N->getZExtValue();
+ return Value && ARM_AM::getSOImmVal(Value) != -1;
+ }], imm_neg_XFORM> {
+ let ParserMatchClass = ModImmNegAsmOperand;
+}
+
+/// arm_i32imm - True for +V6T2, or when isSOImmTwoPartVal() is true.
def arm_i32imm : PatLeaf<(imm), [{
if (Subtarget->useMovt(*MF))
return true;
@@ -1204,7 +1200,7 @@ include "ARMInstrFormats.td"
// Multiclass helpers...
//
-/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
+/// AsI1_bin_irs - Defines a set of (op r, {mod_imm|r|so_reg}) patterns for a
/// binop that produces a value.
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_bin_irs<bits<4> opcod, string opc,
@@ -1213,9 +1209,9 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc,
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>,
+ [(set GPR:$Rd, (opnode GPR:$Rn, mod_imm:$imm))]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
@@ -1286,9 +1282,9 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, (opnode so_imm:$imm, GPR:$Rn))]>,
+ [(set GPR:$Rd, (opnode mod_imm:$imm, GPR:$Rn))]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
@@ -1356,9 +1352,9 @@ let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
InstrItinClass iis, PatFrag opnode,
bit Commutable = 0> {
- def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
+ def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm, pred:$p),
4, iii,
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, mod_imm:$imm))]>,
Sched<[WriteALU, ReadALU]>;
def rr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, pred:$p),
@@ -1389,9 +1385,9 @@ let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
InstrItinClass iis, PatFrag opnode,
bit Commutable = 0> {
- def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
+ def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm, pred:$p),
4, iii,
- [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>,
+ [(set GPR:$Rd, CPSR, (opnode mod_imm:$imm, GPR:$Rn))]>,
Sched<[WriteALU, ReadALU]>;
def rsi : ARMPseudoInst<(outs GPR:$Rd),
@@ -1410,16 +1406,16 @@ multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
}
}
-/// AI1_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
+/// AI1_cmp_irs - Defines a set of (op r, {mod_imm|r|so_reg}) cmp / test
/// patterns. Similar to AsI1_bin_irs except the instruction does not produce
/// an explicit result, only implicitly sets CPSR.
let isCompare = 1, Defs = [CPSR] in {
multiclass AI1_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
- def ri : AI1<opcod, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii,
+ def ri : AI1<opcod, (outs), (ins GPR:$Rn, mod_imm:$imm), DPFrm, iii,
opc, "\t$Rn, $imm",
- [(opnode GPR:$Rn, so_imm:$imm)]>,
+ [(opnode GPR:$Rn, mod_imm:$imm)]>,
Sched<[WriteCMP, ReadALU]> {
bits<4> Rn;
bits<12> imm;
@@ -1547,9 +1543,9 @@ let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm, CPSR))]>,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, mod_imm:$imm, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
@@ -1617,9 +1613,9 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn, CPSR))]>,
+ [(set GPR:$Rd, CPSR, (opnode mod_imm:$imm, GPR:$Rn, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
@@ -1813,7 +1809,7 @@ multiclass AI_str1nopc<bit isByte, string opc, InstrItinClass iii,
/// the function. The first operand is the ID# for this instruction, the second
/// is the index into the MachineConstantPool that this is, the third is the
/// size in bytes of this constant pool entry.
-let neverHasSideEffects = 1, isNotDuplicable = 1 in
+let hasSideEffects = 0, isNotDuplicable = 1 in
def CONSTPOOL_ENTRY :
PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
i32imm:$size), NoItinerary, []>;
@@ -2057,7 +2053,7 @@ def PICSTRB : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
-let neverHasSideEffects = 1, isReMaterializable = 1 in
+let hasSideEffects = 0, isReMaterializable = 1 in
// The 'adr' mnemonic encodes differently if the label is before or after
// the instruction. The {24-21} opcode bits are set by the fixup, as we don't
// know until then which form of the instruction will be used.
@@ -2387,6 +2383,33 @@ def RFEIB_UPD : RFEI<1, "rfeib\t$Rn!"> {
let Inst{24-23} = 0b11;
}
+// Hypervisor Call is a system instruction
+let isCall = 1 in {
+def HVC : AInoP< (outs), (ins imm0_65535:$imm), BrFrm, NoItinerary,
+ "hvc", "\t$imm", []>,
+ Requires<[IsARM, HasVirtualization]> {
+ bits<16> imm;
+
+  // Even though HVC isn't predicable, its encoding includes a condition field.
+  // The instruction is undefined if the condition field is 0xf; otherwise it is
+ // unpredictable if it isn't condition AL (0xe).
+ let Inst{31-28} = 0b1110;
+ let Unpredictable{31-28} = 0b1111;
+ let Inst{27-24} = 0b0001;
+ let Inst{23-20} = 0b0100;
+ let Inst{19-8} = imm{15-4};
+ let Inst{7-4} = 0b0111;
+ let Inst{3-0} = imm{3-0};
+}
+}
+
+// Return from exception in Hypervisor mode.
+let isReturn = 1, isBarrier = 1, isTerminator = 1, Defs = [PC] in
+def ERET : ABI<0b0001, (outs), (ins), NoItinerary, "eret", "", []>,
+ Requires<[IsARM, HasVirtualization]> {
+ let Inst{23-0} = 0b011000000000000001101110;
+}
+
//===----------------------------------------------------------------------===//
// Load / Store Instructions.
//
@@ -2404,7 +2427,7 @@ defm STRB : AI_str1nopc<1, "strb", IIC_iStore_bh_r, IIC_iStore_bh_si,
BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
// Special LDR for loads from non-pc-relative constpools.
-let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1,
+let canFoldAsLoad = 1, mayLoad = 1, hasSideEffects = 0,
isReMaterializable = 1, isCodeGenOnly = 1 in
def LDRcp : AI2ldst<0b010, 1, 0, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, IIC_iLoad_r, "ldr", "\t$Rt, $addr",
@@ -2431,7 +2454,7 @@ def LDRSB : AI3ld<0b1101, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrsb", "\t$Rt, $addr",
[(set GPR:$Rt, (sextloadi8 addrmode3:$addr))]>;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rt, GPR:$Rt2), (ins addrmode3:$addr),
LdMiscFrm, IIC_iLoad_d_r, "ldrd", "\t$Rt, $Rt2, $addr", []>,
@@ -2508,7 +2531,7 @@ multiclass AI2_ldridx<bit isByte, string opc,
}
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
// FIXME: for LDR_PRE_REG etc. the itinerary should be either IIC_iLoad_ru or
// IIC_iLoad_siu depending on whether the offset register is shifted.
defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_iu, IIC_iLoad_ru>;
@@ -2544,7 +2567,7 @@ multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> {
}
}
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
defm LDRH : AI3_ldridx<0b1011, "ldrh", IIC_iLoad_bh_ru>;
defm LDRSH : AI3_ldridx<0b1111, "ldrsh", IIC_iLoad_bh_ru>;
defm LDRSB : AI3_ldridx<0b1101, "ldrsb", IIC_iLoad_bh_ru>;
@@ -2577,10 +2600,10 @@ def LDRD_POST: AI3ldstidx<0b1101, 0, 0, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode3Instruction";
}
} // hasExtraDefRegAllocReq = 1
-} // mayLoad = 1, neverHasSideEffects = 1
+} // mayLoad = 1, hasSideEffects = 0
// LDRT, LDRBT, LDRSBT, LDRHT, LDRSHT.
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
def LDRT_POST_REG : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, LdFrm, IIC_iLoad_ru,
@@ -2699,7 +2722,7 @@ def STRH : AI3str<0b1011, (outs), (ins GPR:$Rt, addrmode3:$addr), StMiscFrm,
[(truncstorei16 GPR:$Rt, addrmode3:$addr)]>;
// Store doubleword
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in {
def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$Rt2, addrmode3:$addr),
StMiscFrm, IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", []>,
Requires<[IsARM, HasV5TE]> {
@@ -2772,7 +2795,7 @@ multiclass AI2_stridx<bit isByte, string opc,
}
}
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
// FIXME: for STR_PRE_REG etc. the itinerary should be either IIC_iStore_ru or
// IIC_iStore_siu depending on whether the offset register is shifted.
defm STR : AI2_stridx<0, "str", IIC_iStore_iu, IIC_iStore_ru>;
@@ -2864,7 +2887,7 @@ def STRH_POST : AI3ldstidx<0b1011, 0, 0, (outs GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode3Instruction";
}
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in {
def STRD_PRE : AI3ldstidx<0b1111, 0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rt2, addrmode3_pre:$addr),
IndexModePre, StMiscFrm, IIC_iStore_d_ru,
@@ -2894,7 +2917,7 @@ def STRD_POST: AI3ldstidx<0b1111, 0, 0, (outs GPR:$Rn_wb),
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
-} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
+} // mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1
// STRT, STRBT, and STRHT
@@ -2938,7 +2961,7 @@ def STRBT_POST
: ARMAsmPseudo<"strbt${q} $Rt, $addr",
(ins GPR:$Rt, addr_offset_none:$addr, pred:$q)>;
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
def STRT_POST_REG : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, StFrm, IIC_iStore_ru,
@@ -3103,17 +3126,18 @@ multiclass arm_ldst_mult<string asm, string sfx, bit L_bit, bit P_bit, Format f,
}
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm LDM : arm_ldst_mult<"ldm", "", 1, 0, LdStMulFrm, IIC_iLoad_m,
- IIC_iLoad_mu>;
+ IIC_iLoad_mu>, ComplexDeprecationPredicate<"ARMLoad">;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm STM : arm_ldst_mult<"stm", "", 0, 0, LdStMulFrm, IIC_iStore_m,
- IIC_iStore_mu>;
+ IIC_iStore_mu>,
+ ComplexDeprecationPredicate<"ARMStore">;
-} // neverHasSideEffects
+} // hasSideEffects
// FIXME: remove when we have a way to mark an MI with these properties.
// FIXME: Should pc be an implicit operand like PICADD, etc?
@@ -3139,7 +3163,7 @@ defm sysSTM : arm_ldst_mult<"stm", " ^", 0, 1, LdStMulFrm, IIC_iStore_m,
// Move Instructions.
//
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
"mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
@@ -3153,7 +3177,7 @@ def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
}
// A version for the smaller set of tail call registers.
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
@@ -3197,8 +3221,8 @@ def MOVsi : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_imm:$src),
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
-def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm, IIC_iMOVi,
- "mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP,
+def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins mod_imm:$imm), DPFrm, IIC_iMOVi,
+ "mov", "\t$Rd, $imm", [(set GPR:$Rd, mod_imm:$imm)]>, UnaryDP,
Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
@@ -3408,10 +3432,10 @@ defm RSC : AI1_rsc_irs<0b0111, "rsc",
// assume opposite meanings of the carry flag (i.e., carry == !borrow).
// See the definition of AddWithCarry() in the ARM ARM A2.2.1 for the gory
// details.
-def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
- (SUBri GPR:$src, so_imm_neg:$imm)>;
-def : ARMPat<(ARMaddc GPR:$src, so_imm_neg:$imm),
- (SUBSri GPR:$src, so_imm_neg:$imm)>;
+def : ARMPat<(add GPR:$src, mod_imm_neg:$imm),
+ (SUBri GPR:$src, mod_imm_neg:$imm)>;
+def : ARMPat<(ARMaddc GPR:$src, mod_imm_neg:$imm),
+ (SUBSri GPR:$src, mod_imm_neg:$imm)>;
def : ARMPat<(add GPR:$src, imm0_65535_neg:$imm),
(SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>,
@@ -3423,8 +3447,8 @@ def : ARMPat<(ARMaddc GPR:$src, imm0_65535_neg:$imm),
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
-def : ARMPat<(ARMadde GPR:$src, so_imm_not:$imm, CPSR),
- (SBCri GPR:$src, so_imm_not:$imm)>;
+def : ARMPat<(ARMadde GPR:$src, mod_imm_not:$imm, CPSR),
+ (SBCri GPR:$src, mod_imm_not:$imm)>;
def : ARMPat<(ARMadde GPR:$src, imm0_65535_neg:$imm, CPSR),
(SBCrr GPR:$src, (MOVi16 (imm_not_XFORM imm:$imm)))>,
Requires<[IsARM, HasV6T2]>;
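
The claim above is easy to verify: in two's complement, ~imm == -imm - 1, so a + ~imm + C == a - imm - (1 - C), which is exactly what SBC computes under ARM's carry == !borrow convention. A minimal self-check (illustrative only):

    #include <cassert>
    #include <cstdint>

    // Illustrative check of the identity behind the SBCri pattern above:
    // adde(a, ~imm, C) == a - imm - (1 - C), with unsigned wraparound.
    int main() {
      uint32_t a = 1000, imm = 42;
      for (uint32_t C = 0; C <= 1; ++C)
        assert(a + ~imm + C == a - imm - (1 - C));
    }
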
@@ -3705,9 +3729,9 @@ def MVNsr : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_reg:$shift),
let Inst{3-0} = shift{3-0};
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
-def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
+def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins mod_imm:$imm), DPFrm,
IIC_iMVNi, "mvn", "\t$Rd, $imm",
- [(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP, Sched<[WriteALU]> {
+ [(set GPR:$Rd, mod_imm_not:$imm)]>,UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
@@ -3716,8 +3740,8 @@ def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
let Inst{11-0} = imm;
}
-def : ARMPat<(and GPR:$src, so_imm_not:$imm),
- (BICri GPR:$src, so_imm_not:$imm)>;
+def : ARMPat<(and GPR:$src, mod_imm_not:$imm),
+ (BICri GPR:$src, mod_imm_not:$imm)>;
//===----------------------------------------------------------------------===//
// Multiply Instructions.
@@ -3811,7 +3835,7 @@ def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
}
// Extra precision multiplies with low / high results
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCommutable = 1 in {
def SMULL : AsMul1I64<0b0000110, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
@@ -3878,7 +3902,7 @@ def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
Requires<[IsARM, NoV6]>;
}
-} // neverHasSideEffects
+} // hasSideEffects
// Most significant word multiply
def SMMUL : AMul2I <0b0111010, 0b0001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
@@ -4242,8 +4266,8 @@ defm CMP : AI1_cmp_irs<0b1010, "cmp",
BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
// ARMcmpZ can re-use the above instruction definitions.
-def : ARMPat<(ARMcmpZ GPR:$src, so_imm:$imm),
- (CMPri GPR:$src, so_imm:$imm)>;
+def : ARMPat<(ARMcmpZ GPR:$src, mod_imm:$imm),
+ (CMPri GPR:$src, mod_imm:$imm)>;
def : ARMPat<(ARMcmpZ GPR:$src, GPR:$rhs),
(CMPrr GPR:$src, GPR:$rhs)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_imm:$rhs),
@@ -4253,9 +4277,9 @@ def : ARMPat<(ARMcmpZ GPR:$src, so_reg_reg:$rhs),
// CMN register-integer
let isCompare = 1, Defs = [CPSR] in {
-def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iCMPi,
+def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, mod_imm:$imm), DPFrm, IIC_iCMPi,
"cmn", "\t$Rn, $imm",
- [(ARMcmn GPR:$Rn, so_imm:$imm)]>,
+ [(ARMcmn GPR:$Rn, mod_imm:$imm)]>,
Sched<[WriteCMP, ReadALU]> {
bits<4> Rn;
bits<12> imm;
@@ -4328,11 +4352,11 @@ def CMNzrsr : AI1<0b1011, (outs),
}
-def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
- (CMNri GPR:$src, so_imm_neg:$imm)>;
+def : ARMPat<(ARMcmp GPR:$src, mod_imm_neg:$imm),
+ (CMNri GPR:$src, mod_imm_neg:$imm)>;
-def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
- (CMNri GPR:$src, so_imm_neg:$imm)>;
+def : ARMPat<(ARMcmpZ GPR:$src, mod_imm_neg:$imm),
+ (CMNri GPR:$src, mod_imm_neg:$imm)>;
// Note that TST/TEQ don't set all the same flags that CMP does!
defm TST : AI1_cmp_irs<0b1000, "tst",
@@ -4359,7 +4383,7 @@ def BCCZi64 : PseudoInst<(outs),
// Conditional moves
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCommutable = 1, isSelect = 1 in
def MOVCCr : ARMPseudoInst<(outs GPR:$Rd),
@@ -4396,9 +4420,9 @@ def MOVCCi16
let isMoveImm = 1 in
def MOVCCi : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_imm:$imm, cmovpred:$p),
+ (ins GPR:$false, mod_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
- [(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm,
+ [(set GPR:$Rd, (ARMcmov GPR:$false, mod_imm:$imm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
@@ -4414,13 +4438,13 @@ def MOVCCi32imm
let isMoveImm = 1 in
def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
- (ins GPR:$false, so_imm:$imm, cmovpred:$p),
+ (ins GPR:$false, mod_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
- [(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm,
+ [(set GPR:$Rd, (ARMcmov GPR:$false, mod_imm_not:$imm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
-} // neverHasSideEffects
+} // hasSideEffects
//===----------------------------------------------------------------------===//
@@ -5074,7 +5098,7 @@ def MRSbanked : ABI<0b0001, (outs GPRnopc:$Rd), (ins banked_reg:$banked),
let Inst{23} = 0;
let Inst{22} = banked{5}; // R bit
- let Inst{21-20} = 0b10;
+ let Inst{21-20} = 0b00;
let Inst{19-16} = banked{3-0};
let Inst{15-12} = Rd;
let Inst{11-9} = 0b001;
@@ -5103,17 +5127,17 @@ def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
let Inst{3-0} = Rn;
}
-def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
- "msr", "\t$mask, $a", []> {
+def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, mod_imm:$imm), NoItinerary,
+ "msr", "\t$mask, $imm", []> {
bits<5> mask;
- bits<12> a;
+ bits<12> imm;
let Inst{23} = 0;
let Inst{22} = mask{4}; // R bit
let Inst{21-20} = 0b10;
let Inst{19-16} = mask{3-0};
let Inst{15-12} = 0b1111;
- let Inst{11-0} = a;
+ let Inst{11-0} = imm;
}
// However, the MSR (banked register) system instruction (ARMv7VE) *does* have a
@@ -5204,7 +5228,7 @@ let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
NoItinerary,
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsARM, IsIOS]>;
+ Requires<[IsARM]>;
}
// eh.sjlj.dispatchsetup pseudo-instruction.
@@ -5228,7 +5252,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in
// Large immediate handling.
-// 32-bit immediate using two piece so_imms or movw + movt.
+// 32-bit immediate using two piece mod_imms or movw + movt.
// This is a single pseudo instruction, the benefit is that it can be remat'd
// as a single unit instead of having to handle reg inputs.
// FIXME: Remove this when we can do generalized remat.
@@ -5257,6 +5281,7 @@ def LDRLIT_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
(ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsARM, DontUseMovt]>;
+let AddedComplexity = 10 in
def LDRLIT_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
NoItinerary,
[(set GPR:$dst,
@@ -5519,36 +5544,36 @@ def : MnemonicAlias<"uqsubaddx", "uqsax">;
// USAX == USUBADDX
def : MnemonicAlias<"usubaddx", "usax">;
-// "mov Rd, so_imm_not" can be handled via "mvn" in assembly, just like
+// "mov Rd, mod_imm_not" can be handled via "mvn" in assembly, just like
// for isel.
def : ARMInstAlias<"mov${s}${p} $Rd, $imm",
- (MVNi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
+ (MVNi rGPR:$Rd, mod_imm_not:$imm, pred:$p, cc_out:$s)>;
def : ARMInstAlias<"mvn${s}${p} $Rd, $imm",
- (MOVi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
+ (MOVi rGPR:$Rd, mod_imm_not:$imm, pred:$p, cc_out:$s)>;
// Same for AND <--> BIC
def : ARMInstAlias<"bic${s}${p} $Rd, $Rn, $imm",
- (ANDri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ (ANDri rGPR:$Rd, rGPR:$Rn, mod_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"bic${s}${p} $Rdn, $imm",
- (ANDri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ (ANDri rGPR:$Rdn, rGPR:$Rdn, mod_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"and${s}${p} $Rd, $Rn, $imm",
- (BICri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ (BICri rGPR:$Rd, rGPR:$Rn, mod_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"and${s}${p} $Rdn, $imm",
- (BICri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ (BICri rGPR:$Rdn, rGPR:$Rdn, mod_imm_not:$imm,
pred:$p, cc_out:$s)>;
-// Likewise, "add Rd, so_imm_neg" -> sub
+// Likewise, "add Rd, mod_imm_neg" -> sub
def : ARMInstAlias<"add${s}${p} $Rd, $Rn, $imm",
- (SUBri GPR:$Rd, GPR:$Rn, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
+ (SUBri GPR:$Rd, GPR:$Rn, mod_imm_neg:$imm, pred:$p, cc_out:$s)>;
def : ARMInstAlias<"add${s}${p} $Rd, $imm",
- (SUBri GPR:$Rd, GPR:$Rd, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
-// Same for CMP <--> CMN via so_imm_neg
+ (SUBri GPR:$Rd, GPR:$Rd, mod_imm_neg:$imm, pred:$p, cc_out:$s)>;
+// Same for CMP <--> CMN via mod_imm_neg
def : ARMInstAlias<"cmp${p} $Rd, $imm",
- (CMNri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
+ (CMNri rGPR:$Rd, mod_imm_neg:$imm, pred:$p)>;
def : ARMInstAlias<"cmn${p} $Rd, $imm",
- (CMPri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
+ (CMPri rGPR:$Rd, mod_imm_neg:$imm, pred:$p)>;
// The shifter forms of the MOV instruction are aliased to the ASR, LSL,
// LSR, ROR, and RRX instructions.
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index a0c627c..2a7b4b5 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -665,7 +665,7 @@ class VLDQQQQWBPseudo<InstrItinClass itin>
(ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
"$addr.addr = $wb, $src = $dst">;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// VLD1 : Vector Load (multiple single elements)
class VLD1D<bits<4> op7_4, string Dt, Operand AddrMode>
@@ -1023,7 +1023,7 @@ def VLD4q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
-} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+} // mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1
// Classes for VLD*LN pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
@@ -1106,7 +1106,7 @@ def : Pat<(vector_insert (v4f32 QPR:$src),
(f32 (load addrmode6:$addr)), imm:$lane),
(VLD1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// ...with address register writeback:
class VLD1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -1359,7 +1359,7 @@ def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
def VLD4LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
-} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+} // mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1
// VLD1DUP : Vector Load (single element to all lanes)
class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp,
@@ -1405,7 +1405,7 @@ def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load,
def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPq32 addrmode6:$addr)>;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// ...with address register writeback:
multiclass VLD1DUPWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
@@ -1609,9 +1609,9 @@ def VLD4DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
def VLD4DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
-} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+} // mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in {
// Classes for VST* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
@@ -2025,7 +2025,7 @@ def VST4q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
-} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
+} // mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1
// Classes for VST*LN pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
@@ -2129,7 +2129,7 @@ def VST1LNq8Pseudo_UPD : VST1QLNWBPseudo<v16i8, post_truncsti8, NEONvgetlaneu>;
def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
def VST1LNq32Pseudo_UPD : VST1QLNWBPseudo<v4i32, post_store, extractelt>;
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in {
// VST2LN : Vector Store (single 2-element structure from one lane)
class VST2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -2351,7 +2351,7 @@ def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32"> {
def VST4LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
-} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
+} // mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1
// Use vld1/vst1 for unaligned f64 load / store
def : Pat<(f64 (hword_alignedload addrmode6:$addr)),
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index a867844..3c62e0e 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -714,7 +714,7 @@ def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i,
//
// These require base address to be written back or one of the loaded regs.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
def tLDMIA : T1I<(outs), (ins tGPR:$Rn, pred:$p, reglist:$regs, variable_ops),
@@ -754,7 +754,7 @@ def tSTMIA_UPD : Thumb1I<(outs GPR:$wb),
let Inst{7-0} = regs;
}
-} // neverHasSideEffects
+} // hasSideEffects
def : InstAlias<"ldm${p} $Rn!, $regs",
(tLDMIA tGPR:$Rn, pred:$p, reglist:$regs)>,
@@ -888,7 +888,7 @@ def tADDrr : // A8.6.6 T1
"add", "\t$Rd, $Rn, $Rm",
[(set tGPR:$Rd, (add tGPR:$Rn, tGPR:$Rm))]>, Sched<[WriteALU]>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def tADDhirr : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPR:$Rm), IIC_iALUr,
"add", "\t$Rdn, $Rm", []>,
T1Special<{0,0,?,?}>, Sched<[WriteALU]> {
@@ -1048,7 +1048,7 @@ def : tInstAlias <"movs $Rdn, $imm",
// A7-73: MOV(2) - mov setting flag.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def tMOVr : Thumb1pI<(outs GPR:$Rd), (ins GPR:$Rm), AddrModeNone,
2, IIC_iMOVr,
"mov", "\t$Rd, $Rm", "", []>,
@@ -1070,7 +1070,7 @@ def tMOVSr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
let Inst{5-3} = Rm;
let Inst{2-0} = Rd;
}
-} // neverHasSideEffects
+} // hasSideEffects
// Multiply register
let isCommutable = 1 in
@@ -1248,7 +1248,7 @@ def tADR : T1I<(outs tGPR:$Rd), (ins t_adrlabel:$addr, pred:$p),
let DecoderMethod = "DecodeThumbAddSpecialReg";
}
-let neverHasSideEffects = 1, isReMaterializable = 1 in
+let hasSideEffects = 0, isReMaterializable = 1 in
def tLEApcrel : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, pred:$p),
2, IIC_iALUi, []>, Sched<[WriteALU]>;
@@ -1297,7 +1297,7 @@ def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
AddrModeNone, 0, IndexModeNone,
Pseudo, NoItinerary, "", "",
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsThumb, IsIOS]>;
+ Requires<[IsThumb]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -1375,6 +1375,17 @@ def : T1Pat<(zextloadi1 t_addrmode_rrs1:$addr),
def : T1Pat<(zextloadi1 t_addrmode_is1:$addr),
(tLDRBi t_addrmode_is1:$addr)>;
+// extload from the stack -> word load from the stack, as it avoids having to
+// materialize the base in a separate register. This only works when a word
+// load puts the byte/halfword value in the same place in the register as the
+// byte/halfword load would, i.e. when little-endian.
+def : T1Pat<(extloadi1 t_addrmode_sp:$addr), (tLDRspi t_addrmode_sp:$addr)>,
+ Requires<[IsThumb, IsThumb1Only, IsLE]>;
+def : T1Pat<(extloadi8 t_addrmode_sp:$addr), (tLDRspi t_addrmode_sp:$addr)>,
+ Requires<[IsThumb, IsThumb1Only, IsLE]>;
+def : T1Pat<(extloadi16 t_addrmode_sp:$addr), (tLDRspi t_addrmode_sp:$addr)>,
+ Requires<[IsThumb, IsThumb1Only, IsLE]>;
+
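
A minimal host-side sketch of the little-endian property these patterns rely on (illustrative only; it assumes a little-endian host, mirroring the IsLE predicate above):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Illustrative: on a little-endian machine, the low bits of a word load
    // match what a byte/halfword load at the same address would produce, so
    // an anyext load may simply use the word load.
    int main() {
      const uint8_t slot[4] = {0xAB, 0x12, 0x34, 0x56}; // pretend stack slot
      uint32_t word;
      std::memcpy(&word, slot, sizeof word);
      assert((word & 0xFFu) == slot[0]);
      assert((word & 0xFFFFu) == (slot[0] | (uint32_t(slot[1]) << 8)));
    }
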
// extload -> zextload
def : T1Pat<(extloadi1 t_addrmode_rrs1:$addr), (tLDRBr t_addrmode_rrs1:$addr)>;
def : T1Pat<(extloadi1 t_addrmode_is1:$addr), (tLDRBi t_addrmode_is1:$addr)>;
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 807c252..10b0a0e 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -1185,7 +1185,8 @@ class T2I_exta_rrot<bits<3> opcod, string opc, PatFrag opnode>
class T2I_exta_rrot_np<bits<3> opcod, string opc>
: T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm,rot_imm:$rot),
- IIC_iEXTAsr, opc, "\t$Rd, $Rn, $Rm$rot", []> {
+ IIC_iEXTAsr, opc, "\t$Rd, $Rn, $Rm$rot", []>,
+ Requires<[HasT2ExtractPack, IsThumb2]> {
bits<2> rot;
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
@@ -1241,7 +1242,7 @@ def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd),
let DecoderMethod = "DecodeT2Adr";
}
-let neverHasSideEffects = 1, isReMaterializable = 1 in
+let hasSideEffects = 0, isReMaterializable = 1 in
def t2LEApcrel : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, pred:$p),
4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
let hasSideEffects = 1 in
@@ -1272,12 +1273,12 @@ defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
GPRnopc, UnOpFrag<(sextloadi8 node:$Src)>>;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs rGPR:$Rt, rGPR:$Rt2),
(ins t2addrmode_imm8s4:$addr),
IIC_iLoad_d_i, "ldrd", "\t$Rt, $Rt2, $addr", "", []>;
-} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+} // mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1
// zextload i1 -> zextload i8
def : T2Pat<(zextloadi1 t2addrmode_imm12:$addr),
@@ -1326,7 +1327,7 @@ def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)),
// Indexed loads
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
def t2LDR_PRE : T2Ipreldst<0, 0b10, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iLoad_iu,
@@ -1378,7 +1379,7 @@ def t2LDRSH_POST : T2Ipostldst<1, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$Rn, t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
"ldrsh", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>;
-} // mayLoad = 1, neverHasSideEffects = 1
+} // mayLoad = 1, hasSideEffects = 0
// LDRT, LDRBT, LDRHT, LDRSBT, LDRSHT all have offset mode (PUW=0b110).
// Ref: A8.6.57 LDR (immediate, Thumb) Encoding T4
@@ -1443,14 +1444,14 @@ defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
rGPR, BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
// Store doubleword
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
+let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in
def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
(ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr),
IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", "", []>;
// Indexed stores
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
def t2STR_PRE : T2Ipreldst<0, 0b10, 0, 1, (outs GPRnopc:$Rn_wb),
(ins GPRnopc:$Rt, t2addrmode_imm8_pre:$addr),
AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
@@ -1468,7 +1469,7 @@ def t2STRB_PRE : T2Ipreldst<0, 0b00, 0, 1, (outs GPRnopc:$Rn_wb),
AddrModeT2_i8, IndexModePre, IIC_iStore_bh_iu,
"strb", "\t$Rt, $addr!",
"$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>;
-} // mayStore = 1, neverHasSideEffects = 1
+} // mayStore = 1, hasSideEffects = 0
def t2STR_POST : T2Ipostldst<0, 0b10, 0, 0, (outs GPRnopc:$Rn_wb),
(ins GPRnopc:$Rt, addr_offset_none:$Rn,
@@ -1763,7 +1764,7 @@ multiclass thumb2_ld_mult<string asm, InstrItinClass itin,
}
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm t2LDM : thumb2_ld_mult<"ldm", IIC_iLoad_m, IIC_iLoad_mu, 1>;
@@ -1848,14 +1849,14 @@ multiclass thumb2_st_mult<string asm, InstrItinClass itin,
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm t2STM : thumb2_st_mult<"stm", IIC_iStore_m, IIC_iStore_mu, 0>;
-} // neverHasSideEffects
+} // hasSideEffects
//===----------------------------------------------------------------------===//
// Move Instructions.
//
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def t2MOVr : T2sTwoReg<(outs GPRnopc:$Rd), (ins GPR:$Rm), IIC_iMOVr,
"mov", ".w\t$Rd, $Rm", []>, Sched<[WriteALU]> {
let Inst{31-27} = 0b11101;
@@ -2572,7 +2573,7 @@ def t2MLS: T2FourReg<
}
// Extra precision multiplies with low / high results
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCommutable = 1 in {
def t2SMULL : T2MulLong<0b000, 0b0000,
(outs rGPR:$RdLo, rGPR:$RdHi),
@@ -2603,7 +2604,7 @@ def t2UMAAL : T2MulLong<0b110, 0b0110,
(ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
"umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsThumb2, HasThumb2DSP]>;
-} // neverHasSideEffects
+} // hasSideEffects
// Rounding variants of the below included for disassembly only
@@ -3150,7 +3151,7 @@ defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>>;
// Conditional moves
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCommutable = 1, isSelect = 1 in
def t2MOVCCr : t2PseudoInst<(outs rGPR:$Rd),
@@ -3213,7 +3214,7 @@ def t2MOVCCi32imm
RegConstraint<"$false = $dst">;
} // isCodeGenOnly = 1
-} // neverHasSideEffects
+} // hasSideEffects
//===----------------------------------------------------------------------===//
// Atomic operations intrinsics
@@ -3824,6 +3825,27 @@ def t2SUBS_PC_LR : T2I <(outs), (ins imm0_255:$imm), NoItinerary,
let Inst{7-0} = imm;
}
+// Hypervisor Call is a system instruction.
+let isCall = 1 in {
+def t2HVC : T2XI <(outs), (ins imm0_65535:$imm16), IIC_Br, "hvc.w\t$imm16", []>,
+ Requires<[IsThumb2, HasVirtualization]>, Sched<[WriteBr]> {
+ bits<16> imm16;
+ let Inst{31-20} = 0b111101111110;
+ let Inst{19-16} = imm16{15-12};
+ let Inst{15-12} = 0b1000;
+ let Inst{11-0} = imm16{11-0};
+}
+}
+
+// Alias for HVC without the ".w" optional width specifier
+def : t2InstAlias<"hvc\t$imm16", (t2HVC imm0_65535:$imm16)>;
+
+// ERET - Return from exception in Hypervisor mode.
+// B9.3.3, B9.3.20: ERET is an alias for "SUBS PC, LR, #0" in an
+// implementation that includes the Virtualization Extensions.
+def t2ERET : InstAlias<"eret${p}", (t2SUBS_PC_LR 0, pred:$p)>,
+ Requires<[IsThumb2, HasVirtualization]>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
@@ -4564,17 +4586,21 @@ def : t2InstAlias<"strh${p} $Rt, $addr",
(t2STRHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
// Extend instruction optional rotate operand.
-def : t2InstAlias<"sxtab${p} $Rd, $Rn, $Rm",
- (t2SXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"sxtah${p} $Rd, $Rn, $Rm",
- (t2SXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"sxtab16${p} $Rd, $Rn, $Rm",
- (t2SXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
+def : InstAlias<"sxtab${p} $Rd, $Rn, $Rm",
+ (t2SXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"sxtah${p} $Rd, $Rn, $Rm",
+ (t2SXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"sxtab16${p} $Rd, $Rn, $Rm",
+ (t2SXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"sxtb16${p} $Rd, $Rm",
+ (t2SXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"sxtb${p} $Rd, $Rm",
(t2SXTB rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"sxtb16${p} $Rd, $Rm",
- (t2SXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
def : t2InstAlias<"sxth${p} $Rd, $Rm",
(t2SXTH rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
def : t2InstAlias<"sxtb${p}.w $Rd, $Rm",
@@ -4582,19 +4608,23 @@ def : t2InstAlias<"sxtb${p}.w $Rd, $Rm",
def : t2InstAlias<"sxth${p}.w $Rd, $Rm",
(t2SXTH rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"uxtab${p} $Rd, $Rn, $Rm",
- (t2UXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"uxtah${p} $Rd, $Rn, $Rm",
- (t2UXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"uxtab16${p} $Rd, $Rn, $Rm",
- (t2UXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>;
+def : InstAlias<"uxtab${p} $Rd, $Rn, $Rm",
+ (t2UXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"uxtah${p} $Rd, $Rn, $Rm",
+ (t2UXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"uxtab16${p} $Rd, $Rn, $Rm",
+ (t2UXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+def : InstAlias<"uxtb16${p} $Rd, $Rm",
+ (t2UXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
+
def : t2InstAlias<"uxtb${p} $Rd, $Rm",
(t2UXTB rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
-def : t2InstAlias<"uxtb16${p} $Rd, $Rm",
- (t2UXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
def : t2InstAlias<"uxth${p} $Rd, $Rm",
(t2UXTH rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
-
def : t2InstAlias<"uxtb${p}.w $Rd, $Rm",
(t2UXTB rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
def : t2InstAlias<"uxth${p}.w $Rd, $Rm",
@@ -4603,15 +4633,17 @@ def : t2InstAlias<"uxth${p}.w $Rd, $Rm",
// Extend instruction w/o the ".w" optional width specifier.
def : t2InstAlias<"uxtb${p} $Rd, $Rm$rot",
(t2UXTB rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
-def : t2InstAlias<"uxtb16${p} $Rd, $Rm$rot",
- (t2UXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
+def : InstAlias<"uxtb16${p} $Rd, $Rm$rot",
+ (t2UXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"uxth${p} $Rd, $Rm$rot",
(t2UXTH rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
def : t2InstAlias<"sxtb${p} $Rd, $Rm$rot",
(t2SXTB rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
-def : t2InstAlias<"sxtb16${p} $Rd, $Rm$rot",
- (t2SXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
+def : InstAlias<"sxtb16${p} $Rd, $Rm$rot",
+ (t2SXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"sxth${p} $Rd, $Rm$rot",
(t2SXTH rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index d78f2ac..e0a9314 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -194,7 +194,7 @@ multiclass vfp_ldst_mult<string asm, bit L_bit,
}
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
@@ -202,7 +202,7 @@ defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
-} // neverHasSideEffects
+} // hasSideEffects
def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;
@@ -769,7 +769,7 @@ def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
[(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;
@@ -777,7 +777,7 @@ def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
-} // neverHasSideEffects
+} // hasSideEffects
//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
@@ -827,7 +827,7 @@ def VMOVSR : AVConv4I<0b11100000, 0b1010,
let D = VFPNeonDomain;
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
(outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
@@ -876,7 +876,7 @@ def VMOVRRS : AVConv3I<0b11000101, 0b1010,
let D = VFPNeonDomain;
let DecoderMethod = "DecodeVMOVRRS";
}
-} // neverHasSideEffects
+} // hasSideEffects
// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR
@@ -907,7 +907,7 @@ def VMOVDRR : AVConv5I<0b11000100, 0b1011,
let isRegSequence = 1;
}
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
(outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
@@ -1543,7 +1543,7 @@ def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
// FP Conditional moves.
//
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
IIC_fpUNA64,
[(set (f64 DPR:$Dd),
@@ -1555,7 +1555,7 @@ def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
[(set (f32 SPR:$Sd),
(ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
-} // neverHasSideEffects
+} // hasSideEffects
//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index c429ac1..a8d0981 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -170,7 +170,8 @@ static int getMemoryOpOffset(const MachineInstr *MI) {
return OffField;
// Thumb1 immediate offsets are scaled by 4
- if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi)
+ if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi ||
+ Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
return OffField * 4;
int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
@@ -206,6 +207,7 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ib: return ARM::STMIB;
}
case ARM::tLDRi:
+ case ARM::tLDRspi:
// tLDMIA is writeback-only - unless the base register is in the input
// reglist.
++NumLDMGened;
@@ -214,6 +216,7 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::tLDMIA;
}
case ARM::tSTRi:
+ case ARM::tSTRspi:
// There is no non-writeback tSTMIA either.
++NumSTMGened;
switch (Mode) {
@@ -328,7 +331,7 @@ AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
} // end namespace llvm
static bool isT1i32Load(unsigned Opc) {
- return Opc == ARM::tLDRi;
+ return Opc == ARM::tLDRi || Opc == ARM::tLDRspi;
}
static bool isT2i32Load(unsigned Opc) {
@@ -340,7 +343,7 @@ static bool isi32Load(unsigned Opc) {
}
static bool isT1i32Store(unsigned Opc) {
- return Opc == ARM::tSTRi;
+ return Opc == ARM::tSTRi || Opc == ARM::tSTRspi;
}
static bool isT2i32Store(unsigned Opc) {
@@ -356,6 +359,8 @@ static unsigned getImmScale(unsigned Opc) {
default: llvm_unreachable("Unhandled opcode!");
case ARM::tLDRi:
case ARM::tSTRi:
+ case ARM::tLDRspi:
+ case ARM::tSTRspi:
return 1;
case ARM::tLDRHi:
case ARM::tSTRHi:
@@ -495,6 +500,7 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
if (isThumb1)
for (unsigned I = 0; I < NumRegs; ++I)
if (Base == Regs[I].first) {
+ assert(Base != ARM::SP && "Thumb1 does not allow SP in register list");
if (Opcode == ARM::tLDRi) {
Writeback = false;
break;
@@ -515,7 +521,7 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
} else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) {
// VLDM/VSTM do not support DB mode without also updating the base reg.
Mode = ARM_AM::db;
- } else if (Offset != 0) {
+ } else if (Offset != 0 || Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
// Check if this is a supported opcode before inserting instructions to
// calculate a new base register.
if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return false;
@@ -545,6 +551,7 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
int BaseOpc =
isThumb2 ? ARM::t2ADDri :
+ (isThumb1 && Base == ARM::SP) ? ARM::tADDrSPi :
(isThumb1 && Offset < 8) ? ARM::tADDi3 :
isThumb1 ? ARM::tADDi8 : ARM::ADDri;
@@ -552,7 +559,7 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
Offset = - Offset;
BaseOpc =
isThumb2 ? ARM::t2SUBri :
- (isThumb1 && Offset < 8) ? ARM::tSUBi3 :
+ (isThumb1 && Offset < 8 && Base != ARM::SP) ? ARM::tSUBi3 :
isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
}
@@ -566,18 +573,34 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
// or
// MOV NewBase, Base
// ADDS NewBase, #imm8.
- if (Base != NewBase && Offset >= 8) {
+ if (Base != NewBase &&
+ (BaseOpc == ARM::tADDi8 || BaseOpc == ARM::tSUBi8)) {
// Need to insert a MOV to the new base first.
- BuildMI(MBB, MBBI, dl, TII->get(ARM::tMOVr), NewBase)
- .addReg(Base, getKillRegState(BaseKill))
- .addImm(Pred).addReg(PredReg);
+ if (isARMLowRegister(NewBase) && isARMLowRegister(Base) &&
+ !STI->hasV6Ops()) {
+ // thumbv4t doesn't have lo->lo copies, and we can't predicate tMOVSr
+ if (Pred != ARMCC::AL)
+ return false;
+ BuildMI(MBB, MBBI, dl, TII->get(ARM::tMOVSr), NewBase)
+ .addReg(Base, getKillRegState(BaseKill));
+ } else
+ BuildMI(MBB, MBBI, dl, TII->get(ARM::tMOVr), NewBase)
+ .addReg(Base, getKillRegState(BaseKill))
+ .addImm(Pred).addReg(PredReg);
+
// Set up BaseKill and Base correctly to insert the ADDS/SUBS below.
Base = NewBase;
BaseKill = false;
}
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase), true)
- .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
- .addImm(Pred).addReg(PredReg);
+ if (BaseOpc == ARM::tADDrSPi) {
+ assert(Offset % 4 == 0 && "tADDrSPi offset is scaled by 4");
+ BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
+ .addReg(Base, getKillRegState(BaseKill)).addImm(Offset/4)
+ .addImm(Pred).addReg(PredReg);
+ } else
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase), true)
+ .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
+ .addImm(Pred).addReg(PredReg);
} else {
BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
.addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
@@ -958,6 +981,8 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
case ARM::STRi12:
case ARM::tLDRi:
case ARM::tSTRi:
+ case ARM::tLDRspi:
+ case ARM::tSTRspi:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -1393,6 +1418,8 @@ static bool isMemoryOp(const MachineInstr *MI) {
case ARM::STRi12:
case ARM::tLDRi:
case ARM::tSTRi:
+ case ARM::tLDRspi:
+ case ARM::tSTRspi:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -1787,12 +1814,11 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
- TL = TM.getSubtargetImpl()->getTargetLowering();
+ STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+ TL = STI->getTargetLowering();
AFI = Fn.getInfo<ARMFunctionInfo>();
- TII = TM.getSubtargetImpl()->getInstrInfo();
- TRI = TM.getSubtargetImpl()->getRegisterInfo();
- STI = &TM.getSubtarget<ARMSubtarget>();
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
RS = new RegScavenger();
isThumb2 = AFI->isThumb2Function();
isThumb1 = AFI->isThumbFunction() && !isThumb2;
@@ -1802,7 +1828,7 @@ bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
++MFI) {
MachineBasicBlock &MBB = *MFI;
Modified |= LoadStoreMultipleOpti(MBB);
- if (TM.getSubtarget<ARMSubtarget>().hasV5TOps())
+ if (STI->hasV5TOps())
Modified |= MergeReturnIntoLDM(MBB);
}
@@ -1850,10 +1876,10 @@ namespace {
}
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- TD = Fn.getSubtarget().getDataLayout();
- TII = Fn.getSubtarget().getInstrInfo();
- TRI = Fn.getSubtarget().getRegisterInfo();
+ TD = Fn.getTarget().getDataLayout();
STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
MRI = &Fn.getRegInfo();
MF = &Fn;
diff --git a/lib/Target/ARM/ARMMCInstLower.cpp b/lib/Target/ARM/ARMMCInstLower.cpp
index 023f5f8..fd4f5ff 100644
--- a/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/lib/Target/ARM/ARMMCInstLower.cpp
@@ -119,11 +119,45 @@ void llvm::LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
ARMAsmPrinter &AP) {
OutMI.setOpcode(MI->getOpcode());
+ // In the MC layer, we keep modified immediates in their encoded form
+ bool EncodeImms = false;
+ switch (MI->getOpcode()) {
+ default: break;
+ case ARM::MOVi:
+ case ARM::MVNi:
+ case ARM::CMPri:
+ case ARM::CMNri:
+ case ARM::TSTri:
+ case ARM::TEQri:
+ case ARM::MSRi:
+ case ARM::ADCri:
+ case ARM::ADDri:
+ case ARM::ADDSri:
+ case ARM::SBCri:
+ case ARM::SUBri:
+ case ARM::SUBSri:
+ case ARM::ANDri:
+ case ARM::ORRri:
+ case ARM::EORri:
+ case ARM::BICri:
+ case ARM::RSBri:
+ case ARM::RSBSri:
+ case ARM::RSCri:
+ EncodeImms = true;
+ break;
+ }
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp;
- if (AP.lowerOperand(MO, MCOp))
+ if (AP.lowerOperand(MO, MCOp)) {
+ if (MCOp.isImm() && EncodeImms) {
+ int32_t Enc = ARM_AM::getSOImmVal(MCOp.getImm());
+ if (Enc != -1)
+ MCOp.setImm(Enc);
+ }
OutMI.addOperand(MCOp);
+ }
}
}
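
For context on the getSOImmVal call above: an ARM modified immediate is an 8-bit value rotated right by twice a 4-bit rotate count, packed as rot:imm8 in 12 bits. A minimal decoder sketch (illustrative; decodeModImm is not an LLVM API):

    #include <cassert>
    #include <cstdint>

    // Illustrative decoder for the 12-bit modified-immediate form that
    // getSOImmVal produces: imm8 rotated right by 2 * rot.
    static uint32_t decodeModImm(uint32_t Enc) {
      uint32_t Imm8 = Enc & 0xFFu;
      uint32_t Rot = ((Enc >> 8) & 0xFu) * 2;
      return Rot ? (Imm8 >> Rot) | (Imm8 << (32 - Rot)) : Imm8;
    }

    int main() {
      assert(decodeModImm(0x4FF) == 0xFF000000u); // 0xFF ror 8
      assert(decodeModImm(0x0AB) == 0xABu);       // rotation of 0
    }
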
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.cpp b/lib/Target/ARM/ARMMachineFunctionInfo.cpp
index 892b269..229d041 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.cpp
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.cpp
@@ -14,8 +14,8 @@ using namespace llvm;
void ARMFunctionInfo::anchor() { }
ARMFunctionInfo::ARMFunctionInfo(MachineFunction &MF)
- : isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
- hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()),
+ : isThumb(MF.getSubtarget<ARMSubtarget>().isThumb()),
+ hasThumb2(MF.getSubtarget<ARMSubtarget>().hasThumb2()),
StByValParamsPadding(0), ArgRegsSaveSize(0), HasStackFrame(false),
RestoreSPFromFP(false), LRSpilledForFarJump(false),
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h
index 4e67fa1..ddfdb52 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -16,10 +16,10 @@
#include "ARMSubtarget.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
namespace llvm {
diff --git a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
index 2a49255..1c50f9e 100644
--- a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
+++ b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
@@ -9,8 +9,8 @@
//===------------------------------------------------------------------------------------------===//
#include "ARM.h"
-#include "ARMMachineFunctionInfo.h"
#include "ARMInstrInfo.h"
+#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;
diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td
index b290e7f..45cc9ea 100644
--- a/lib/Target/ARM/ARMRegisterInfo.td
+++ b/lib/Target/ARM/ARMRegisterInfo.td
@@ -199,7 +199,7 @@ def GPR : RegisterClass<"ARM", [i32], 32, (add (sequence "R%u", 0, 12),
// Thumb1 instructions that know how to use hi regs.
let AltOrders = [(add LR, GPR), (trunc GPR, 8)];
let AltOrderSelect = [{
- return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ return 1 + MF.getSubtarget<ARMSubtarget>().isThumb1Only();
}];
}
@@ -209,7 +209,7 @@ def GPR : RegisterClass<"ARM", [i32], 32, (add (sequence "R%u", 0, 12),
def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
let AltOrderSelect = [{
- return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ return 1 + MF.getSubtarget<ARMSubtarget>().isThumb1Only();
}];
}
@@ -219,7 +219,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
let AltOrderSelect = [{
- return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ return 1 + MF.getSubtarget<ARMSubtarget>().isThumb1Only();
}];
}
@@ -237,7 +237,7 @@ def GPRsp : RegisterClass<"ARM", [i32], 32, (add SP)>;
def rGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, SP, PC)> {
let AltOrders = [(add LR, rGPR), (trunc rGPR, 8)];
let AltOrderSelect = [{
- return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ return 1 + MF.getSubtarget<ARMSubtarget>().isThumb1Only();
}];
}
@@ -255,7 +255,7 @@ def hGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, tGPR)>;
def tcGPR : RegisterClass<"ARM", [i32], 32, (add R0, R1, R2, R3, R12)> {
let AltOrders = [(and tcGPR, tGPR)];
let AltOrderSelect = [{
- return MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only();
+ return MF.getSubtarget<ARMSubtarget>().isThumb1Only();
}];
}
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index fa30ac3..636205f 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -32,7 +32,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
- const ARMSubtarget &Subtarget = DAG.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
// Do repeated 4-byte loads and stores. To be improved.
// This requires 4-byte alignment.
if ((Align & 3) != 0)
@@ -150,14 +151,14 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
- const ARMSubtarget &Subtarget = DAG.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
// Use default for non-AAPCS (or MachO) subtargets
if (!Subtarget.isAAPCS_ABI() || Subtarget.isTargetMachO() ||
Subtarget.isTargetWindows())
return SDValue();
- const ARMTargetLowering &TLI =
- *DAG.getTarget().getSubtarget<ARMSubtarget>().getTargetLowering();
+ const ARMTargetLowering &TLI = *Subtarget.getTargetLowering();
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index 600f39d..89624dd 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -15,12 +15,14 @@
#include "ARMFrameLowering.h"
#include "ARMISelLowering.h"
#include "ARMInstrInfo.h"
+#include "ARMMachineFunctionInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
-#include "ARMMachineFunctionInfo.h"
+#include "ARMTargetMachine.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
@@ -28,7 +30,6 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
@@ -87,56 +88,6 @@ IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
"Allow IT blocks based on ARMv7"),
clEnumValEnd));
-static std::string computeDataLayout(ARMSubtarget &ST) {
- std::string Ret = "";
-
- if (ST.isLittle())
- // Little endian.
- Ret += "e";
- else
- // Big endian.
- Ret += "E";
-
- Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
-
- // Pointers are 32 bits and aligned to 32 bits.
- Ret += "-p:32:32";
-
- // ABIs other than APCS have 64 bit integers with natural alignment.
- if (!ST.isAPCS_ABI())
- Ret += "-i64:64";
-
- // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
- // bits, others to 64 bits. We always try to align to 64 bits.
- if (ST.isAPCS_ABI())
- Ret += "-f64:32:64";
-
- // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
- // to 64. We always ty to give them natural alignment.
- if (ST.isAPCS_ABI())
- Ret += "-v64:32:64-v128:32:128";
- else
- Ret += "-v128:64:128";
-
- // Try to align aggregates to 32 bits (the default is 64 bits, which has no
- // particular hardware support on 32-bit ARM).
- Ret += "-a:0:32";
-
- // Integer registers are 32 bits.
- Ret += "-n32";
-
- // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
- // aligned everywhere else.
- if (ST.isTargetNaCl())
- Ret += "-S128";
- else if (ST.isAAPCS_ABI())
- Ret += "-S64";
- else
- Ret += "-S32";
-
- return Ret;
-}
-
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
@@ -146,23 +97,31 @@ ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
+ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
+ StringRef FS) {
+ ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
+ if (STI.isThumb1Only())
+ return (ARMFrameLowering *)new Thumb1FrameLowering(STI);
+
+ return new ARMFrameLowering(STI);
+}
+
ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetMachine &TM,
- bool IsLittle)
+ const std::string &FS,
+ const ARMBaseTargetMachine &TM, bool IsLittle)
: ARMGenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
ARMProcClass(None), stackAlignment(4), CPUString(CPU), IsLittle(IsLittle),
- TargetTriple(TT), Options(TM.Options), TargetABI(ARM_ABI_UNKNOWN),
- DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS))),
- TSInfo(DL),
+ TargetTriple(TT), Options(TM.Options), TM(TM),
+ TSInfo(*TM.getDataLayout()),
+ FrameLowering(initializeFrameLowering(CPU, FS)),
+ // At this point initializeSubtargetDependencies has been called so
+ // we can query directly.
InstrInfo(isThumb1Only()
? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
: !isThumb()
? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
: (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
- TLInfo(TM),
- FrameLowering(!isThumb1Only()
- ? new ARMFrameLowering(*this)
- : (ARMFrameLowering *)new Thumb1FrameLowering(*this)) {}
+ TLInfo(TM, *this) {}
void ARMSubtarget::initializeEnvironment() {
HasV4TOps = false;
@@ -216,7 +175,7 @@ void ARMSubtarget::initializeEnvironment() {
void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
if (CPUString.empty()) {
- if (isTargetIOS() && TargetTriple.getArchName().endswith("v7s"))
+ if (isTargetDarwin() && TargetTriple.getArchName().endswith("v7s"))
// Default to the Swift CPU when targeting armv7s/thumbv7s.
CPUString = "swift";
else
@@ -226,8 +185,8 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// Insert the architecture feature derived from the target triple into the
// feature string. This is important for setting features that are implied
// based on the architecture version.
- std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple.getTriple(),
- CPUString);
+ std::string ArchFS =
+ ARM_MC::ParseARMTriple(TargetTriple.getTriple(), CPUString);
if (!FS.empty()) {
if (!ArchFS.empty())
ArchFS = ArchFS + "," + FS.str();
@@ -246,30 +205,9 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
- if (TargetABI == ARM_ABI_UNKNOWN) {
- switch (TargetTriple.getEnvironment()) {
- case Triple::Android:
- case Triple::EABI:
- case Triple::EABIHF:
- case Triple::GNUEABI:
- case Triple::GNUEABIHF:
- TargetABI = ARM_ABI_AAPCS;
- break;
- default:
- if (TargetTriple.isOSBinFormatMachO() &&
- TargetTriple.getOS() == Triple::UnknownOS)
- TargetABI = ARM_ABI_AAPCS;
- else
- TargetABI = ARM_ABI_APCS;
- break;
- }
- }
-
// FIXME: this is invalid for WindowsCE
- if (isTargetWindows()) {
- TargetABI = ARM_ABI_AAPCS;
+ if (isTargetWindows())
NoARM = true;
- }
if (isAAPCS_ABI())
stackAlignment = 8;
@@ -331,6 +269,15 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
UseNEONForSinglePrecisionFP = true;
}
+bool ARMSubtarget::isAPCS_ABI() const {
+ assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
+ return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
+}
+bool ARMSubtarget::isAAPCS_ABI() const {
+ assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
+ return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS;
+}
+
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
bool
ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
@@ -402,6 +349,5 @@ bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
// immediates as it is inherently position independent, and may be out of
// range otherwise.
return UseMovt && (isTargetWindows() ||
- !MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize));
+ !MF.getFunction()->hasFnAttribute(Attribute::MinSize));
}
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index d5ee009..f4deddf 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -20,10 +20,10 @@
#include "ARMInstrInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
-#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -37,6 +37,7 @@ namespace llvm {
class GlobalValue;
class StringRef;
class TargetOptions;
+class ARMBaseTargetMachine;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
@@ -228,18 +229,14 @@ protected:
/// Options passed via command line that could influence the target
const TargetOptions &Options;
- public:
- enum {
- ARM_ABI_UNKNOWN,
- ARM_ABI_APCS,
- ARM_ABI_AAPCS // ARM EABI
- } TargetABI;
+ const ARMBaseTargetMachine &TM;
+public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
ARMSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetMachine &TM, bool IsLittle);
+ const std::string &FS, const ARMBaseTargetMachine &TM, bool IsLittle);
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
@@ -254,7 +251,6 @@ protected:
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
- const DataLayout *getDataLayout() const override { return &DL; }
const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
@@ -272,16 +268,17 @@ protected:
}
private:
- const DataLayout DL;
ARMSelectionDAGInfo TSInfo;
+ // Either Thumb1FrameLowering or ARMFrameLowering.
+ std::unique_ptr<ARMFrameLowering> FrameLowering;
// Either Thumb1InstrInfo or Thumb2InstrInfo.
std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
ARMTargetLowering TLInfo;
- // Either Thumb1FrameLowering or ARMFrameLowering.
- std::unique_ptr<ARMFrameLowering> FrameLowering;
void initializeEnvironment();
void initSubtargetFeatures(StringRef CPU, StringRef FS);
+ ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);
+
public:
void computeIssueWidth();
@@ -316,7 +313,8 @@ public:
bool hasCRC() const { return HasCRC; }
bool hasVirtualization() const { return HasVirtualization; }
bool useNEONForSinglePrecisionFP() const {
- return hasNEON() && UseNEONForSinglePrecisionFP; }
+ return hasNEON() && UseNEONForSinglePrecisionFP;
+ }
bool hasDivide() const { return HasHardwareDivide; }
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
@@ -350,7 +348,7 @@ public:
bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
- bool isTargetNetBSD() const { return TargetTriple.getOS() == Triple::NetBSD; }
+ bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
@@ -391,14 +389,8 @@ public:
return TargetTriple.getEnvironment() == Triple::Android;
}
- bool isAPCS_ABI() const {
- assert(TargetABI != ARM_ABI_UNKNOWN);
- return TargetABI == ARM_ABI_APCS;
- }
- bool isAAPCS_ABI() const {
- assert(TargetABI != ARM_ABI_UNKNOWN);
- return TargetABI == ARM_ABI_AAPCS;
- }
+ bool isAPCS_ABI() const;
+ bool isAAPCS_ABI() const;
bool isThumb() const { return InThumbMode; }
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 88d6c5e..a97a058 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -11,13 +11,14 @@
//===----------------------------------------------------------------------===//
#include "ARM.h"
-#include "ARMTargetMachine.h"
#include "ARMFrameLowering.h"
+#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
+#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
@@ -52,6 +53,110 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return make_unique<ARMElfTargetObjectFile>();
}
+static ARMBaseTargetMachine::ARMABI
+computeTargetABI(const Triple &TT, StringRef CPU,
+ const TargetOptions &Options) {
+ if (Options.MCOptions.getABIName().startswith("aapcs"))
+ return ARMBaseTargetMachine::ARM_ABI_AAPCS;
+ else if (Options.MCOptions.getABIName().startswith("apcs"))
+ return ARMBaseTargetMachine::ARM_ABI_APCS;
+
+ assert(Options.MCOptions.getABIName().empty() &&
+ "Unknown target-abi option!");
+
+ ARMBaseTargetMachine::ARMABI TargetABI =
+ ARMBaseTargetMachine::ARM_ABI_UNKNOWN;
+
+ // FIXME: This is duplicated code from the front end and should be unified.
+ if (TT.isOSBinFormatMachO()) {
+ if (TT.getEnvironment() == llvm::Triple::EABI ||
+ (TT.getOS() == llvm::Triple::UnknownOS &&
+ TT.getObjectFormat() == llvm::Triple::MachO) ||
+ CPU.startswith("cortex-m")) {
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
+ } else {
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
+ }
+ } else if (TT.isOSWindows()) {
+ // FIXME: this is invalid for WindowsCE
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
+ } else {
+ // Select the default based on the platform.
+ switch (TT.getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::EABI:
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
+ break;
+ case llvm::Triple::GNU:
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
+ break;
+ default:
+ if (TT.getOS() == llvm::Triple::NetBSD)
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
+ else
+ TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
+ break;
+ }
+ }
+
+ return TargetABI;
+}
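For orientation (not an authoritative statement of the patch, just a reading of the code above), the selection works out to, for example:

   armv7--linux-gnueabihf  ->  ARM_ABI_AAPCS  (GNUEABIHF environment)
   thumbv7--windows-msvc   ->  ARM_ABI_AAPCS  (Windows override)
   armv6--netbsd           ->  ARM_ABI_APCS   (NetBSD default)
   armv7-apple-darwin      ->  ARM_ABI_APCS   (Mach-O, neither EABI nor cortex-m)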
+
+static std::string computeDataLayout(const Triple &TT,
+ ARMBaseTargetMachine::ARMABI ABI,
+ bool isLittle) {
+ std::string Ret = "";
+
+ if (isLittle)
+ // Little endian.
+ Ret += "e";
+ else
+ // Big endian.
+ Ret += "E";
+
+ Ret += DataLayout::getManglingComponent(TT);
+
+ // Pointers are 32 bits and aligned to 32 bits.
+ Ret += "-p:32:32";
+
+ // ABIs other than APCS have 64 bit integers with natural alignment.
+ if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
+ Ret += "-i64:64";
+
+ // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
+ // bits, others to 64 bits. We always try to align to 64 bits.
+ if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
+ Ret += "-f64:32:64";
+
+ // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
+ // to 64. We always try to give them natural alignment.
+ if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
+ Ret += "-v64:32:64-v128:32:128";
+ else
+ Ret += "-v128:64:128";
+
+ // Try to align aggregates to 32 bits (the default is 64 bits, which has no
+ // particular hardware support on 32-bit ARM).
+ Ret += "-a:0:32";
+
+ // Integer registers are 32 bits.
+ Ret += "-n32";
+
+ // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
+ // aligned everywhere else.
+ if (TT.isOSNaCl())
+ Ret += "-S128";
+ else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
+ Ret += "-S64";
+ else
+ Ret += "-S32";
+
+ return Ret;
+}
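As a concrete illustration (assuming a little-endian ELF triple, for which DataLayout::getManglingComponent() yields "-m:e"), the function builds strings along the lines of:

   AAPCS: e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64
   APCS:  e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32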
+
/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
@@ -60,6 +165,8 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ TargetABI(computeTargetABI(Triple(TT), CPU, Options)),
+ DL(computeDataLayout(Triple(TT), TargetABI, isLittle)),
TLOF(createTLOF(Triple(getTargetTriple()))),
Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {
@@ -73,11 +180,8 @@ ARMBaseTargetMachine::~ARMBaseTargetMachine() {}
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
@@ -91,8 +195,7 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
// function before we can generate a subtarget. We also need to use
// it as a key for the subtarget since that can be the only difference
// between two functions.
- Attribute SFAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
+ Attribute SFAttr = F.getFnAttribute("use-soft-float");
bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
? SFAttr.getValueAsString() == "true"
: Options.UseSoftFloat;
@@ -109,12 +212,9 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
return I.get();
}
-void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our ARM pass. This
- // allows the ARM pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createARMTargetTransformInfoPass(this));
+TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &F) { return TargetTransformInfo(ARMTTIImpl(this, F)); });
}
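For context, a minimal sketch of how a legacy pass manager client would consume this hook, assuming the createTargetTransformInfoWrapperPass helper that replaces addAnalysisPasses at this revision:

   #include "llvm/Analysis/TargetTransformInfo.h"
   #include "llvm/IR/LegacyPassManager.h"

   // TM is an ARMBaseTargetMachine *; the wrapper pass re-runs the lambda
   // above per function, so each function gets a subtarget-accurate TTI.
   legacy::PassManager PM;
   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));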
@@ -197,9 +297,9 @@ public:
void addIRPasses() override;
bool addPreISel() override;
bool addInstSelector() override;
- bool addPreRegAlloc() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -226,7 +326,12 @@ void ARMPassConfig::addIRPasses() {
bool ARMPassConfig::addPreISel() {
if (TM->getOptLevel() != CodeGenOpt::None)
- addPass(createGlobalMergePass(TM));
+ // FIXME: This is using the Thumb1-only constant value for the
+ // maximal global offset when merging globals. We may want to
+ // look into using the old value of 4095 for non-Thumb1 code,
+ // based on the TargetMachine, but that starts to become tricky
+ // when doing codegen per function.
+ addPass(createGlobalMergePass(TM, 127));
return false;
}
@@ -241,7 +346,7 @@ bool ARMPassConfig::addInstSelector() {
return false;
}
-bool ARMPassConfig::addPreRegAlloc() {
+void ARMPassConfig::addPreRegAlloc() {
if (getOptLevel() != CodeGenOpt::None)
addPass(createARMLoadStoreOptimizationPass(true));
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
@@ -252,13 +357,11 @@ bool ARMPassConfig::addPreRegAlloc() {
getARMSubtarget().hasNEON() && !DisableA15SDOptimization) {
addPass(createA15SDOptimizerPass());
}
- return true;
}
-bool ARMPassConfig::addPreSched2() {
+void ARMPassConfig::addPreSched2() {
if (getOptLevel() != CodeGenOpt::None) {
addPass(createARMLoadStoreOptimizationPass());
- printAndVerify("After ARM load / store optimizer");
if (getARMSubtarget().hasNEON())
addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
@@ -279,11 +382,9 @@ bool ARMPassConfig::addPreSched2() {
}
if (getARMSubtarget().isThumb2())
addPass(createThumb2ITBlockPass());
-
- return true;
}
-bool ARMPassConfig::addPreEmitPass() {
+void ARMPassConfig::addPreEmitPass() {
if (getARMSubtarget().isThumb2()) {
if (!getARMSubtarget().prefers32BitThumb())
addPass(createThumb2SizeReductionPass());
@@ -294,6 +395,4 @@ bool ARMPassConfig::addPreEmitPass() {
addPass(createARMOptimizeBarriersPass());
addPass(createARMConstantIslandPass());
-
- return true;
}
diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h
index fba0ec2..7f6a1ee 100644
--- a/lib/Target/ARM/ARMTargetMachine.h
+++ b/lib/Target/ARM/ARMTargetMachine.h
@@ -22,7 +22,15 @@
namespace llvm {
class ARMBaseTargetMachine : public LLVMTargetMachine {
+public:
+ enum ARMABI {
+ ARM_ABI_UNKNOWN,
+ ARM_ABI_APCS,
+ ARM_ABI_AAPCS // ARM EABI
+ } TargetABI;
+
protected:
+ const DataLayout DL;
std::unique_ptr<TargetLoweringObjectFile> TLOF;
ARMSubtarget Subtarget;
bool isLittle;
@@ -39,9 +47,11 @@ public:
const ARMSubtarget *getSubtargetImpl() const override { return &Subtarget; }
const ARMSubtarget *getSubtargetImpl(const Function &F) const override;
+ const DataLayout *getDataLayout() const override { return &DL; }
+ bool isLittleEndian() const { return isLittle; }
- /// \brief Register ARM analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
+ /// \brief Get the TargetIRAnalysis for this target.
+ TargetIRAnalysis getTargetIRAnalysis() override;
// Pass Pipeline Configuration
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
diff --git a/lib/Target/ARM/ARMTargetObjectFile.cpp b/lib/Target/ARM/ARMTargetObjectFile.cpp
index 48238bf..80f03c6 100644
--- a/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "ARMTargetObjectFile.h"
-#include "ARMSubtarget.h"
+#include "ARMTargetMachine.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -27,7 +27,8 @@ using namespace dwarf;
void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
- bool isAAPCS_ABI = TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI();
+ bool isAAPCS_ABI = static_cast<const ARMTargetMachine &>(TM).TargetABI ==
+ ARMTargetMachine::ARMABI::ARM_ABI_AAPCS;
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(isAAPCS_ABI);
@@ -36,10 +37,7 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
}
AttributesSection =
- getContext().getELFSection(".ARM.attributes",
- ELF::SHT_ARM_ATTRIBUTES,
- 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".ARM.attributes", ELF::SHT_ARM_ATTRIBUTES, 0);
}
const MCExpr *ARMElfTargetObjectFile::getTTypeGlobalReference(
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index ec834e8..4e1b371 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1,4 +1,4 @@
-//===-- ARMTargetTransformInfo.cpp - ARM specific TTI pass ----------------===//
+//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,17 +6,8 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements a TargetTransformInfo analysis pass specific to the
-/// ARM target machine. It uses the target's detailed information to provide
-/// more precise answers to certain TTI queries, while letting the target
-/// independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-#include "ARM.h"
-#include "ARMTargetMachine.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
+#include "ARMTargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -24,132 +15,7 @@ using namespace llvm;
#define DEBUG_TYPE "armtti"
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeARMTTIPass(PassRegistry &);
-}
-
-namespace {
-
-class ARMTTI final : public ImmutablePass, public TargetTransformInfo {
- const ARMBaseTargetMachine *TM;
- const ARMSubtarget *ST;
- const ARMTargetLowering *TLI;
-
- /// Estimate the overhead of scalarizing an instruction. Insert and Extract
- /// are set if the result needs to be inserted and/or extracted from vectors.
- unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
-
-public:
- ARMTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- ARMTTI(const ARMBaseTargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
- TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializeARMTTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override {
- pushTTIStack(this);
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo*)this;
- return this;
- }
-
- /// \name Scalar TTI Implementations
- /// @{
- using TargetTransformInfo::getIntImmCost;
- unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
-
- /// @}
-
-
- /// \name Vector TTI Implementations
- /// @{
-
- unsigned getNumberOfRegisters(bool Vector) const override {
- if (Vector) {
- if (ST->hasNEON())
- return 16;
- return 0;
- }
-
- if (ST->isThumb1Only())
- return 8;
- return 13;
- }
-
- unsigned getRegisterBitWidth(bool Vector) const override {
- if (Vector) {
- if (ST->hasNEON())
- return 128;
- return 0;
- }
-
- return 32;
- }
-
- unsigned getMaxInterleaveFactor() const override {
- // These are out of order CPUs:
- if (ST->isCortexA15() || ST->isSwift())
- return 2;
- return 1;
- }
-
- unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const override;
-
- unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const override;
-
- unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const override;
-
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const override;
-
- unsigned getAddressComputationCost(Type *Val,
- bool IsComplex) const override;
-
- unsigned getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Op1Info = OK_AnyValue,
- OperandValueKind Op2Info = OK_AnyValue,
- OperandValueProperties Opd1PropInfo = OP_None,
- OperandValueProperties Opd2PropInfo = OP_None) const override;
-
- unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const override;
- /// @}
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(ARMTTI, TargetTransformInfo, "armtti",
- "ARM Target Transform Info", true, true, false)
-char ARMTTI::ID = 0;
-
-ImmutablePass *
-llvm::createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM) {
- return new ARMTTI(TM);
-}
-
-
-unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+unsigned ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned Bits = Ty->getPrimitiveSizeInBits();
@@ -181,8 +47,7 @@ unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
return 3;
}
-unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const {
+unsigned ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -206,7 +71,7 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
EVT DstTy = TLI->getValueType(Dst);
if (!SrcTy.isSimple() || !DstTy.isSimple())
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
// Some arithmetic, load and store operations have specific instructions
// to cast up/down their types automatically at no extra cost.
@@ -377,11 +242,11 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
return ARMIntegerConversionTbl[Idx].Cost;
}
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
-unsigned ARMTTI::getVectorInstrCost(unsigned Opcode, Type *ValTy,
- unsigned Index) const {
+unsigned ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
+ unsigned Index) {
// Penalize inserting into an D-subregister. We end up with a three times
// lower estimated throughput on swift.
if (ST->isSwift() &&
@@ -397,11 +262,11 @@ unsigned ARMTTI::getVectorInstrCost(unsigned Opcode, Type *ValTy,
ValTy->getVectorElementType()->isIntegerTy())
return 3;
- return TargetTransformInfo::getVectorInstrCost(Opcode, ValTy, Index);
+ return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}
-unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const {
+unsigned ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
// On NEON a vector select gets lowered to vbsl.
@@ -431,10 +296,10 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return LT.first;
}
- return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+ return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+unsigned ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@@ -449,13 +314,32 @@ unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
return 1;
}
-unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
- Type *SubTp) const {
+unsigned ARMTTIImpl::getFPOpCost(Type *Ty) {
+ // Use similar logic that's in ARMISelLowering:
+ // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
+ // to VFP.
+
+ if (ST->hasVFP2() && !ST->isThumb1Only()) {
+ if (Ty->isFloatTy()) {
+ return TargetTransformInfo::TCC_Basic;
+ }
+
+ if (Ty->isDoubleTy()) {
+ return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
+ TargetTransformInfo::TCC_Basic;
+ }
+ }
+
+ return TargetTransformInfo::TCC_Expensive;
+}
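For intuition: isFPOnlySP() corresponds to single-precision-only FPUs (for example the FPv4-SP unit on Cortex-M4 class cores), where double-precision arithmetic must be expanded or turned into library calls, hence TCC_Expensive for doubles there.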
+
+unsigned ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) {
// We only handle costs of reverse and alternate shuffles for now.
- if (Kind != SK_Reverse && Kind != SK_Alternate)
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
- if (Kind == SK_Reverse) {
+ if (Kind == TTI::SK_Reverse) {
static const CostTblEntry<MVT::SimpleValueType> NEONShuffleTbl[] = {
// Reverse shuffle cost one instruction if we are shuffling within a
// double word (vrev) or two if we shuffle a quad word (vrev, vext).
@@ -473,11 +357,11 @@ unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx == -1)
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
return LT.first * NEONShuffleTbl[Idx].Cost;
}
- if (Kind == SK_Alternate) {
+ if (Kind == TTI::SK_Alternate) {
static const CostTblEntry<MVT::SimpleValueType> NEONAltShuffleTbl[] = {
// Alt shuffle cost table for ARM. Cost is the number of instructions
// required to create the shuffled vector.
@@ -499,16 +383,16 @@ unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
int Idx =
CostTableLookup(NEONAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx == -1)
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
return LT.first * NEONAltShuffleTbl[Idx].Cost;
}
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
-unsigned ARMTTI::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
- OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
- OperandValueProperties Opd2PropInfo) const {
+unsigned ARMTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
+ TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
@@ -564,8 +448,8 @@ unsigned ARMTTI::getArithmeticInstrCost(
if (Idx != -1)
return LT.first * CostTbl[Idx].Cost;
- unsigned Cost = TargetTransformInfo::getArithmeticInstrCost(
- Opcode, Ty, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
+ unsigned Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
+ Opd1PropInfo, Opd2PropInfo);
// This is somewhat of a hack. The problem that we are facing is that SROA
// creates a sequence of shift, and, or instructions to construct values.
@@ -581,8 +465,9 @@ unsigned ARMTTI::getArithmeticInstrCost(
return Cost;
}
-unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const {
+unsigned ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
if (Src->isVectorTy() && Alignment != 16 &&
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.h b/lib/Target/ARM/ARMTargetTransformInfo.h
new file mode 100644
index 0000000..97590f6
--- /dev/null
+++ b/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -0,0 +1,134 @@
+//===-- ARMTargetTransformInfo.h - ARM specific TTI -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the ARM target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
+
+#include "ARM.h"
+#include "ARMTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
+ typedef BasicTTIImplBase<ARMTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const ARMSubtarget *ST;
+ const ARMTargetLowering *TLI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
+
+ const ARMSubtarget *getST() const { return ST; }
+ const ARMTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, Function &F)
+ : BaseT(TM), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ ARMTTIImpl(const ARMTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ ARMTTIImpl(ARMTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ ARMTTIImpl &operator=(const ARMTTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ ARMTTIImpl &operator=(ARMTTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ using BaseT::getIntImmCost;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty);
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector) {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 16;
+ return 0;
+ }
+
+ if (ST->isThumb1Only())
+ return 8;
+ return 13;
+ }
+
+ unsigned getRegisterBitWidth(bool Vector) {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 128;
+ return 0;
+ }
+
+ return 32;
+ }
+
+ unsigned getMaxInterleaveFactor() {
+ // These are out of order CPUs:
+ if (ST->isCortexA15() || ST->isSwift())
+ return 2;
+ return 1;
+ }
+
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp);
+
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
+
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+
+ unsigned getAddressComputationCost(Type *Val, bool IsComplex);
+
+ unsigned getFPOpCost(Type *Ty);
+
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Op1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Op2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace);
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 9cc89bd..59461e8 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -164,7 +164,10 @@ class ARMAsmParser : public MCTargetAsmParser {
// according to count of instructions in block.
// ~0U if no active IT block.
} ITState;
- bool inITBlock() { return ITState.CurPosition != ~0U;}
+ bool inITBlock() { return ITState.CurPosition != ~0U; }
+ bool lastInITBlock() {
+ return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
+ }
void forwardITPosition() {
if (!inITBlock()) return;
// Move to the next instruction in the IT block, if there is one. If not,
@@ -186,6 +189,11 @@ class ARMAsmParser : public MCTargetAsmParser {
return getParser().Error(L, Msg, Ranges);
}
+ bool validatetLDMRegList(MCInst Inst, const OperandVector &Operands,
+ unsigned ListNo, bool IsARPop = false);
+ bool validatetSTMRegList(MCInst Inst, const OperandVector &Operands,
+ unsigned ListNo);
+
int tryParseRegister();
bool tryParseRegisterWithWriteBack(OperandVector &);
int tryParseShiftRegister(OperandVector &);
@@ -305,6 +313,7 @@ class ARMAsmParser : public MCTargetAsmParser {
OperandMatchResultTy parseSetEndImm(OperandVector &);
OperandMatchResultTy parseShifterImm(OperandVector &);
OperandMatchResultTy parseRotImm(OperandVector &);
+ OperandMatchResultTy parseModImm(OperandVector &);
OperandMatchResultTy parseBitfield(OperandVector &);
OperandMatchResultTy parsePostIdxReg(OperandVector &);
OperandMatchResultTy parseAM3Offset(OperandVector &);
@@ -318,7 +327,7 @@ class ARMAsmParser : public MCTargetAsmParser {
void cvtThumbBranches(MCInst &Inst, const OperandVector &);
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
- bool processInstruction(MCInst &Inst, const OperandVector &Ops);
+ bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
@@ -400,6 +409,7 @@ class ARMOperand : public MCParsedAsmOperand {
k_ShiftedImmediate,
k_ShifterImmediate,
k_RotateImmediate,
+ k_ModifiedImmediate,
k_BitfieldDescriptor,
k_Token
} Kind;
@@ -511,6 +521,11 @@ class ARMOperand : public MCParsedAsmOperand {
unsigned Imm;
};
+ struct ModImmOp {
+ unsigned Bits;
+ unsigned Rot;
+ };
+
struct BitfieldOp {
unsigned LSB;
unsigned Width;
@@ -537,6 +552,7 @@ class ARMOperand : public MCParsedAsmOperand {
struct RegShiftedRegOp RegShiftedReg;
struct RegShiftedImmOp RegShiftedImm;
struct RotImmOp RotImm;
+ struct ModImmOp ModImm;
struct BitfieldOp Bitfield;
};
@@ -612,6 +628,9 @@ public:
case k_RotateImmediate:
RotImm = o.RotImm;
break;
+ case k_ModifiedImmediate:
+ ModImm = o.ModImm;
+ break;
case k_BitfieldDescriptor:
Bitfield = o.Bitfield;
break;
@@ -1020,33 +1039,17 @@ public:
}
bool isAdrLabel() const {
// If we have an immediate that's not a constant, treat it as a label
- // reference needing a fixup. If it is a constant, but it can't fit
- // into shift immediate encoding, we reject it.
- if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
- else return (isARMSOImm() || isARMSOImmNeg());
- }
- bool isARMSOImm() const {
- if (!isImm()) return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
- int64_t Value = CE->getValue();
- return ARM_AM::getSOImmVal(Value) != -1;
- }
- bool isARMSOImmNot() const {
- if (!isImm()) return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
- int64_t Value = CE->getValue();
- return ARM_AM::getSOImmVal(~Value) != -1;
- }
- bool isARMSOImmNeg() const {
+ // reference needing a fixup.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
+
+ // If it is a constant, it must fit into a modified immediate encoding.
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
- // Only use this when not representable as a plain so_imm.
- return ARM_AM::getSOImmVal(Value) == -1 &&
- ARM_AM::getSOImmVal(-Value) != -1;
+ return (ARM_AM::getSOImmVal(Value) != -1 ||
+ ARM_AM::getSOImmVal(-Value) != -1);
}
bool isT2SOImm() const {
if (!isImm()) return false;
@@ -1091,6 +1094,22 @@ public:
bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
bool isRotImm() const { return Kind == k_RotateImmediate; }
+ bool isModImm() const { return Kind == k_ModifiedImmediate; }
+ bool isModImmNot() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return ARM_AM::getSOImmVal(~Value) != -1;
+ }
+ bool isModImmNeg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return ARM_AM::getSOImmVal(Value) == -1 &&
+ ARM_AM::getSOImmVal(-Value) != -1;
+ }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
bool isPostIdxReg() const {
@@ -1826,6 +1845,30 @@ public:
Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
}
+ void addModImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ // Support for fixups (MCFixup)
+ if (isImm())
+ return addImmOperands(Inst, N);
+
+ Inst.addOperand(MCOperand::CreateImm(ModImm.Bits | (ModImm.Rot << 7)));
+ }
+
+ void addModImmNotOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
+ Inst.addOperand(MCOperand::CreateImm(Enc));
+ }
+
+ void addModImmNegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
+ Inst.addOperand(MCOperand::CreateImm(Enc));
+ }
+
void addBitfieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// Munge the lsb/width into a bitfield mask.
@@ -1982,22 +2025,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
}
- void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- // The operand is actually a so_imm, but we have its bitwise
- // negation in the assembly source, so twiddle it here.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
- }
-
- void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- // The operand is actually a so_imm, but we have its
- // negation in the assembly source, so twiddle it here.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
- }
-
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
@@ -2630,6 +2657,16 @@ public:
return Op;
}
+ static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
+ Op->ModImm.Bits = Bits;
+ Op->ModImm.Rot = Rot;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
@@ -2883,6 +2920,10 @@ void ARMOperand::print(raw_ostream &OS) const {
case k_RotateImmediate:
OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
break;
+ case k_ModifiedImmediate:
+ OS << "<mod_imm #" << ModImm.Bits << ", #"
+ << ModImm.Rot << ">";
+ break;
case k_BitfieldDescriptor:
OS << "<bitfield " << "lsb: " << Bitfield.LSB
<< ", width: " << Bitfield.Width << ">";
@@ -4339,6 +4380,123 @@ ARMAsmParser::parseRotImm(OperandVector &Operands) {
}
ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseModImm(OperandVector &Operands) {
+ MCAsmParser &Parser = getParser();
+ MCAsmLexer &Lexer = getLexer();
+ int64_t Imm1, Imm2;
+
+ SMLoc S = Parser.getTok().getLoc();
+
+ // 1) A mod_imm operand can appear in the place of a register name:
+ // add r0, #mod_imm
+ // add r0, r0, #mod_imm
+ // to correctly handle the latter, we bail out as soon as we see an
+ // identifier.
+ //
+ // 2) Similarly, we do not want to parse into complex operands:
+ // mov r0, #mod_imm
+ // mov r0, :lower16:(_foo)
+ if (Parser.getTok().is(AsmToken::Identifier) ||
+ Parser.getTok().is(AsmToken::Colon))
+ return MatchOperand_NoMatch;
+
+ // Hash (dollar) is optional as per the ARMARM
+ if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Dollar)) {
+ // Avoid parsing into complex operands (#:)
+ if (Lexer.peekTok().is(AsmToken::Colon))
+ return MatchOperand_NoMatch;
+
+ // Eat the hash (dollar)
+ Parser.Lex();
+ }
+
+ SMLoc Sx1, Ex1;
+ Sx1 = Parser.getTok().getLoc();
+ const MCExpr *Imm1Exp;
+ if (getParser().parseExpression(Imm1Exp, Ex1)) {
+ Error(Sx1, "malformed expression");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
+
+ if (CE) {
+ // Immediate must fit within 32-bits
+ Imm1 = CE->getValue();
+ int Enc = ARM_AM::getSOImmVal(Imm1);
+ if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
+ // We have a match!
+ Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
+ (Enc & 0xF00) >> 7,
+ Sx1, Ex1));
+ return MatchOperand_Success;
+ }
+
+ // We have parsed an immediate which is not for us; fall back to a plain
+ // immediate. This can happen for instruction aliases. For example,
+ // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
+ // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
+ // instruction with a mod_imm operand. The alias is defined such that the
+ // parser method is shared, that's why we have to do this here.
+ if (Parser.getTok().is(AsmToken::EndOfStatement)) {
+ Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
+ return MatchOperand_Success;
+ }
+ } else {
+ // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
+ // MCFixup). Fall back to a plain immediate.
+ Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
+ return MatchOperand_Success;
+ }
+
+ // From this point onward, we expect the input to be a (#bits, #rot) pair
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
+ return MatchOperand_ParseFail;
+ }
+
+ if (Imm1 & ~0xFF) {
+ Error(Sx1, "immediate operand must a number in the range [0, 255]");
+ return MatchOperand_ParseFail;
+ }
+
+ // Eat the comma
+ Parser.Lex();
+
+ // Repeat for #rot
+ SMLoc Sx2, Ex2;
+ Sx2 = Parser.getTok().getLoc();
+
+ // Eat the optional hash (dollar)
+ if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Dollar))
+ Parser.Lex();
+
+ const MCExpr *Imm2Exp;
+ if (getParser().parseExpression(Imm2Exp, Ex2)) {
+ Error(Sx2, "malformed expression");
+ return MatchOperand_ParseFail;
+ }
+
+ CE = dyn_cast<MCConstantExpr>(Imm2Exp);
+
+ if (CE) {
+ Imm2 = CE->getValue();
+ if (!(Imm2 & ~0x1E)) {
+ // We have a match!
+ Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
+ return MatchOperand_Success;
+ }
+ Error(Sx2, "immediate operand must an even number in the range [0, 30]");
+ return MatchOperand_ParseFail;
+ } else {
+ Error(Sx2, "constant expression expected");
+ return MatchOperand_ParseFail;
+ }
+}
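To make the (#bits, #rot) form concrete, here is a small sketch of the decode direction, mirroring what ARM_AM::rotr32 is assumed to do (bits rotated right by rot); illustrative only, not part of the patch:

   #include <cassert>
   #include <cstdint>

   // A modified immediate is an 8-bit value rotated right by an even
   // amount in [0, 30].
   static uint32_t modImmToValue(uint32_t Bits, uint32_t Rot) {
     assert(Bits <= 0xFF && Rot <= 30 && (Rot & 1) == 0);
     return (Bits >> Rot) | (Bits << ((32 - Rot) & 31));
   }

   // modImmToValue(0xFF, 30) == 0x3FC, so "mov r0, #255, #30" assembles
   // the same value as "mov r0, #1020".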
+
+ARMAsmParser::OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
@@ -5091,15 +5249,52 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
return true;
}
+ enum {
+ COFF = (1 << MCObjectFileInfo::IsCOFF),
+ ELF = (1 << MCObjectFileInfo::IsELF),
+ MACHO = (1 << MCObjectFileInfo::IsMachO)
+ };
+ static const struct PrefixEntry {
+ const char *Spelling;
+ ARMMCExpr::VariantKind VariantKind;
+ uint8_t SupportedFormats;
+ } PrefixEntries[] = {
+ { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
+ { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
+ };
+
StringRef IDVal = Parser.getTok().getIdentifier();
- if (IDVal == "lower16") {
- RefKind = ARMMCExpr::VK_ARM_LO16;
- } else if (IDVal == "upper16") {
- RefKind = ARMMCExpr::VK_ARM_HI16;
- } else {
+
+ const auto &Prefix =
+ std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
+ [&IDVal](const PrefixEntry &PE) {
+ return PE.Spelling == IDVal;
+ });
+ if (Prefix == std::end(PrefixEntries)) {
Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
return true;
}
+
+ uint8_t CurrentFormat;
+ switch (getContext().getObjectFileInfo()->getObjectFileType()) {
+ case MCObjectFileInfo::IsMachO:
+ CurrentFormat = MACHO;
+ break;
+ case MCObjectFileInfo::IsELF:
+ CurrentFormat = ELF;
+ break;
+ case MCObjectFileInfo::IsCOFF:
+ CurrentFormat = COFF;
+ break;
+ }
+
+ if (~Prefix->SupportedFormats & CurrentFormat) {
+ Error(Parser.getTok().getLoc(),
+ "cannot represent relocation in the current file format");
+ return true;
+ }
+
+ RefKind = Prefix->VariantKind;
Parser.Lex();
if (getLexer().isNot(AsmToken::Colon)) {
@@ -5107,6 +5302,7 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
return true;
}
Parser.Lex(); // Eat the last ':'
+
return false;
}
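Typical inputs these prefixes gate look like (illustrative):

   movw r0, :lower16:_foo   @ VK_ARM_LO16
   movt r0, :upper16:_foo   @ VK_ARM_HI16

with the new table-driven check rejecting a prefix whose relocation the current object file format cannot represent.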
@@ -5139,7 +5335,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
- Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
+ Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
+ Mnemonic.startswith("vsel"))
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -5244,7 +5441,7 @@ getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
- Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
+ Mnemonic == "vrintm" || Mnemonic.startswith("aes") || Mnemonic == "hvc" ||
Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
(FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
// These mnemonics are never predicable
@@ -5282,7 +5479,7 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// conditionally adding the cc_out in the first place because we need
// to check the type of the parsed immediate operand.
if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
- !static_cast<ARMOperand &>(*Operands[4]).isARMSOImm() &&
+ !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
return true;
@@ -5823,6 +6020,50 @@ static bool instIsBreakpoint(const MCInst &Inst) {
}
+bool ARMAsmParser::validatetLDMRegList(MCInst Inst,
+ const OperandVector &Operands,
+ unsigned ListNo, bool IsARPop) {
+ const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
+ bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
+
+ bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
+ bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
+ bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
+
+ if (!IsARPop && ListContainsSP)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "SP may not be in the register list");
+ else if (ListContainsPC && ListContainsLR)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "PC and LR may not be in the register list simultaneously");
+ else if (inITBlock() && !lastInITBlock() && ListContainsPC)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "instruction must be outside of IT block or the last "
+ "instruction in an IT block");
+ return false;
+}
+
+bool ARMAsmParser::validatetSTMRegList(MCInst Inst,
+ const OperandVector &Operands,
+ unsigned ListNo) {
+ const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
+ bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
+
+ bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
+ bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
+
+ if (ListContainsSP && ListContainsPC)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "SP and PC may not be in the register list");
+ else if (ListContainsSP)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "SP may not be in the register list");
+ else if (ListContainsPC)
+ return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
+ "PC may not be in the register list");
+ return false;
+}
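A few Thumb2 register lists the new helpers diagnose (illustrative, not taken from the patch):

   ldm r0, {r1, sp}       @ SP may not be in the register list
   ldm r0, {r1, lr, pc}   @ PC and LR may not be in the list simultaneously
   stm r0!, {r1, pc}      @ PC may not be in the register list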
+
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
const OperandVector &Operands) {
@@ -6006,9 +6247,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
return Error(Operands[3]->getStartLoc(),
"writeback operator '!' not allowed when base register "
"in register list");
- if (listContainsReg(Inst, 3 + HasWritebackToken, ARM::SP))
- return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
- "SP not allowed in register list");
+
+ if (validatetLDMRegList(Inst, Operands, 3))
+ return true;
break;
}
case ARM::LDMIA_UPD:
@@ -6025,13 +6266,14 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
break;
case ARM::t2LDMIA:
case ARM::t2LDMDB:
+ if (validatetLDMRegList(Inst, Operands, 3))
+ return true;
+ break;
case ARM::t2STMIA:
- case ARM::t2STMDB: {
- if (listContainsReg(Inst, 3, ARM::SP))
- return Error(Operands.back()->getStartLoc(),
- "SP not allowed in register list");
+ case ARM::t2STMDB:
+ if (validatetSTMRegList(Inst, Operands, 3))
+ return true;
break;
- }
case ARM::t2LDMIA_UPD:
case ARM::t2LDMDB_UPD:
case ARM::t2STMIA_UPD:
@@ -6040,9 +6282,13 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
return Error(Operands.back()->getStartLoc(),
"writeback register not allowed in register list");
- if (listContainsReg(Inst, 4, ARM::SP))
- return Error(Operands.back()->getStartLoc(),
- "SP not allowed in register list");
+ if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
+ if (validatetLDMRegList(Inst, Operands, 3))
+ return true;
+ } else {
+ if (validatetSTMRegList(Inst, Operands, 3))
+ return true;
+ }
break;
}
case ARM::sysLDMIA_UPD:
@@ -6087,6 +6333,8 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or pc");
+ if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
+ return true;
break;
}
case ARM::tPUSH: {
@@ -6095,6 +6343,8 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
!isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or lr");
+ if (validatetSTMRegList(Inst, Operands, 2))
+ return true;
break;
}
case ARM::tSTMIA_UPD: {
@@ -6111,9 +6361,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
return Error(Operands[4]->getStartLoc(),
"writeback operator '!' not allowed when base register "
"in register list");
- if (listContainsReg(Inst, 4, ARM::SP) && !inITBlock())
- return Error(Operands.back()->getStartLoc(),
- "SP not allowed in register list");
+
+ if (validatetSTMRegList(Inst, Operands, 4))
+ return true;
break;
}
case ARM::tADDrSP: {
@@ -6434,7 +6684,8 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
}
bool ARMAsmParser::processInstruction(MCInst &Inst,
- const OperandVector &Operands) {
+ const OperandVector &Operands,
+ MCStreamer &Out) {
switch (Inst.getOpcode()) {
// Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
case ARM::LDRT_POST:
@@ -6475,12 +6726,35 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
// Alias for alternate form of 'ADR Rd, #imm' instruction.
case ARM::ADDri: {
if (Inst.getOperand(1).getReg() != ARM::PC ||
- Inst.getOperand(5).getReg() != 0)
+ Inst.getOperand(5).getReg() != 0 ||
+ !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
return false;
MCInst TmpInst;
TmpInst.setOpcode(ARM::ADR);
TmpInst.addOperand(Inst.getOperand(0));
- TmpInst.addOperand(Inst.getOperand(2));
+ if (Inst.getOperand(2).isImm()) {
+ // Immediate (mod_imm) will be in its encoded form, we must unencode it
+ // before passing it to the ADR instruction.
+ unsigned Enc = Inst.getOperand(2).getImm();
+ TmpInst.addOperand(MCOperand::CreateImm(
+ ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
+ } else {
+ // Turn PC-relative expression into absolute expression.
+ // Reading PC yields the address of the current instruction + 8,
+ // and the transform to ADR is biased by that.
+ MCSymbol *Dot = getContext().CreateTempSymbol();
+ Out.EmitLabel(Dot);
+ const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
+ const MCExpr *InstPC = MCSymbolRefExpr::Create(Dot,
+ MCSymbolRefExpr::VK_None,
+ getContext());
+ const MCExpr *Const8 = MCConstantExpr::Create(8, getContext());
+ const MCExpr *ReadPC = MCBinaryExpr::CreateAdd(InstPC, Const8,
+ getContext());
+ const MCExpr *FixupAddr = MCBinaryExpr::CreateAdd(ReadPC, OpExpr,
+ getContext());
+ TmpInst.addOperand(MCOperand::CreateExpr(FixupAddr));
+ }
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
@@ -8302,7 +8576,6 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
MatchingInlineAsm);
switch (MatchResult) {
- default: break;
case Match_Success:
// Context sensitive operand constraints aren't handled by the matcher,
// so check them here.
@@ -8320,7 +8593,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other. E.g.,
// tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
- while (processInstruction(Inst, Operands))
+ while (processInstruction(Inst, Operands, Out))
;
// Only after the instruction is fully processed, we can validate it
@@ -8732,7 +9005,7 @@ bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
Parser.Lex(); // Consume the EndOfStatement
- if (!RegisterReqs.insert(std::make_pair(Name, Reg)).second) {
+ if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) {
Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
return false;
}
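Illustrative .req behaviour after this change; a redefinition is now accepted when it names the same register:

   fred .req r5   @ defines fred
   fred .req r5   @ accepted: matches the original
   fred .req r6   @ error: redefinition does not match original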
@@ -8858,8 +9131,13 @@ bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
if (Tag == ARMBuildAttrs::compatibility) {
if (Parser.getTok().isNot(AsmToken::Comma))
IsStringValue = false;
- else
- Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Error(Parser.getTok().getLoc(), "comma expected");
+ Parser.eatToEndOfStatement();
+ return false;
+ } else {
+ Parser.Lex();
+ }
}
if (IsStringValue) {
@@ -8888,38 +9166,78 @@ bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
StringRef CPU = getParser().parseStringToEndOfStatement().trim();
getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
+
+ if (!STI.isCPUStringValid(CPU)) {
+ Error(L, "Unknown CPU name");
+ return false;
+ }
+
+ // FIXME: This switches the CPU features globally, so code that you would
+ // not expect to assemble may now assemble. For details
+ // see: http://llvm.org/bugs/show_bug.cgi?id=20757
+ STI.InitMCProcessorInfo(CPU, "");
+ STI.InitCPUSchedModel(CPU);
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+
return false;
}
// FIXME: This is duplicated in getARMFPUFeatures() in
// tools/clang/lib/Driver/Tools.cpp
static const struct {
- const unsigned Fpu;
+ const unsigned ID;
const uint64_t Enabled;
const uint64_t Disabled;
-} Fpus[] = {
- {ARM::VFP, ARM::FeatureVFP2, ARM::FeatureNEON},
- {ARM::VFPV2, ARM::FeatureVFP2, ARM::FeatureNEON},
- {ARM::VFPV3, ARM::FeatureVFP3, ARM::FeatureNEON},
- {ARM::VFPV3_D16, ARM::FeatureVFP3 | ARM::FeatureD16, ARM::FeatureNEON},
- {ARM::VFPV4, ARM::FeatureVFP4, ARM::FeatureNEON},
- {ARM::VFPV4_D16, ARM::FeatureVFP4 | ARM::FeatureD16, ARM::FeatureNEON},
- {ARM::FPV5_D16, ARM::FeatureFPARMv8 | ARM::FeatureD16,
- ARM::FeatureNEON | ARM::FeatureCrypto},
- {ARM::FP_ARMV8, ARM::FeatureFPARMv8,
- ARM::FeatureNEON | ARM::FeatureCrypto},
- {ARM::NEON, ARM::FeatureNEON, 0},
- {ARM::NEON_VFPV4, ARM::FeatureVFP4 | ARM::FeatureNEON, 0},
- {ARM::NEON_FP_ARMV8, ARM::FeatureFPARMv8 | ARM::FeatureNEON,
- ARM::FeatureCrypto},
- {ARM::CRYPTO_NEON_FP_ARMV8,
- ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto, 0},
- {ARM::SOFTVFP, 0, 0},
+} FPUs[] = {
+ {/* ID */ ARM::VFP,
+ /* Enabled */ ARM::FeatureVFP2,
+ /* Disabled */ ARM::FeatureNEON},
+ {/* ID */ ARM::VFPV2,
+ /* Enabled */ ARM::FeatureVFP2,
+ /* Disabled */ ARM::FeatureNEON},
+ {/* ID */ ARM::VFPV3,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3,
+ /* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
+ {/* ID */ ARM::VFPV3_D16,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureD16,
+ /* Disabled */ ARM::FeatureNEON},
+ {/* ID */ ARM::VFPV4,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4,
+ /* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
+ {/* ID */ ARM::VFPV4_D16,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureD16,
+ /* Disabled */ ARM::FeatureNEON},
+ {/* ID */ ARM::FPV5_D16,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureFPARMv8 | ARM::FeatureD16,
+ /* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto},
+ {/* ID */ ARM::FP_ARMV8,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureFPARMv8,
+ /* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto | ARM::FeatureD16},
+ {/* ID */ ARM::NEON,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureNEON,
+ /* Disabled */ ARM::FeatureD16},
+ {/* ID */ ARM::NEON_VFPV4,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureNEON,
+ /* Disabled */ ARM::FeatureD16},
+ {/* ID */ ARM::NEON_FP_ARMV8,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureFPARMv8 | ARM::FeatureNEON,
+ /* Disabled */ ARM::FeatureCrypto | ARM::FeatureD16},
+ {/* ID */ ARM::CRYPTO_NEON_FP_ARMV8,
+ /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
+ ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto,
+ /* Disabled */ ARM::FeatureD16},
+ {ARM::SOFTVFP, 0, 0},
};
/// parseDirectiveFPU
/// ::= .fpu str
bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
+ SMLoc FPUNameLoc = getTok().getLoc();
StringRef FPU = getParser().parseStringToEndOfStatement().trim();
unsigned ID = StringSwitch<unsigned>(FPU)
@@ -8928,18 +9246,18 @@ bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
.Default(ARM::INVALID_FPU);
if (ID == ARM::INVALID_FPU) {
- Error(L, "Unknown FPU name");
+ Error(FPUNameLoc, "Unknown FPU name");
return false;
}
- for (const auto &Fpu : Fpus) {
- if (Fpu.Fpu != ID)
+ for (const auto &Entry : FPUs) {
+ if (Entry.ID != ID)
continue;
// Need to toggle features that should be on but are off and that
// should off but are on.
- uint64_t Toggle = (Fpu.Enabled & ~STI.getFeatureBits()) |
- (Fpu.Disabled & STI.getFeatureBits());
+ uint64_t Toggle = (Entry.Enabled & ~STI.getFeatureBits()) |
+ (Entry.Disabled & STI.getFeatureBits());
setAvailableFeatures(ComputeAvailableFeatures(STI.ToggleFeature(Toggle)));
break;
}
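As a worked example of the toggle: starting from a subtarget whose feature bits contain only FeatureVFP2, ".fpu neon-vfpv4" selects the NEON_VFPV4 entry, so Toggle = (Enabled & ~Bits) | (Disabled & Bits) = FeatureVFP3 | FeatureVFP4 | FeatureNEON, i.e. three features switch on and nothing switches off since FeatureD16 was already clear.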
@@ -9766,7 +10084,7 @@ unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
if (CE->getValue() == 0)
return Match_Success;
break;
- case MCK_ARMSOImm:
+ case MCK_ModImm:
if (Op.isImm()) {
const MCExpr *SOExpr = Op.getImm();
int64_t Value;
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index ef65418..4d5122a 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -176,8 +176,6 @@ static DecodeStatus DecodePredicateOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSOImmOperand(MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder);
static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeSPRRegListOperand(MCInst &Inst, unsigned Val,
@@ -405,6 +403,28 @@ static MCDisassembler *createThumbDisassembler(const Target &T,
return new ThumbDisassembler(STI, Ctx);
}
+// Post-decoding checks
+static DecodeStatus checkDecodedInstruction(MCInst &MI, uint64_t &Size,
+ uint64_t Address, raw_ostream &OS,
+ raw_ostream &CS,
+ uint32_t Insn,
+ DecodeStatus Result)
+{
+ switch (MI.getOpcode()) {
+ case ARM::HVC: {
+    // HVC is undefined if the condition is 0xF and unpredictable
+    // if the condition is anything other than 0xE.
+ uint32_t Cond = (Insn >> 28) & 0xF;
+ if (Cond == 0xF)
+ return MCDisassembler::Fail;
+ if (Cond != 0xE)
+ return MCDisassembler::SoftFail;
+ return Result;
+ }
+ default: return Result;
+ }
+}
+
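A quick sanity check of the condition-field extraction, assuming the A32 layout where bits [31:28] hold the condition code:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Insn = 0xE1400070; // HVC #0 with cond = AL (0xE): decodes cleanly
  assert(((Insn >> 28) & 0xF) == 0xE);

  Insn = (Insn & 0x0FFFFFFF) | (0xFu << 28); // cond = 0xF: a hard Fail
  assert(((Insn >> 28) & 0xF) == 0xF);
  return 0;
}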
DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
ArrayRef<uint8_t> Bytes,
uint64_t Address, raw_ostream &OS,
@@ -430,7 +450,7 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
decodeInstruction(DecoderTableARM32, MI, Insn, Address, this, STI);
if (Result != MCDisassembler::Fail) {
Size = 4;
- return Result;
+ return checkDecodedInstruction(MI, Size, Address, OS, CS, Insn, Result);
}
// VFP and NEON instructions, similarly, are shared between ARM
@@ -1113,15 +1133,6 @@ static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeSOImmOperand(MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder) {
- uint32_t imm = Val & 0xFF;
- uint32_t rot = (Val & 0xF00) >> 7;
- uint32_t rot_imm = (imm >> rot) | (imm << ((32-rot) & 0x1F));
- Inst.addOperand(MCOperand::CreateImm(rot_imm));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeSORegImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -4960,7 +4971,7 @@ static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, uint32_t Val,
DecodeStatus S = MCDisassembler::Success;
// Shift of "asr #32" is not allowed in Thumb2 mode.
- if (Val == 0x20) S = MCDisassembler::SoftFail;
+ if (Val == 0x20) S = MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(Val));
return S;
}
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 0570084..16eea33 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -269,7 +269,7 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
// expressed as a GPRPair, so we have to manually merge them.
// FIXME: We would really like to be able to tablegen'erate this.
case ARM::LDREXD: case ARM::STREXD:
- case ARM::LDAEXD: case ARM::STLEXD:
+ case ARM::LDAEXD: case ARM::STLEXD: {
const MCRegisterClass& MRC = MRI.getRegClass(ARM::GPRRegClassID);
bool isStore = Opcode == ARM::STREXD || Opcode == ARM::STLEXD;
unsigned Reg = MI->getOperand(isStore ? 1 : 0).getReg();
@@ -290,6 +290,23 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
printInstruction(&NewMI, O);
return;
}
+ break;
+ }
+ // B9.3.3 ERET (Thumb)
+ // For a target that has Virtualization Extensions, ERET is the preferred
+ // disassembly of SUBS PC, LR, #0
+ case ARM::t2SUBS_PC_LR: {
+ if (MI->getNumOperands() == 3 &&
+ MI->getOperand(0).isImm() &&
+ MI->getOperand(0).getImm() == 0 &&
+ (getAvailableFeatures() & ARM::FeatureVirtualization)) {
+ O << "\teret";
+ printPredicateOperand(MI, 1, O);
+ printAnnotation(O, Annot);
+ return;
+ }
+ break;
+ }
}
printInstruction(MI, O);
@@ -1301,6 +1318,52 @@ void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum,
O << markup(">");
}
+void ARMInstPrinter::printModImmOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ MCOperand Op = MI->getOperand(OpNum);
+
+ // Support for fixups (MCFixup)
+ if (Op.isExpr())
+ return printOperand(MI, OpNum, O);
+
+ unsigned Bits = Op.getImm() & 0xFF;
+ unsigned Rot = (Op.getImm() & 0xF00) >> 7;
+
+ bool PrintUnsigned = false;
+  switch (MI->getOpcode()) {
+ case ARM::MOVi:
+ // Movs to PC should be treated unsigned
+ PrintUnsigned = (MI->getOperand(OpNum - 1).getReg() == ARM::PC);
+ break;
+ case ARM::MSRi:
+ // Movs to special registers should be treated unsigned
+ PrintUnsigned = true;
+ break;
+ }
+
+ int32_t Rotated = ARM_AM::rotr32(Bits, Rot);
+ if (ARM_AM::getSOImmVal(Rotated) == Op.getImm()) {
+ // #rot has the least possible value
+ O << "#" << markup("<imm:");
+ if (PrintUnsigned)
+ O << static_cast<uint32_t>(Rotated);
+ else
+ O << Rotated;
+ O << markup(">");
+ return;
+ }
+
+ // Explicit #bits, #rot implied
+ O << "#"
+ << markup("<imm:")
+ << Bits
+ << markup(">")
+ << ", #"
+ << markup("<imm:")
+ << Rot
+ << markup(">");
+}
+
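The decode step above follows the usual A32 modified-immediate rule: an 8-bit payload rotated right by twice the 4-bit rotate field. A worked example with a local stand-in for ARM_AM::rotr32 (the real helper lives in ARMAddressingModes.h):

#include <cassert>
#include <cstdint>

// Stand-in for ARM_AM::rotr32, guarded against a zero rotate amount.
static uint32_t rotr32(uint32_t Val, uint32_t Amt) {
  Amt &= 31;
  return Amt ? (Val >> Amt) | (Val << (32 - Amt)) : Val;
}

int main() {
  uint32_t Imm = 0x4FF;              // encoded operand
  uint32_t Bits = Imm & 0xFF;        // payload: 0xFF
  uint32_t Rot = (Imm & 0xF00) >> 7; // rotate field 0x4, doubled: 8
  assert(rotr32(Bits, Rot) == 0xFF000000u);
  return 0;
}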
void ARMInstPrinter::printFBits16(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
O << markup("<imm:")
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 09fd536..f179e01 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -131,6 +131,7 @@ public:
void printNEONModImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printImmPlusOneOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printRotImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printModImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printGPRPairOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index f24b419..a821a6b 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -51,7 +51,7 @@ ARMELFObjectWriter::~ARMELFObjectWriter() {}
bool ARMELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
unsigned Type) const {
- // FIXME: This is extremelly conservative. This really needs to use a
+ // FIXME: This is extremely conservative. This really needs to use a
// whitelist with a clear explanation for why each relocation needs to
// point to the symbol, not to the section.
switch (Type) {
@@ -148,6 +148,22 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
} else {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_1:
+ switch (Modifier) {
+ default: llvm_unreachable("unsupported Modifier");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_ABS8;
+ break;
+ }
+ break;
+ case FK_Data_2:
+ switch (Modifier) {
+ default: llvm_unreachable("unsupported modifier");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_ABS16;
+ break;
+ }
+ break;
case FK_Data_4:
switch (Modifier) {
default: llvm_unreachable("Unsupported Modifier");
@@ -184,6 +200,9 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_PREL31:
Type = ELF::R_ARM_PREL31;
break;
+ case MCSymbolRefExpr::VK_ARM_SBREL:
+ Type = ELF::R_ARM_SBREL32;
+ break;
case MCSymbolRefExpr::VK_ARM_TLSLDO:
Type = ELF::R_ARM_TLS_LDO32;
break;
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 24ee537..2b65520 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -15,6 +15,7 @@
#include "ARMArchName.h"
#include "ARMFPUName.h"
+#include "ARMArchExtName.h"
#include "ARMRegisterInfo.h"
#include "ARMUnwindOpAsm.h"
#include "llvm/ADT/StringExtras.h"
@@ -105,6 +106,19 @@ static unsigned GetArchDefaultCPUArch(unsigned ID) {
return 0;
}
+static const char *GetArchExtName(unsigned ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unknown ARCH Extension kind");
+ break;
+#define ARM_ARCHEXT_NAME(NAME, ID) \
+ case ARM::ID: \
+ return NAME;
+#include "ARMArchExtName.def"
+ }
+ return nullptr;
+}
+
namespace {
class ARMELFStreamer;
@@ -134,6 +148,7 @@ class ARMTargetAsmStreamer : public ARMTargetStreamer {
void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
StringRef StringValue) override;
void emitArch(unsigned Arch) override;
+ void emitArchExtension(unsigned ArchExt) override;
void emitObjectArch(unsigned Arch) override;
void emitFPU(unsigned FPU) override;
void emitInst(uint32_t Inst, char Suffix = '\0') override;
@@ -249,6 +264,9 @@ void ARMTargetAsmStreamer::emitIntTextAttribute(unsigned Attribute,
void ARMTargetAsmStreamer::emitArch(unsigned Arch) {
OS << "\t.arch\t" << GetArchName(Arch) << "\n";
}
+void ARMTargetAsmStreamer::emitArchExtension(unsigned ArchExt) {
+ OS << "\t.arch_extension\t" << GetArchExtName(ArchExt) << "\n";
+}
void ARMTargetAsmStreamer::emitObjectArch(unsigned Arch) {
OS << "\t.object_arch\t" << GetArchName(Arch) << '\n';
}
@@ -300,7 +318,19 @@ private:
StringRef StringValue;
static bool LessTag(const AttributeItem &LHS, const AttributeItem &RHS) {
- return (LHS.Tag < RHS.Tag);
+ // The conformance tag must be emitted first when serialised
+ // into an object file. Specifically, the addenda to the ARM ABI
+ // states that (2.3.7.4):
+ //
+ // "To simplify recognition by consumers in the common case of
+ // claiming conformity for the whole file, this tag should be
+ // emitted first in a file-scope sub-subsection of the first
+ // public subsection of the attributes section."
+ //
+ // So it is special-cased in this comparison predicate when the
+ // attributes are sorted in finishAttributeSection().
+ return (RHS.Tag != ARMBuildAttrs::conformance) &&
+ ((LHS.Tag == ARMBuildAttrs::conformance) || (LHS.Tag < RHS.Tag));
}
};
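The predicate still yields a strict weak ordering: conformance compares less than every other tag, equal conformance tags compare equivalent, and all remaining tags fall back to plain numeric order. A minimal standalone check (the tag value 67 is a placeholder standing in for ARMBuildAttrs::conformance):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  const unsigned Conformance = 67; // placeholder for ARMBuildAttrs::conformance
  struct Item { unsigned Tag; };
  auto LessTag = [&](const Item &L, const Item &R) {
    return (R.Tag != Conformance) &&
           ((L.Tag == Conformance) || (L.Tag < R.Tag));
  };
  std::vector<Item> Items = {{6}, {Conformance}, {5}};
  std::sort(Items.begin(), Items.end(), LessTag);
  assert(Items[0].Tag == Conformance); // conformance always sorts first
  assert(Items[1].Tag == 5 && Items[2].Tag == 6);
  return 0;
}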
@@ -541,6 +571,10 @@ public:
/// necessary.
void EmitValueImpl(const MCExpr *Value, unsigned Size,
const SMLoc &Loc) override {
+ if (const MCSymbolRefExpr *SRE = dyn_cast_or_null<MCSymbolRefExpr>(Value))
+    if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_SBREL && Size != 4)
+ getContext().FatalError(Loc, "relocated expression must be 32-bit");
+
EmitDataMappingSymbol();
MCELFStreamer::EmitValueImpl(Value, Size);
}
@@ -942,11 +976,8 @@ void ARMTargetELFStreamer::finishAttributeSection() {
if (AttributeSection) {
Streamer.SwitchSection(AttributeSection);
} else {
- AttributeSection =
- Streamer.getContext().getELFSection(".ARM.attributes",
- ELF::SHT_ARM_ATTRIBUTES,
- 0,
- SectionKind::getMetadata());
+ AttributeSection = Streamer.getContext().getELFSection(
+ ".ARM.attributes", ELF::SHT_ARM_ATTRIBUTES, 0);
Streamer.SwitchSection(AttributeSection);
// Format version
@@ -979,12 +1010,12 @@ void ARMTargetELFStreamer::finishAttributeSection() {
Streamer.EmitULEB128IntValue(item.IntValue);
break;
case AttributeItem::TextAttribute:
- Streamer.EmitBytes(item.StringValue.upper());
+ Streamer.EmitBytes(item.StringValue);
Streamer.EmitIntValue(0, 1); // '\0'
break;
case AttributeItem::NumericAndTextAttributes:
Streamer.EmitULEB128IntValue(item.IntValue);
- Streamer.EmitBytes(item.StringValue.upper());
+ Streamer.EmitBytes(item.StringValue);
Streamer.EmitIntValue(0, 1); // '\0'
break;
}
@@ -1053,11 +1084,11 @@ inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix,
// Get .ARM.extab or .ARM.exidx section
const MCSectionELF *EHSection = nullptr;
if (const MCSymbol *Group = FnSection.getGroup()) {
- EHSection = getContext().getELFSection(
- EHSecName, Type, Flags | ELF::SHF_GROUP, Kind,
- FnSection.getEntrySize(), Group->getName());
+ EHSection =
+ getContext().getELFSection(EHSecName, Type, Flags | ELF::SHF_GROUP,
+ FnSection.getEntrySize(), Group->getName());
} else {
- EHSection = getContext().getELFSection(EHSecName, Type, Flags, Kind);
+ EHSection = getContext().getELFSection(EHSecName, Type, Flags);
}
assert(EHSection && "Failed to get the required EH section");
@@ -1341,10 +1372,8 @@ MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
return S;
}
-MCStreamer *createARMNullStreamer(MCContext &Ctx) {
- MCStreamer *S = llvm::createNullStreamer(Ctx);
- new ARMTargetStreamer(*S);
- return S;
+MCTargetStreamer *createARMNullTargetStreamer(MCStreamer &S) {
+ return new ARMTargetStreamer(S);
}
MCELFStreamer *createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index 1d82099..66a1618 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "ARMMCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -89,6 +89,7 @@ ARMCOFFMCAsmInfoMicrosoft::ARMCOFFMCAsmInfoMicrosoft() {
AlignmentIsInBytes = false;
PrivateGlobalPrefix = "$M";
+ PrivateLabelPrefix = "$M";
}
void ARMCOFFMCAsmInfoGNU::anchor() { }
@@ -101,6 +102,7 @@ ARMCOFFMCAsmInfoGNU::ARMCOFFMCAsmInfoGNU() {
Code16Directive = ".code\t16";
Code32Directive = ".code\t32";
PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::None;
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
index f1fef41..6cb4715 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
@@ -21,7 +21,8 @@
namespace llvm {
class ARMMCAsmInfoDarwin : public MCAsmInfoDarwin {
- void anchor() override;
+ virtual void anchor();
+
public:
explicit ARMMCAsmInfoDarwin(StringRef TT);
};
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index b8ee555..efbebd3 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -37,8 +37,8 @@ STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
namespace {
class ARMMCCodeEmitter : public MCCodeEmitter {
- ARMMCCodeEmitter(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ ARMMCCodeEmitter(const ARMMCCodeEmitter &) = delete;
+ void operator=(const ARMMCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
const MCContext &CTX;
bool IsLittleEndian;
@@ -304,6 +304,28 @@ public:
return Binary;
}
+ unsigned getModImmOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &ST) const {
+ const MCOperand &MO = MI.getOperand(Op);
+
+ // Support for fixups (MCFixup)
+ if (MO.isExpr()) {
+ const MCExpr *Expr = MO.getExpr();
+    // In the instruction encoding this value always occupies the lowest
+    // 12 bits, so we don't have to perform any specific adjustments.
+ // Due to requirements of relocatable records we have to use FK_Data_4.
+ // See ARMELFObjectWriter::ExplicitRelSym and
+ // ARMELFObjectWriter::GetRelocTypeInner for more details.
+ MCFixupKind Kind = MCFixupKind(FK_Data_4);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
+ return 0;
+ }
+
+ // Immediate is already in its encoded format
+ return MO.getImm();
+ }
+
/// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value.
unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
SmallVectorImpl<MCFixup> &Fixups,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index 98190ba..8c19785 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -64,10 +64,60 @@ static bool getMCRDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
}
static bool getITDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
- std::string &Info) {
- if (STI.getFeatureBits() & llvm::ARM::HasV8Ops &&
- MI.getOperand(1).isImm() && MI.getOperand(1).getImm() != 8) {
- Info = "applying IT instruction to more than one subsequent instruction is deprecated";
+ std::string &Info) {
+ if (STI.getFeatureBits() & llvm::ARM::HasV8Ops && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() != 8) {
+ Info = "applying IT instruction to more than one subsequent instruction is "
+ "deprecated";
+ return true;
+ }
+
+ return false;
+}
+
+static bool getARMStoreDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
+ std::string &Info) {
+ assert((~STI.getFeatureBits() & llvm::ARM::ModeThumb) &&
+ "cannot predicate thumb instructions");
+
+ assert(MI.getNumOperands() >= 4 && "expected >= 4 arguments");
+ for (unsigned OI = 4, OE = MI.getNumOperands(); OI < OE; ++OI) {
+ assert(MI.getOperand(OI).isReg() && "expected register");
+ if (MI.getOperand(OI).getReg() == ARM::SP ||
+ MI.getOperand(OI).getReg() == ARM::PC) {
+ Info = "use of SP or PC in the list is deprecated";
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool getARMLoadDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
+ std::string &Info) {
+ assert((~STI.getFeatureBits() & llvm::ARM::ModeThumb) &&
+ "cannot predicate thumb instructions");
+
+ assert(MI.getNumOperands() >= 4 && "expected >= 4 arguments");
+ bool ListContainsPC = false, ListContainsLR = false;
+ for (unsigned OI = 4, OE = MI.getNumOperands(); OI < OE; ++OI) {
+ assert(MI.getOperand(OI).isReg() && "expected register");
+ switch (MI.getOperand(OI).getReg()) {
+ default:
+ break;
+ case ARM::LR:
+ ListContainsLR = true;
+ break;
+ case ARM::PC:
+ ListContainsPC = true;
+ break;
+ case ARM::SP:
+ Info = "use of SP in the list is deprecated";
+ return true;
+ }
+ }
+
+ if (ListContainsPC && ListContainsLR) {
+ Info = "use of LR and PC simultaneously in the list is deprecated";
return true;
}
@@ -405,11 +455,15 @@ extern "C" void LLVMInitializeARMTargetMC() {
TargetRegistry::RegisterAsmStreamer(TheThumbLETarget, createMCAsmStreamer);
TargetRegistry::RegisterAsmStreamer(TheThumbBETarget, createMCAsmStreamer);
- // Register the null streamer.
- TargetRegistry::RegisterNullStreamer(TheARMLETarget, createARMNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheARMBETarget, createARMNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheThumbLETarget, createARMNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheThumbBETarget, createARMNullStreamer);
+ // Register the null TargetStreamer.
+ TargetRegistry::RegisterNullTargetStreamer(TheARMLETarget,
+ createARMNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheARMBETarget,
+ createARMNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheThumbLETarget,
+ createARMNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheThumbBETarget,
+ createARMNullTargetStreamer);
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(TheARMLETarget, createARMMCInstPrinter);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index a6c20d5..c17e959 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -29,6 +29,7 @@ class MCRegisterInfo;
class MCSubtargetInfo;
class MCStreamer;
class MCRelocationInfo;
+class MCTargetStreamer;
class StringRef;
class Target;
class raw_ostream;
@@ -51,7 +52,7 @@ MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
MCInstPrinter *InstPrint, MCCodeEmitter *CE,
MCAsmBackend *TAB, bool ShowInst);
-MCStreamer *createARMNullStreamer(MCContext &Ctx);
+MCTargetStreamer *createARMNullTargetStreamer(MCStreamer &S);
MCCodeEmitter *createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 7da5003..3187d36 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -54,10 +54,10 @@ public:
: MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
/*UseAggressiveSymbolFolding=*/true) {}
- void RecordRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue) override;
+ void RecordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) override;
};
}
@@ -232,7 +232,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value2;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
}
MachO::any_relocation_info MRE;
@@ -243,7 +243,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
}
void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
@@ -297,7 +297,7 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value2;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
}
MachO::any_relocation_info MRE;
@@ -307,7 +307,7 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
}
bool ARMMachObjectWriter::requiresExternRelocation(MachObjectWriter *Writer,
@@ -351,11 +351,10 @@ bool ARMMachObjectWriter::requiresExternRelocation(MachObjectWriter *Writer,
}
void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm,
+ MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
+ const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Log2Size;
@@ -401,8 +400,8 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned Index = 0;
- unsigned IsExtern = 0;
unsigned Type = 0;
+ const MCSymbolData *RelSymbol = nullptr;
if (Target.isAbsolute()) { // constant
// FIXME!
@@ -422,8 +421,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
// Check whether we need an external or internal relocation.
if (requiresExternRelocation(Writer, Asm, *Fragment, RelocType, SD,
FixedValue)) {
- IsExtern = 1;
- Index = SD->getIndex();
+ RelSymbol = SD;
// For external relocations, make sure to offset the fixup value to
// compensate for the addend of the symbol address, if it was
@@ -447,11 +445,8 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
// Even when it's not a scattered relocation, movw/movt always uses
// a PAIR relocation.
@@ -476,10 +471,10 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
(Log2Size << 25) |
(MachO::ARM_RELOC_PAIR << 28));
- Writer->addRelocation(Fragment->getParent(), MREPair);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MREPair);
}
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
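The packed r_word1 follows the struct relocation_info layout from <mach-o/reloc.h>: a 24-bit symbol/section index, one pcrel bit, a two-bit length, and a four-bit type (the old explicit extern bit at position 27 is now derived from whether RelSymbol is non-null). A sketch of the field packing with sample values:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Index = 7, IsPCRel = 1, Log2Size = 2, Type = 0; // sample values
  uint32_t Word1 =
      (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
  assert((Word1 & 0xFFFFFF) == Index);       // r_symbolnum: bits [23:0]
  assert(((Word1 >> 24) & 0x1) == IsPCRel);  // r_pcrel:     bit 24
  assert(((Word1 >> 25) & 0x3) == Log2Size); // r_length:    bits [26:25]
  assert(((Word1 >> 28) & 0xF) == Type);     // r_type:      bits [31:28]
  return 0;
}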
MCObjectWriter *llvm::createARMMachObjectWriter(raw_ostream &OS,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
index 8acd7af..b680db5 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -63,6 +63,7 @@ void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
unsigned IntValue,
StringRef StringValue) {}
void ARMTargetStreamer::emitArch(unsigned Arch) {}
+void ARMTargetStreamer::emitArchExtension(unsigned ArchExt) {}
void ARMTargetStreamer::emitObjectArch(unsigned Arch) {}
void ARMTargetStreamer::emitFPU(unsigned FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
index d31f1f4..2fd6445 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
@@ -8,7 +8,10 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/ARMFixupKinds.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/Support/COFF.h"
@@ -26,14 +29,16 @@ public:
virtual ~ARMWinCOFFObjectWriter() { }
unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsCrossSection) const override;
+ bool IsCrossSection,
+ const MCAsmBackend &MAB) const override;
bool recordRelocation(const MCFixup &) const override;
};
unsigned ARMWinCOFFObjectWriter::getRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsCrossSection) const {
+ bool IsCrossSection,
+ const MCAsmBackend &MAB) const {
assert(getMachine() == COFF::IMAGE_FILE_MACHINE_ARMNT &&
"AArch64 support not yet implemented");
@@ -41,7 +46,10 @@ unsigned ARMWinCOFFObjectWriter::getRelocType(const MCValue &Target,
Target.isAbsolute() ? MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
switch (static_cast<unsigned>(Fixup.getKind())) {
- default: llvm_unreachable("unsupported relocation type");
+ default: {
+ const MCFixupKindInfo &Info = MAB.getFixupKindInfo(Fixup.getKind());
+ report_fatal_error(Twine("unsupported relocation type: ") + Info.Name);
+ }
case FK_Data_4:
switch (Modifier) {
case MCSymbolRefExpr::VK_COFF_IMGREL32:
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index 35fe9b3..51e519d 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -381,7 +381,7 @@ bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
TII = static_cast<const ARMBaseInstrInfo *>(Fn.getSubtarget().getInstrInfo());
TRI = Fn.getSubtarget().getRegisterInfo();
MRI = &Fn.getRegInfo();
- const ARMSubtarget *STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget *STI = &Fn.getSubtarget<ARMSubtarget>();
isLikeA9 = STI->isLikeA9() || STI->isSwift();
isSwift = STI->isSwift();
diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp
index 6deab4f..7dcc64e 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -52,9 +52,9 @@ void Thumb1FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
const Thumb1InstrInfo &TII =
- *static_cast<const Thumb1InstrInfo *>(MF.getSubtarget().getInstrInfo());
- const Thumb1RegisterInfo *RegInfo = static_cast<const Thumb1RegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ *static_cast<const Thumb1InstrInfo *>(STI.getInstrInfo());
+ const Thumb1RegisterInfo *RegInfo =
+ static_cast<const Thumb1RegisterInfo *>(STI.getRegisterInfo());
if (!hasReservedCallFrame(MF)) {
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
@@ -89,15 +89,12 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- const Thumb1RegisterInfo *RegInfo = static_cast<const Thumb1RegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ const Thumb1RegisterInfo *RegInfo =
+ static_cast<const Thumb1RegisterInfo *>(STI.getRegisterInfo());
const Thumb1InstrInfo &TII =
- *static_cast<const Thumb1InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Thumb1InstrInfo *>(STI.getInstrInfo());
- unsigned Align = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned Align = STI.getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
assert(NumBytes >= ArgRegsSaveSize &&
@@ -124,7 +121,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
if (!AFI->hasStackFrame()) {
@@ -135,7 +133,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
return;
}
@@ -199,7 +198,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
@@ -226,7 +226,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
nullptr, MRI->getDwarfRegNum(Reg, true), MFI->getObjectOffset(FI)));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
break;
}
}
@@ -244,13 +245,15 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfa(
nullptr, MRI->getDwarfRegNum(FramePtr, true), CFAOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
} else {
unsigned CFIIndex =
MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
nullptr, MRI->getDwarfRegNum(FramePtr, true)));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
if (NumBytes > 508)
// If offset is > 508 then sp cannot be adjusted in a single instruction,
@@ -267,7 +270,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameSetup);
}
}
@@ -324,15 +328,12 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
DebugLoc dl = MBBI->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- const Thumb1RegisterInfo *RegInfo = static_cast<const Thumb1RegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ const Thumb1RegisterInfo *RegInfo =
+ static_cast<const Thumb1RegisterInfo *>(STI.getRegisterInfo());
const Thumb1InstrInfo &TII =
- *static_cast<const Thumb1InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Thumb1InstrInfo *>(STI.getInstrInfo());
- unsigned Align = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned Align = STI.getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
int NumBytes = (int)MFI->getStackSize();
assert((unsigned)NumBytes >= ArgRegsSaveSize &&
@@ -459,8 +460,7 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
return false;
DebugLoc DL;
- MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -499,7 +499,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
bool isVarArg = AFI->getArgRegsSaveSize() > 0;
DebugLoc DL = MI->getDebugLoc();
diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp
index 8ea912e..c24f740 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -44,7 +44,7 @@ void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
// Need to check the arch.
MachineFunction &MF = *MBB.getParent();
- const ARMSubtarget &st = MF.getTarget().getSubtarget<ARMSubtarget>();
+ const ARMSubtarget &st = MF.getSubtarget<ARMSubtarget>();
assert(ARM::GPRRegClass.contains(DestReg, SrcReg) &&
"Thumb1 can only copy GPR registers");
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp
index c10c809..5e2cbdc 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -71,7 +71,7 @@ Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
"Thumb1 does not have ldr to high register");
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
@@ -234,7 +234,6 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
// If we would emit the copy with an immediate of 0, just use tMOVr.
if (CopyOpc && Bytes < CopyScale) {
CopyOpc = ARM::tMOVr;
- CopyBits = 0;
CopyScale = 1;
CopyNeedsCC = false;
CopyRange = 0;
@@ -389,12 +388,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
void Thumb1RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
int64_t Offset) const {
- const ARMBaseInstrInfo &TII =
- *static_cast<const ARMBaseInstrInfo *>(MI.getParent()
- ->getParent()
- ->getTarget()
- .getSubtargetImpl()
- ->getInstrInfo());
+ const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
int Off = Offset; // ARM doesn't need the general 64-bit offsets
unsigned i = 0;
@@ -420,7 +414,7 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
// off the frame pointer (if, for example, there are alloca() calls in
// the function, the offset will be negative. Use R12 instead since that's
// a call clobbered register that we know won't be used in Thumb1 mode.
- const TargetInstrInfo &TII = *MBB.getParent()->getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
DebugLoc DL;
AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
.addReg(ARM::R12, RegState::Define)
@@ -466,8 +460,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const ARMBaseInstrInfo &TII =
- *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB(*MBB.getParent(), &MI);
@@ -478,8 +471,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MF.getFrameInfo()->getStackSize() + SPAdj;
if (MF.getFrameInfo()->hasVarSizedObjects()) {
- assert(SPAdj == 0 && MF.getSubtarget().getFrameLowering()->hasFP(MF) &&
- "Unexpected");
+ assert(SPAdj == 0 && STI.getFrameLowering()->hasFP(MF) && "Unexpected");
// There are alloca()'s in this function, must reference off the frame
// pointer or base pointer instead.
if (!hasBasePointer(MF)) {
@@ -495,10 +487,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// when !hasReservedCallFrame().
#ifndef NDEBUG
if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
- assert(MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->hasReservedCallFrame(MF) &&
+ assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
"Cannot use SP to access the emergency spill slot in "
"functions without a reserved call frame");
assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp
index fdcb522..b657f2d 100644
--- a/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -253,12 +253,12 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
}
bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
+ const ARMSubtarget &STI =
+ static_cast<const ARMSubtarget &>(Fn.getSubtarget());
AFI = Fn.getInfo<ARMFunctionInfo>();
- TII = static_cast<const Thumb2InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- TRI = TM.getSubtargetImpl()->getRegisterInfo();
- restrictIT = TM.getSubtarget<ARMSubtarget>().restrictIT();
+ TII = static_cast<const Thumb2InstrInfo *>(STI.getInstrInfo());
+ TRI = STI.getRegisterInfo();
+ restrictIT = STI.restrictIT();
if (!AFI->isThumbFunction())
return false;
diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp
index 91973e1..62c3752 100644
--- a/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -574,13 +574,10 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
}
} else if (AddrMode == ARMII::AddrModeT2_i8s4) {
Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
- NumBits = 8;
- // MCInst operand has already scaled value.
+ NumBits = 10; // 8 bits scaled by 4
+ // MCInst operand expects already scaled value.
Scale = 1;
- if (Offset < 0) {
- isSub = true;
- Offset = -Offset;
- }
+ assert((Offset & 3) == 0 && "Can't encode this offset!");
} else {
llvm_unreachable("Unsupported addressing mode!");
}
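The switch from NumBits = 8 with a scale to NumBits = 10 with Scale = 1 keeps the offset in bytes: the field can express 0 through 1020 as long as the value stays 4-byte aligned, which the new assert enforces. A quick illustration, assuming the shared rewrite path below masks the offset with (1 << NumBits) - 1:

#include <cassert>

int main() {
  const unsigned NumBits = 10;         // 8 encoded bits, pre-scaled by 4
  const int Mask = (1 << NumBits) - 1; // 1023
  int Offset = 1020;                   // largest encodable positive offset
  assert((Offset & 3) == 0);           // must stay 4-byte aligned
  assert((Offset & ~Mask) == 0);       // fits the widened field
  return 0;
}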
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index c51eb8b..2ee908b 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -1001,17 +1001,12 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
}
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- TII = static_cast<const Thumb2InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- STI = &TM.getSubtarget<ARMSubtarget>();
+ STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
+ TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
// Optimizing / minimizing size?
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();
- OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize);
- MinimizeSize =
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
+ MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
diff --git a/lib/Target/Android.mk b/lib/Target/Android.mk
index 4494eb0..1e34a85 100644
--- a/lib/Target/Android.mk
+++ b/lib/Target/Android.mk
@@ -3,7 +3,6 @@ LOCAL_PATH:= $(call my-dir)
target_SRC_FILES := \
Target.cpp \
TargetIntrinsicInfo.cpp \
- TargetLibraryInfo.cpp \
TargetLoweringObjectFile.cpp \
TargetMachineC.cpp \
TargetMachine.cpp \
@@ -20,6 +19,7 @@ LOCAL_MODULE:= libLLVMTarget
LOCAL_MODULE_TAGS := optional
include $(LLVM_HOST_BUILD_MK)
+include $(LLVM_GEN_INTRINSICS_MK)
include $(BUILD_HOST_STATIC_LIBRARY)
# For the device
@@ -34,5 +34,6 @@ LOCAL_MODULE:= libLLVMTarget
LOCAL_MODULE_TAGS := optional
include $(LLVM_DEVICE_BUILD_MK)
+include $(LLVM_GEN_INTRINSICS_MK)
include $(BUILD_STATIC_LIBRARY)
endif
diff --git a/lib/Target/BPF/BPF.h b/lib/Target/BPF/BPF.h
new file mode 100644
index 0000000..4a0cb20
--- /dev/null
+++ b/lib/Target/BPF/BPF.h
@@ -0,0 +1,22 @@
+//===-- BPF.h - Top-level interface for BPF representation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPF_H
+#define LLVM_LIB_TARGET_BPF_BPF_H
+
+#include "MCTargetDesc/BPFMCTargetDesc.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class BPFTargetMachine;
+
+FunctionPass *createBPFISelDag(BPFTargetMachine &TM);
+}
+
+#endif
diff --git a/lib/Target/BPF/BPF.td b/lib/Target/BPF/BPF.td
new file mode 100644
index 0000000..a4ce90a
--- /dev/null
+++ b/lib/Target/BPF/BPF.td
@@ -0,0 +1,31 @@
+//===-- BPF.td - Describe the BPF Target Machine -----------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+include "BPFRegisterInfo.td"
+include "BPFCallingConv.td"
+include "BPFInstrInfo.td"
+
+def BPFInstrInfo : InstrInfo;
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"generic", []>;
+
+def BPFInstPrinter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
+
+def BPF : Target {
+ let InstructionSet = BPFInstrInfo;
+ let AssemblyWriters = [BPFInstPrinter];
+}
diff --git a/lib/Target/BPF/BPFAsmPrinter.cpp b/lib/Target/BPF/BPFAsmPrinter.cpp
new file mode 100644
index 0000000..dbc7bfe
--- /dev/null
+++ b/lib/Target/BPF/BPFAsmPrinter.cpp
@@ -0,0 +1,87 @@
+//===-- BPFAsmPrinter.cpp - BPF LLVM assembly writer ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to the BPF assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFInstrInfo.h"
+#include "BPFMCInstLower.h"
+#include "BPFTargetMachine.h"
+#include "InstPrinter/BPFInstPrinter.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+namespace {
+class BPFAsmPrinter : public AsmPrinter {
+public:
+ explicit BPFAsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
+
+ const char *getPassName() const override { return "BPF Assembly Printer"; }
+
+ void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
+ const char *Modifier = nullptr);
+ void EmitInstruction(const MachineInstr *MI) override;
+};
+}
+
+void BPFAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O, const char *Modifier) {
+ const MachineOperand &MO = MI->getOperand(OpNum);
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ O << BPFInstPrinter::getRegisterName(MO.getReg());
+ break;
+
+ case MachineOperand::MO_Immediate:
+ O << MO.getImm();
+ break;
+
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ O << *getSymbol(MO.getGlobal());
+ break;
+
+ default:
+ llvm_unreachable("<unknown operand type>");
+ }
+}
+
+void BPFAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+
+ BPFMCInstLower MCInstLowering(OutContext, *this);
+
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeBPFAsmPrinter() {
+ RegisterAsmPrinter<BPFAsmPrinter> X(TheBPFTarget);
+}
diff --git a/lib/Target/BPF/BPFCallingConv.td b/lib/Target/BPF/BPFCallingConv.td
new file mode 100644
index 0000000..8cec6fa
--- /dev/null
+++ b/lib/Target/BPF/BPFCallingConv.td
@@ -0,0 +1,29 @@
+//===-- BPFCallingConv.td - Calling Conventions BPF --------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the BPF architecture.
+//
+//===----------------------------------------------------------------------===//
+
+// BPF 64-bit C return-value convention.
+def RetCC_BPF64 : CallingConv<[CCIfType<[i64], CCAssignToReg<[R0]>>]>;
+
+// BPF 64-bit C Calling convention.
+def CC_BPF64 : CallingConv<[
+ // Promote i8/i16/i32 args to i64
+ CCIfType<[ i8, i16, i32 ], CCPromoteToType<i64>>,
+
+ // All arguments get passed in integer registers if there is space.
+ CCIfType<[i64], CCAssignToReg<[ R1, R2, R3, R4, R5 ]>>,
+
+  // Remaining arguments could be assigned to the stack in 8-byte aligned
+  // units, but stack-passed arguments are currently unsupported.
+ CCAssignToStack<8, 8>
+]>;
+
+def CSR : CalleeSavedRegs<(add R6, R7, R8, R9, R10)>;
diff --git a/lib/Target/BPF/BPFFrameLowering.cpp b/lib/Target/BPF/BPFFrameLowering.cpp
new file mode 100644
index 0000000..ae9f355
--- /dev/null
+++ b/lib/Target/BPF/BPFFrameLowering.cpp
@@ -0,0 +1,39 @@
+//===-- BPFFrameLowering.cpp - BPF Frame Information ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the BPF implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPFFrameLowering.h"
+#include "BPFInstrInfo.h"
+#include "BPFSubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+bool BPFFrameLowering::hasFP(const MachineFunction &MF) const { return true; }
+
+void BPFFrameLowering::emitPrologue(MachineFunction &MF) const {}
+
+void BPFFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {}
+
+void BPFFrameLowering::processFunctionBeforeCalleeSavedScan(
+ MachineFunction &MF, RegScavenger *RS) const {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ MRI.setPhysRegUnused(BPF::R6);
+ MRI.setPhysRegUnused(BPF::R7);
+ MRI.setPhysRegUnused(BPF::R8);
+ MRI.setPhysRegUnused(BPF::R9);
+}
diff --git a/lib/Target/BPF/BPFFrameLowering.h b/lib/Target/BPF/BPFFrameLowering.h
new file mode 100644
index 0000000..833046d
--- /dev/null
+++ b/lib/Target/BPF/BPFFrameLowering.h
@@ -0,0 +1,41 @@
+//===-- BPFFrameLowering.h - Define frame lowering for BPF -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements BPF-specific bits of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFFRAMELOWERING_H
+#define LLVM_LIB_TARGET_BPF_BPFFRAMELOWERING_H
+
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+class BPFSubtarget;
+
+class BPFFrameLowering : public TargetFrameLowering {
+public:
+ explicit BPFFrameLowering(const BPFSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0) {}
+
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ bool hasFP(const MachineFunction &MF) const override;
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const override;
+
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override {
+ MBB.erase(MI);
+ }
+};
+}
+#endif
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
new file mode 100644
index 0000000..07f62a9
--- /dev/null
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -0,0 +1,159 @@
+//===-- BPFISelDAGToDAG.cpp - A dag to dag inst selector for BPF ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DAG pattern matching instruction selector for BPF,
+// converting from a legalized dag to a BPF dag.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFRegisterInfo.h"
+#include "BPFSubtarget.h"
+#include "BPFTargetMachine.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/IR/IntrinsicInst.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "bpf-isel"
+
+// Instruction Selector Implementation
+namespace {
+
+class BPFDAGToDAGISel : public SelectionDAGISel {
+public:
+ explicit BPFDAGToDAGISel(BPFTargetMachine &TM) : SelectionDAGISel(TM) {}
+
+ const char *getPassName() const override {
+ return "BPF DAG->DAG Pattern Instruction Selection";
+ }
+
+private:
+// Include the pieces autogenerated from the target description.
+#include "BPFGenDAGISel.inc"
+
+ SDNode *Select(SDNode *N) override;
+
+ // Complex Pattern for address selection.
+ bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
+};
+}
+
+// ComplexPattern used on BPF Load/Store instructions
+bool BPFDAGToDAGISel::SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
+  // If the address is a frame index, use the corresponding TargetFrameIndex.
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+ Offset = CurDAG->getTargetConstant(0, MVT::i64);
+ return true;
+ }
+
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false;
+
+ // Addresses of the form FI+const or FI|const
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isInt<32>(CN->getSExtValue())) {
+
+ // If the first operand is a FI, get the TargetFI Node
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+ else
+ Base = Addr.getOperand(0);
+
+ Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i64);
+ return true;
+ }
+ }
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i64);
+ return true;
+}
+
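SelectAddr therefore produces one of two shapes: (TargetFrameIndex or base register, imm) when the add's constant fits a signed 32-bit immediate, or (whole address, 0) otherwise. The fold condition itself is just a signed-range test; a standalone sketch with a local stand-in for llvm::isInt<32>:

#include <cassert>
#include <cstdint>

// Stand-in for llvm::isInt<32> from MathExtras.h.
static bool isInt32(int64_t X) {
  return X >= INT32_MIN && X <= INT32_MAX;
}

int main() {
  assert(isInt32(16));                // FI + 16 folds to (FI, 16)
  assert(!isInt32(INT64_C(1) << 40)); // too wide: keep (addr, 0) instead
  return 0;
}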
+SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
+ unsigned Opcode = Node->getOpcode();
+
+ // Dump information about the Node being selected
+ DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
+
+ // If we have a custom node, we already have selected!
+ if (Node->isMachineOpcode()) {
+ DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+    return nullptr;
+ }
+
+  // Nodes that need custom handling are selected here; everything else
+  // falls through to the tablegen-generated selector below.
+ switch (Opcode) {
+ default: break;
+
+ case ISD::UNDEF: {
+ errs() << "BUG: "; Node->dump(CurDAG); errs() << '\n';
+ report_fatal_error("shouldn't see UNDEF during Select");
+ break;
+ }
+
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ case Intrinsic::bpf_load_byte:
+ case Intrinsic::bpf_load_half:
+ case Intrinsic::bpf_load_word: {
+ SDLoc DL(Node);
+ SDValue Chain = Node->getOperand(0);
+ SDValue N1 = Node->getOperand(1);
+ SDValue Skb = Node->getOperand(2);
+ SDValue N3 = Node->getOperand(3);
+
+ SDValue R6Reg = CurDAG->getRegister(BPF::R6, MVT::i64);
+ Chain = CurDAG->getCopyToReg(Chain, DL, R6Reg, Skb, SDValue());
+ Node = CurDAG->UpdateNodeOperands(Node, Chain, N1, R6Reg, N3);
+ break;
+ }
+ }
+ break;
+ }
+
+ case ISD::FrameIndex: {
+    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
+ EVT VT = Node->getValueType(0);
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
+ unsigned Opc = BPF::MOV_rr;
+ if (Node->hasOneUse())
+ return CurDAG->SelectNodeTo(Node, Opc, VT, TFI);
+ return CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI);
+ }
+ }
+
+ // Select the default instruction
+ SDNode *ResNode = SelectCode(Node);
+
+ DEBUG(dbgs() << "=> ";
+ if (ResNode == nullptr || ResNode == Node)
+ Node->dump(CurDAG);
+ else
+ ResNode->dump(CurDAG);
+ dbgs() << '\n');
+ return ResNode;
+}
+
+FunctionPass *llvm::createBPFISelDag(BPFTargetMachine &TM) {
+ return new BPFDAGToDAGISel(TM);
+}
diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp
new file mode 100644
index 0000000..d94416b
--- /dev/null
+++ b/lib/Target/BPF/BPFISelLowering.cpp
@@ -0,0 +1,642 @@
+//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that BPF uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPFISelLowering.h"
+#include "BPF.h"
+#include "BPFTargetMachine.h"
+#include "BPFSubtarget.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "bpf-lower"
+
+namespace {
+
+// Diagnostic information for unimplemented or unsupported feature reporting.
+class DiagnosticInfoUnsupported : public DiagnosticInfo {
+private:
+ // Debug location where this diagnostic is triggered.
+ DebugLoc DLoc;
+ const Twine &Description;
+ const Function &Fn;
+ SDValue Value;
+
+ static int KindID;
+
+ static int getKindID() {
+ if (KindID == 0)
+ KindID = llvm::getNextAvailablePluginDiagnosticKind();
+ return KindID;
+ }
+
+public:
+ DiagnosticInfoUnsupported(SDLoc DLoc, const Function &Fn, const Twine &Desc,
+ SDValue Value)
+ : DiagnosticInfo(getKindID(), DS_Error), DLoc(DLoc.getDebugLoc()),
+ Description(Desc), Fn(Fn), Value(Value) {}
+
+ void print(DiagnosticPrinter &DP) const override {
+ std::string Str;
+ raw_string_ostream OS(Str);
+
+    if (!DLoc.isUnknown()) {
+ DILocation DIL(DLoc.getAsMDNode(Fn.getContext()));
+ StringRef Filename = DIL.getFilename();
+ unsigned Line = DIL.getLineNumber();
+ unsigned Column = DIL.getColumnNumber();
+ OS << Filename << ':' << Line << ':' << Column << ' ';
+ }
+
+ OS << "in function " << Fn.getName() << ' ' << *Fn.getFunctionType() << '\n'
+ << Description;
+ if (Value)
+ Value->print(OS);
+ OS << '\n';
+ OS.flush();
+ DP << Str;
+ }
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == getKindID();
+ }
+};
+
+int DiagnosticInfoUnsupported::KindID = 0;
+}
+
+BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
+ const BPFSubtarget &STI)
+ : TargetLowering(TM) {
+
+ // Set up the register classes.
+ addRegisterClass(MVT::i64, &BPF::GPRRegClass);
+
+ // Compute derived properties from the register classes
+ computeRegisterProperties(STI.getRegisterInfo());
+
+ setStackPointerRegisterToSaveRestore(BPF::R11);
+
+ setOperationAction(ISD::BR_CC, MVT::i64, Custom);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+ setOperationAction(ISD::SETCC, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+ setOperationAction(ISD::SUBC, MVT::i64, Expand);
+ setOperationAction(ISD::SUBE, MVT::i64, Expand);
+
+  // UNDEF values are not allowed: the in-kernel verifier rejects reads of
+  // uninitialized registers.
+ setOperationAction(ISD::UNDEF, MVT::i64, Expand);
+
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Custom);
+ setOperationAction(ISD::CTLZ, MVT::i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
+
+  // Extended load operations for i1 types must be promoted; BPF has no
+  // sign-extending loads, so expand SEXTLOAD for the wider types as well.
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
+ }
+
+ setBooleanContents(ZeroOrOneBooleanContent);
+
+ // Function alignments (log2)
+ setMinFunctionAlignment(3);
+ setPrefFunctionAlignment(3);
+
+  // Inline memcpy/memmove/memset expansions so the kernel sees explicit
+  // copies rather than calls into a runtime library.
+ MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 128;
+ MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 128;
+ MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 128;
+}
+
+SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::BR_CC:
+ return LowerBR_CC(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::SELECT_CC:
+ return LowerSELECT_CC(Op, DAG);
+ default:
+ llvm_unreachable("unimplemented operand");
+ }
+}
+
+// Calling Convention Implementation
+#include "BPFGenCallingConv.inc"
+
+SDValue BPFTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ switch (CallConv) {
+ default:
+ llvm_unreachable("Unsupported calling convention");
+ case CallingConv::C:
+ case CallingConv::Fast:
+ break;
+ }
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CC_BPF64);
+
+ for (auto &VA : ArgLocs) {
+ if (VA.isRegLoc()) {
+ // Arguments passed in registers
+ EVT RegVT = VA.getLocVT();
+ switch (RegVT.getSimpleVT().SimpleTy) {
+ default: {
+ errs() << "LowerFormalArguments Unhandled argument type: "
+ << RegVT.getSimpleVT().SimpleTy << '\n';
+        llvm_unreachable(nullptr);
+ }
+ case MVT::i64:
+ unsigned VReg = RegInfo.createVirtualRegister(&BPF::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
+
+ // If this is an 8/16/32-bit value, it is really passed promoted to 64
+ // bits. Insert an assert[sz]ext to capture this, then truncate to the
+ // right size.
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.getLocInfo() != CCValAssign::Full)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
+
+ InVals.push_back(ArgValue);
+ }
+ } else {
+ DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
+ "defined with too many args", SDValue());
+ DAG.getContext()->diagnose(Err);
+ }
+ }
+
+ if (IsVarArg || MF.getFunction()->hasStructRetAttr()) {
+ DiagnosticInfoUnsupported Err(
+ DL, *MF.getFunction(),
+ "functions with VarArgs or StructRet are not supported", SDValue());
+ DAG.getContext()->diagnose(Err);
+ }
+
+ return Chain;
+}
+
+SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ auto &Outs = CLI.Outs;
+ auto &OutVals = CLI.OutVals;
+ auto &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // BPF target does not support tail call optimization.
+ IsTailCall = false;
+
+ switch (CallConv) {
+ default:
+ report_fatal_error("Unsupported calling convention");
+ case CallingConv::Fast:
+ case CallingConv::C:
+ break;
+ }
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+ CCInfo.AnalyzeCallOperands(Outs, CC_BPF64);
+
+ unsigned NumBytes = CCInfo.getNextStackOffset();
+
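+  // BPF passes the first five arguments in R1-R5 and has no stack space for
+  // outgoing arguments, so calls with more arguments cannot be lowered.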
+ if (Outs.size() >= 6) {
+ DiagnosticInfoUnsupported Err(CLI.DL, *MF.getFunction(),
+ "too many args to ", Callee);
+ DAG.getContext()->diagnose(Err);
+ }
+
+ for (auto &Arg : Outs) {
+ ISD::ArgFlagsTy Flags = Arg.Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ DiagnosticInfoUnsupported Err(CLI.DL, *MF.getFunction(),
+ "pass by value not supported ", Callee);
+ DAG.getContext()->diagnose(Err);
+ }
+
+ Chain = DAG.getCALLSEQ_START(
+ Chain, DAG.getConstant(NumBytes, getPointerTy(), true), CLI.DL);
+
+ SmallVector<std::pair<unsigned, SDValue>, 5> RegsToPass;
+
+ // Walk arg assignments
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = OutVals[i];
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
+ break;
+ }
+
+ // Push arguments into RegsToPass vector
+ if (VA.isRegLoc())
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ else
+ llvm_unreachable("call arg pass bug");
+ }
+
+ SDValue InFlag;
+
+ // Build a sequence of copy-to-reg nodes chained together with token chain and
+  // flag operands which copy the outgoing args into registers. The InFlag is
+  // necessary since all emitted instructions must be stuck together.
+ for (auto &Reg : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ // Likewise ExternalSymbol -> TargetExternalSymbol.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, getPointerTy(),
+ G->getOffset(), 0);
+ else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), 0);
+
+ // Returns a chain & a flag for retval copy to use.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (auto &Reg : RegsToPass)
+ Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
+
+ if (InFlag.getNode())
+ Ops.push_back(InFlag);
+
+ Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
+
+ // Create the CALLSEQ_END node.
+ Chain = DAG.getCALLSEQ_END(
+ Chain, DAG.getConstant(NumBytes, getPointerTy(), true),
+ DAG.getConstant(0, getPointerTy(), true), InFlag, CLI.DL);
+ InFlag = Chain.getValue(1);
+
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG,
+ InVals);
+}
+
+SDValue
+BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const {
+
+ // CCValAssign - represent the assignment of the return value to a location
+ SmallVector<CCValAssign, 16> RVLocs;
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
+
+ if (MF.getFunction()->getReturnType()->isAggregateType()) {
+ DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
+ "only integer returns supported", SDValue());
+ DAG.getContext()->diagnose(Err);
+ }
+
+  // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, RetCC_BPF64);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);
+
+    // Guarantee that all emitted copies are stuck together so that nothing
+    // can be scheduled between them.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ unsigned Opc = BPFISD::RET_FLAG;
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+ return DAG.getNode(Opc, DL, MVT::Other, RetOps);
+}
+
+SDValue BPFTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
+
+ if (Ins.size() >= 2) {
+ DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
+ "only small returns supported", SDValue());
+ DAG.getContext()->diagnose(Err);
+ }
+
+ CCInfo.AnalyzeCallResult(Ins, RetCC_BPF64);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (auto &Val : RVLocs) {
+ Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
+ Val.getValVT(), InFlag).getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
+
+ return Chain;
+}
+
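+// BPF only has jeq/jne/jgt/jge style jumps (signed and unsigned), with no
+// "less than" forms, so canonicalize LT/LE comparisons by swapping the
+// operands and reversing the condition code.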
+static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
+ switch (CC) {
+ default:
+ break;
+ case ISD::SETULT:
+ case ISD::SETULE:
+ case ISD::SETLT:
+ case ISD::SETLE:
+ CC = ISD::getSetCCSwappedOperands(CC);
+ std::swap(LHS, RHS);
+ break;
+ }
+}
+
+SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
+ SDLoc DL(Op);
+
+ NegateCC(LHS, RHS, CC);
+
+ return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
+ DAG.getConstant(CC, MVT::i64), Dest);
+}
+
+SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue TrueV = Op.getOperand(2);
+ SDValue FalseV = Op.getOperand(3);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+ SDLoc DL(Op);
+
+ NegateCC(LHS, RHS, CC);
+
+ SDValue TargetCC = DAG.getConstant(CC, MVT::i64);
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+ SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
+
+ return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
+}
+
+const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default:
+    return nullptr;
+ case BPFISD::RET_FLAG:
+ return "BPFISD::RET_FLAG";
+ case BPFISD::CALL:
+ return "BPFISD::CALL";
+ case BPFISD::SELECT_CC:
+ return "BPFISD::SELECT_CC";
+ case BPFISD::BR_CC:
+ return "BPFISD::BR_CC";
+ case BPFISD::Wrapper:
+ return "BPFISD::Wrapper";
+ }
+}
+
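+// Global addresses are materialized as 64-bit immediates: wrap the address
+// in BPFISD::Wrapper, which the ld_64 pattern in BPFInstrInfo.td matches.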
+SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64);
+
+ return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
+}
+
+MachineBasicBlock *
+BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ unsigned Opc = MI->getOpcode();
+
+ const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ assert(Opc == BPF::Select && "Unexpected instr type to insert");
+
+ // To "insert" a SELECT instruction, we actually have to insert the diamond
+ // control-flow pattern. The incoming instruction knows the destination vreg
+ // to set, the condition code register to branch on, the true/false values to
+ // select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator I = BB;
+ ++I;
+
+ // ThisMBB:
+ // ...
+ // TrueVal = ...
+ // jmp_XX r1, r2 goto Copy1MBB
+ // fallthrough --> Copy0MBB
+ MachineBasicBlock *ThisMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ F->insert(I, Copy0MBB);
+ F->insert(I, Copy1MBB);
+ // Update machine-CFG edges by transferring all successors of the current
+ // block to the new block which will contain the Phi node for the select.
+ Copy1MBB->splice(Copy1MBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
+ // Next, add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(Copy0MBB);
+ BB->addSuccessor(Copy1MBB);
+
+  // Insert a conditional branch on the selected condition code.
+ unsigned LHS = MI->getOperand(1).getReg();
+ unsigned RHS = MI->getOperand(2).getReg();
+ int CC = MI->getOperand(3).getImm();
+ switch (CC) {
+ case ISD::SETGT:
+ BuildMI(BB, DL, TII.get(BPF::JSGT_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ case ISD::SETUGT:
+ BuildMI(BB, DL, TII.get(BPF::JUGT_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ case ISD::SETGE:
+ BuildMI(BB, DL, TII.get(BPF::JSGE_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ case ISD::SETUGE:
+ BuildMI(BB, DL, TII.get(BPF::JUGE_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ case ISD::SETEQ:
+ BuildMI(BB, DL, TII.get(BPF::JEQ_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ case ISD::SETNE:
+ BuildMI(BB, DL, TII.get(BPF::JNE_rr))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(Copy1MBB);
+ break;
+ default:
+ report_fatal_error("unimplemented select CondCode " + Twine(CC));
+ }
+
+ // Copy0MBB:
+ // %FalseValue = ...
+ // # fallthrough to Copy1MBB
+ BB = Copy0MBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(Copy1MBB);
+
+ // Copy1MBB:
+ // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
+ // ...
+ BB = Copy1MBB;
+ BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(5).getReg())
+ .addMBB(Copy0MBB)
+ .addReg(MI->getOperand(4).getReg())
+ .addMBB(ThisMBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
diff --git a/lib/Target/BPF/BPFISelLowering.h b/lib/Target/BPF/BPFISelLowering.h
new file mode 100644
index 0000000..04d7908
--- /dev/null
+++ b/lib/Target/BPF/BPFISelLowering.h
@@ -0,0 +1,89 @@
+//===-- BPFISelLowering.h - BPF DAG Lowering Interface ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that BPF uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFISELLOWERING_H
+#define LLVM_LIB_TARGET_BPF_BPFISELLOWERING_H
+
+#include "BPF.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+namespace BPFISD {
+enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ RET_FLAG,
+ CALL,
+ SELECT_CC,
+ BR_CC,
+ Wrapper
+};
+}
+
+class BPFTargetLowering : public TargetLowering {
+public:
+ explicit BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI);
+
+ // Provide custom lowering hooks for some operations.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+ // This method returns the name of a target specific DAG node.
+ const char *getTargetNodeName(unsigned Opcode) const override;
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const override;
+
+private:
+ SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+
+ // Lower the result values of a call, copying them out of physregs into vregs
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+  // Lower a call into CALLSEQ_START - BPFISD::CALL - CALLSEQ_END chain
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ // Lower incoming arguments, copy physregs into vregs
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+ SelectionDAG &DAG) const override;
+
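+  // Prefer 8-byte operations when expanding memcpy/memset; smaller copies
+  // fall back to 4-byte accesses.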
+ EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+ MachineFunction &MF) const override {
+ return Size >= 8 ? MVT::i64 : MVT::i32;
+ }
+
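+  // ld_64 can materialize any 64-bit constant, so immediates are always
+  // preferable to constant-pool loads.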
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override {
+ return true;
+ }
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/BPFInstrFormats.td b/lib/Target/BPF/BPFInstrFormats.td
new file mode 100644
index 0000000..53f3ad6
--- /dev/null
+++ b/lib/Target/BPF/BPFInstrFormats.td
@@ -0,0 +1,33 @@
+//===-- BPFInstrFormats.td - BPF Instruction Formats -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class InstBPF<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Instruction {
+ field bits<64> Inst;
+ field bits<64> SoftFail = 0;
+ let Size = 8;
+
+ let Namespace = "BPF";
+ let DecoderNamespace = "BPF";
+
+ bits<3> BPFClass;
+ let Inst{58-56} = BPFClass;
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+}
+
+// Pseudo instructions
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstBPF<outs, ins, asmstr, pattern> {
+ let Inst{63-0} = 0;
+ let isPseudo = 1;
+}
diff --git a/lib/Target/BPF/BPFInstrInfo.cpp b/lib/Target/BPF/BPFInstrInfo.cpp
new file mode 100644
index 0000000..28bd0ec
--- /dev/null
+++ b/lib/Target/BPF/BPFInstrInfo.cpp
@@ -0,0 +1,168 @@
+//===-- BPFInstrInfo.cpp - BPF Instruction Information ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the BPF implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFInstrInfo.h"
+#include "BPFSubtarget.h"
+#include "BPFTargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "BPFGenInstrInfo.inc"
+
+using namespace llvm;
+
+BPFInstrInfo::BPFInstrInfo()
+ : BPFGenInstrInfo(BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
+
+void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ if (BPF::GPRRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(BPF::MOV_rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+}
+
+void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool IsKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end())
+ DL = I->getDebugLoc();
+
+ if (RC == &BPF::GPRRegClass)
+ BuildMI(MBB, I, DL, get(BPF::STD))
+ .addReg(SrcReg, getKillRegState(IsKill))
+ .addFrameIndex(FI)
+ .addImm(0);
+ else
+ llvm_unreachable("Can't store this register to stack slot");
+}
+
+void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end())
+ DL = I->getDebugLoc();
+
+ if (RC == &BPF::GPRRegClass)
+ BuildMI(MBB, I, DL, get(BPF::LDD), DestReg).addFrameIndex(FI).addImm(0);
+ else
+ llvm_unreachable("Can't load this register from stack slot");
+}
+
+bool BPFInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // Start from the bottom of the block and work up, examining the
+ // terminator instructions.
+ MachineBasicBlock::iterator I = MBB.end();
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+
+ // Working from the bottom, when we see a non-terminator
+ // instruction, we're done.
+ if (!isUnpredicatedTerminator(I))
+ break;
+
+ // A terminator that isn't a branch can't easily be handled
+ // by this analysis.
+ if (!I->isBranch())
+ return true;
+
+ // Handle unconditional branches.
+ if (I->getOpcode() == BPF::JMP) {
+ if (!AllowModify) {
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+
+      // If the block has any instructions after the JMP, delete them.
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
+ Cond.clear();
+      FBB = nullptr;
+
+      // Delete the JMP if it's equivalent to a fall-through.
+ if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
+        TBB = nullptr;
+ I->eraseFromParent();
+ I = MBB.end();
+ continue;
+ }
+
+      // TBB is used to indicate the unconditional branch destination.
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+ // Cannot handle conditional branches
+ return true;
+ }
+
+ return false;
+}
+
+unsigned BPFInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+
+ if (Cond.empty()) {
+ // Unconditional branch
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(BPF::JMP)).addMBB(TBB);
+ return 1;
+ }
+
+ llvm_unreachable("Unexpected conditional branch");
+}
+
+unsigned BPFInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.end();
+ unsigned Count = 0;
+
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+ if (I->getOpcode() != BPF::JMP)
+ break;
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+
+ return Count;
+}
diff --git a/lib/Target/BPF/BPFInstrInfo.h b/lib/Target/BPF/BPFInstrInfo.h
new file mode 100644
index 0000000..4056c2e
--- /dev/null
+++ b/lib/Target/BPF/BPFInstrInfo.h
@@ -0,0 +1,60 @@
+//===-- BPFInstrInfo.h - BPF Instruction Information ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the BPF implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFINSTRINFO_H
+#define LLVM_LIB_TARGET_BPF_BPFINSTRINFO_H
+
+#include "BPFRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "BPFGenInstrInfo.inc"
+
+namespace llvm {
+
+class BPFInstrInfo : public BPFGenInstrInfo {
+ const BPFRegisterInfo RI;
+
+public:
+ BPFInstrInfo();
+
+ const BPFRegisterInfo &getRegisterInfo() const { return RI; }
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned SrcReg,
+ bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/BPFInstrInfo.td b/lib/Target/BPF/BPFInstrInfo.td
new file mode 100644
index 0000000..47001f0
--- /dev/null
+++ b/lib/Target/BPF/BPFInstrInfo.td
@@ -0,0 +1,507 @@
+//===-- BPFInstrInfo.td - Target Description for BPF Target ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the BPF instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "BPFInstrFormats.td"
+
+// Instruction Operands and Patterns
+
+// These are target-independent nodes, but have target-specific formats.
+def SDT_BPFCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>]>;
+def SDT_BPFCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
+def SDT_BPFCall : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
+def SDT_BPFSetFlag : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>]>;
+def SDT_BPFSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
+ SDTCisSameAs<0, 4>,
+ SDTCisSameAs<4, 5>]>;
+def SDT_BPFBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
+ SDTCisVT<3, OtherVT>]>;
+def SDT_BPFWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
+
+def BPFcall : SDNode<"BPFISD::CALL", SDT_BPFCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def BPFretflag : SDNode<"BPFISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def BPFcallseq_start: SDNode<"ISD::CALLSEQ_START", SDT_BPFCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def BPFcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_BPFCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def BPFbrcc : SDNode<"BPFISD::BR_CC", SDT_BPFBrCC,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue]>;
+
+def BPFselectcc : SDNode<"BPFISD::SELECT_CC", SDT_BPFSelectCC, [SDNPInGlue]>;
+def BPFWrapper : SDNode<"BPFISD::Wrapper", SDT_BPFWrapper>;
+
+def brtarget : Operand<OtherVT>;
+def calltarget : Operand<i64>;
+
+def u64imm : Operand<i64> {
+ let PrintMethod = "printImm64Operand";
+}
+
+def i64immSExt32 : PatLeaf<(imm),
+ [{return isInt<32>(N->getSExtValue()); }]>;
+
+// Addressing modes.
+def ADDRri : ComplexPattern<i64, 2, "SelectAddr", [frameindex], []>;
+
+// Address operands
+def MEMri : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let EncoderMethod = "getMemoryOpValue";
+ let MIOperandInfo = (ops GPR, i16imm);
+}
+
+// Condition code predicates - used for pattern matching of jump instructions
+def BPF_CC_EQ : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETEQ);}]>;
+def BPF_CC_NE : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETNE);}]>;
+def BPF_CC_GE : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETGE);}]>;
+def BPF_CC_GT : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETGT);}]>;
+def BPF_CC_GTU : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETUGT);}]>;
+def BPF_CC_GEU : PatLeaf<(imm),
+ [{return (N->getZExtValue() == ISD::SETUGE);}]>;
+
+// jump instructions
+class JMP_RR<bits<4> Opc, string OpcodeStr, PatLeaf Cond>
+ : InstBPF<(outs), (ins GPR:$dst, GPR:$src, brtarget:$BrDst),
+ !strconcat(OpcodeStr, "\t$dst, $src goto $BrDst"),
+ [(BPFbrcc i64:$dst, i64:$src, Cond, bb:$BrDst)]> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<4> src;
+ bits<16> BrDst;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{55-52} = src;
+ let Inst{51-48} = dst;
+ let Inst{47-32} = BrDst;
+
+ let op = Opc;
+ let BPFSrc = 1;
+ let BPFClass = 5; // BPF_JMP
+}
+
+class JMP_RI<bits<4> Opc, string OpcodeStr, PatLeaf Cond>
+ : InstBPF<(outs), (ins GPR:$dst, i64imm:$imm, brtarget:$BrDst),
+ !strconcat(OpcodeStr, "i\t$dst, $imm goto $BrDst"),
+ [(BPFbrcc i64:$dst, i64immSExt32:$imm, Cond, bb:$BrDst)]> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<16> BrDst;
+ bits<32> imm;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{51-48} = dst;
+ let Inst{47-32} = BrDst;
+ let Inst{31-0} = imm;
+
+ let op = Opc;
+ let BPFSrc = 0;
+ let BPFClass = 5; // BPF_JMP
+}
+
+multiclass J<bits<4> Opc, string OpcodeStr, PatLeaf Cond> {
+ def _rr : JMP_RR<Opc, OpcodeStr, Cond>;
+ def _ri : JMP_RI<Opc, OpcodeStr, Cond>;
+}
+
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 0 in {
+// cmp+goto instructions
+defm JEQ : J<0x1, "jeq", BPF_CC_EQ>;
+defm JUGT : J<0x2, "jgt", BPF_CC_GTU>;
+defm JUGE : J<0x3, "jge", BPF_CC_GEU>;
+defm JNE : J<0x5, "jne", BPF_CC_NE>;
+defm JSGT : J<0x6, "jsgt", BPF_CC_GT>;
+defm JSGE : J<0x7, "jsge", BPF_CC_GE>;
+}
+
+// ALU instructions
+class ALU_RI<bits<4> Opc, string OpcodeStr, SDNode OpNode>
+ : InstBPF<(outs GPR:$dst), (ins GPR:$src2, i64imm:$imm),
+ !strconcat(OpcodeStr, "i\t$dst, $imm"),
+ [(set GPR:$dst, (OpNode GPR:$src2, i64immSExt32:$imm))]> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<32> imm;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{51-48} = dst;
+ let Inst{31-0} = imm;
+
+ let op = Opc;
+ let BPFSrc = 0;
+ let BPFClass = 7; // BPF_ALU64
+}
+
+class ALU_RR<bits<4> Opc, string OpcodeStr, SDNode OpNode>
+ : InstBPF<(outs GPR:$dst), (ins GPR:$src2, GPR:$src),
+ !strconcat(OpcodeStr, "\t$dst, $src"),
+ [(set GPR:$dst, (OpNode i64:$src2, i64:$src))]> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<4> src;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{55-52} = src;
+ let Inst{51-48} = dst;
+
+ let op = Opc;
+ let BPFSrc = 1;
+ let BPFClass = 7; // BPF_ALU64
+}
+
+multiclass ALU<bits<4> Opc, string OpcodeStr, SDNode OpNode> {
+ def _rr : ALU_RR<Opc, OpcodeStr, OpNode>;
+ def _ri : ALU_RI<Opc, OpcodeStr, OpNode>;
+}
+
+let Constraints = "$dst = $src2" in {
+let isAsCheapAsAMove = 1 in {
+ defm ADD : ALU<0x0, "add", add>;
+ defm SUB : ALU<0x1, "sub", sub>;
+ defm OR : ALU<0x4, "or", or>;
+ defm AND : ALU<0x5, "and", and>;
+ defm SLL : ALU<0x6, "sll", shl>;
+ defm SRL : ALU<0x7, "srl", srl>;
+ defm XOR : ALU<0xa, "xor", xor>;
+ defm SRA : ALU<0xc, "sra", sra>;
+}
+ defm MUL : ALU<0x2, "mul", mul>;
+ defm DIV : ALU<0x3, "div", udiv>;
+}
+
+class MOV_RR<string OpcodeStr>
+ : InstBPF<(outs GPR:$dst), (ins GPR:$src),
+ !strconcat(OpcodeStr, "\t$dst, $src"),
+ []> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<4> src;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{55-52} = src;
+ let Inst{51-48} = dst;
+
+ let op = 0xb; // BPF_MOV
+ let BPFSrc = 1; // BPF_X
+ let BPFClass = 7; // BPF_ALU64
+}
+
+class MOV_RI<string OpcodeStr>
+ : InstBPF<(outs GPR:$dst), (ins i64imm:$imm),
+ !strconcat(OpcodeStr, "\t$dst, $imm"),
+ [(set GPR:$dst, (i64 i64immSExt32:$imm))]> {
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<32> imm;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{51-48} = dst;
+ let Inst{31-0} = imm;
+
+ let op = 0xb; // BPF_MOV
+ let BPFSrc = 0; // BPF_K
+ let BPFClass = 7; // BPF_ALU64
+}
+def MOV_rr : MOV_RR<"mov">;
+def MOV_ri : MOV_RI<"mov">;
+
+class LD_IMM64<bits<4> Pseudo, string OpcodeStr>
+ : InstBPF<(outs GPR:$dst), (ins u64imm:$imm),
+ !strconcat(OpcodeStr, "\t$dst, $imm"),
+ [(set GPR:$dst, (i64 imm:$imm))]> {
+
+ bits<3> mode;
+ bits<2> size;
+ bits<4> dst;
+ bits<64> imm;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{51-48} = dst;
+ let Inst{55-52} = Pseudo;
+ let Inst{47-32} = 0;
+ let Inst{31-0} = imm{31-0};
+
+ let mode = 0; // BPF_IMM
+ let size = 3; // BPF_DW
+ let BPFClass = 0; // BPF_LD
+}
+def LD_imm64 : LD_IMM64<0, "ld_64">;
+
+// STORE instructions
+class STORE<bits<2> SizeOp, string OpcodeStr, list<dag> Pattern>
+ : InstBPF<(outs), (ins GPR:$src, MEMri:$addr),
+ !strconcat(OpcodeStr, "\t$addr, $src"), Pattern> {
+ bits<3> mode;
+ bits<2> size;
+ bits<4> src;
+ bits<20> addr;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{51-48} = addr{19-16}; // base reg
+ let Inst{55-52} = src;
+ let Inst{47-32} = addr{15-0}; // offset
+
+ let mode = 3; // BPF_MEM
+ let size = SizeOp;
+ let BPFClass = 3; // BPF_STX
+}
+
+class STOREi64<bits<2> Opc, string OpcodeStr, PatFrag OpNode>
+ : STORE<Opc, OpcodeStr, [(OpNode i64:$src, ADDRri:$addr)]>;
+
+def STW : STOREi64<0x0, "stw", truncstorei32>;
+def STH : STOREi64<0x1, "sth", truncstorei16>;
+def STB : STOREi64<0x2, "stb", truncstorei8>;
+def STD : STOREi64<0x3, "std", store>;
+
+// LOAD instructions
+class LOAD<bits<2> SizeOp, string OpcodeStr, list<dag> Pattern>
+ : InstBPF<(outs GPR:$dst), (ins MEMri:$addr),
+ !strconcat(OpcodeStr, "\t$dst, $addr"), Pattern> {
+ bits<3> mode;
+ bits<2> size;
+ bits<4> dst;
+ bits<20> addr;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{51-48} = dst;
+ let Inst{55-52} = addr{19-16};
+ let Inst{47-32} = addr{15-0};
+
+ let mode = 3; // BPF_MEM
+ let size = SizeOp;
+ let BPFClass = 1; // BPF_LDX
+}
+
+class LOADi64<bits<2> SizeOp, string OpcodeStr, PatFrag OpNode>
+ : LOAD<SizeOp, OpcodeStr, [(set i64:$dst, (OpNode ADDRri:$addr))]>;
+
+def LDW : LOADi64<0x0, "ldw", zextloadi32>;
+def LDH : LOADi64<0x1, "ldh", zextloadi16>;
+def LDB : LOADi64<0x2, "ldb", zextloadi8>;
+def LDD : LOADi64<0x3, "ldd", load>;
+
+class BRANCH<bits<4> Opc, string OpcodeStr, list<dag> Pattern>
+ : InstBPF<(outs), (ins brtarget:$BrDst),
+ !strconcat(OpcodeStr, "\t$BrDst"), Pattern> {
+ bits<4> op;
+ bits<16> BrDst;
+ bits<1> BPFSrc;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{47-32} = BrDst;
+
+ let op = Opc;
+ let BPFSrc = 0;
+ let BPFClass = 5; // BPF_JMP
+}
+
+class CALL<string OpcodeStr>
+ : InstBPF<(outs), (ins calltarget:$BrDst),
+ !strconcat(OpcodeStr, "\t$BrDst"), []> {
+ bits<4> op;
+ bits<32> BrDst;
+ bits<1> BPFSrc;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{31-0} = BrDst;
+
+ let op = 8; // BPF_CALL
+ let BPFSrc = 0;
+ let BPFClass = 5; // BPF_JMP
+}
+
+// Jump always
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 0, isBarrier = 1 in {
+ def JMP : BRANCH<0x0, "jmp", [(br bb:$BrDst)]>;
+}
+
+// Jump and link
+let isCall = 1, hasDelaySlot = 0, Uses = [R11],
+ // Potentially clobbered registers
+ Defs = [R0, R1, R2, R3, R4, R5] in {
+ def JAL : CALL<"call">;
+}
+
+class NOP_I<string OpcodeStr>
+ : InstBPF<(outs), (ins i32imm:$imm),
+ !strconcat(OpcodeStr, "\t$imm"), []> {
+ // mov r0, r0 == nop
+ bits<4> op;
+ bits<1> BPFSrc;
+ bits<4> dst;
+ bits<4> src;
+
+ let Inst{63-60} = op;
+ let Inst{59} = BPFSrc;
+ let Inst{55-52} = src;
+ let Inst{51-48} = dst;
+
+ let op = 0xb; // BPF_MOV
+ let BPFSrc = 1; // BPF_X
+ let BPFClass = 7; // BPF_ALU64
+ let src = 0; // R0
+ let dst = 0; // R0
+}
+
+let hasSideEffects = 0 in
+ def NOP : NOP_I<"nop">;
+
+class RET<string OpcodeStr>
+ : InstBPF<(outs), (ins),
+ !strconcat(OpcodeStr, ""), [(BPFretflag)]> {
+ bits<4> op;
+
+ let Inst{63-60} = op;
+ let Inst{59} = 0;
+ let Inst{31-0} = 0;
+
+ let op = 9; // BPF_EXIT
+ let BPFClass = 5; // BPF_JMP
+}
+
+let isReturn = 1, isTerminator = 1, hasDelaySlot = 0, isBarrier = 1,
+ isNotDuplicable = 1 in {
+ def RET : RET<"ret">;
+}
+
+// ADJCALLSTACKDOWN/UP pseudo insns
+let Defs = [R11], Uses = [R11] in {
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
+ "#ADJCALLSTACKDOWN $amt",
+ [(BPFcallseq_start timm:$amt)]>;
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
+ "#ADJCALLSTACKUP $amt1 $amt2",
+ [(BPFcallseq_end timm:$amt1, timm:$amt2)]>;
+}
+
+let usesCustomInserter = 1 in {
+ def Select : Pseudo<(outs GPR:$dst),
+ (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
+ "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
+ [(set i64:$dst,
+ (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm), i64:$src, i64:$src2))]>;
+}
+
+// load 64-bit global addr into register
+def : Pat<(BPFWrapper tglobaladdr:$in), (LD_imm64 tglobaladdr:$in)>;
+
+// The mask 0xffffFFFF does not fit into a signed 32-bit immediate; lower the
+// common zero-extension idiom to a left/right shift pair instead.
+def : Pat<(i64 (and (i64 GPR:$src), 0xffffFFFF)),
+ (SRL_ri (SLL_ri (i64 GPR:$src), 32), 32)>;
+
+// Calls
+def : Pat<(BPFcall tglobaladdr:$dst), (JAL tglobaladdr:$dst)>;
+def : Pat<(BPFcall imm:$dst), (JAL imm:$dst)>;
+
+// Loads
+def : Pat<(extloadi8 ADDRri:$src), (i64 (LDB ADDRri:$src))>;
+def : Pat<(extloadi16 ADDRri:$src), (i64 (LDH ADDRri:$src))>;
+def : Pat<(extloadi32 ADDRri:$src), (i64 (LDW ADDRri:$src))>;
+
+// Atomics
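+// xadd atomically adds $val to the memory location at $addr.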
+class XADD<bits<2> SizeOp, string OpcodeStr, PatFrag OpNode>
+ : InstBPF<(outs GPR:$dst), (ins MEMri:$addr, GPR:$val),
+ !strconcat(OpcodeStr, "\t$dst, $addr, $val"),
+ [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
+ bits<3> mode;
+ bits<2> size;
+ bits<4> src;
+ bits<20> addr;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{51-48} = addr{19-16}; // base reg
+ let Inst{55-52} = src;
+ let Inst{47-32} = addr{15-0}; // offset
+
+ let mode = 6; // BPF_XADD
+ let size = SizeOp;
+ let BPFClass = 3; // BPF_STX
+}
+
+let Constraints = "$dst = $val" in {
+def XADD32 : XADD<0, "xadd32", atomic_load_add_32>;
+def XADD64 : XADD<3, "xadd64", atomic_load_add_64>;
+// undefined def XADD16 : XADD<1, "xadd16", atomic_load_add_16>;
+// undefined def XADD8 : XADD<2, "xadd8", atomic_load_add_8>;
+}
+
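+// ld_abs/ld_ind are the classic BPF packet-access instructions: they expect
+// the skb pointer in R6, return the loaded value in R0, and may clobber
+// R0-R5 like a call.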
+let Defs = [R0, R1, R2, R3, R4, R5], Uses = [R6], hasSideEffects = 1,
+ hasExtraDefRegAllocReq = 1, hasExtraSrcRegAllocReq = 1, mayLoad = 1 in {
+class LOAD_ABS<bits<2> SizeOp, string OpcodeStr, Intrinsic OpNode>
+ : InstBPF<(outs), (ins GPR:$skb, i64imm:$imm),
+ !strconcat(OpcodeStr, "\tr0, $skb.data + $imm"),
+ [(set R0, (OpNode GPR:$skb, i64immSExt32:$imm))]> {
+ bits<3> mode;
+ bits<2> size;
+ bits<32> imm;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{31-0} = imm;
+
+ let mode = 1; // BPF_ABS
+ let size = SizeOp;
+ let BPFClass = 0; // BPF_LD
+}
+
+class LOAD_IND<bits<2> SizeOp, string OpcodeStr, Intrinsic OpNode>
+ : InstBPF<(outs), (ins GPR:$skb, GPR:$val),
+ !strconcat(OpcodeStr, "\tr0, $skb.data + $val"),
+ [(set R0, (OpNode GPR:$skb, GPR:$val))]> {
+ bits<3> mode;
+ bits<2> size;
+ bits<4> val;
+
+ let Inst{63-61} = mode;
+ let Inst{60-59} = size;
+ let Inst{55-52} = val;
+
+ let mode = 2; // BPF_IND
+ let size = SizeOp;
+ let BPFClass = 0; // BPF_LD
+}
+}
+
+def LD_ABS_B : LOAD_ABS<2, "ldabs_b", int_bpf_load_byte>;
+def LD_ABS_H : LOAD_ABS<1, "ldabs_h", int_bpf_load_half>;
+def LD_ABS_W : LOAD_ABS<0, "ldabs_w", int_bpf_load_word>;
+
+def LD_IND_B : LOAD_IND<2, "ldind_b", int_bpf_load_byte>;
+def LD_IND_H : LOAD_IND<1, "ldind_h", int_bpf_load_half>;
+def LD_IND_W : LOAD_IND<0, "ldind_w", int_bpf_load_word>;
diff --git a/lib/Target/BPF/BPFMCInstLower.cpp b/lib/Target/BPF/BPFMCInstLower.cpp
new file mode 100644
index 0000000..5a695f0
--- /dev/null
+++ b/lib/Target/BPF/BPFMCInstLower.cpp
@@ -0,0 +1,77 @@
+//===-- BPFMCInstLower.cpp - Convert BPF MachineInstr to an MCInst -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower BPF MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPFMCInstLower.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+using namespace llvm;
+
+MCSymbol *
+BPFMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ return Printer.getSymbol(MO.getGlobal());
+}
+
+MCOperand BPFMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+
+ if (!MO.isJTI() && MO.getOffset())
+ llvm_unreachable("unknown symbol op");
+
+ return MCOperand::CreateExpr(Expr);
+}
+
+void BPFMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default:
+ MI->dump();
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ continue;
+ MCOp = MCOperand::CreateReg(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(
+ MCSymbolRefExpr::Create(MO.getMBB()->getSymbol(), Ctx));
+ break;
+ case MachineOperand::MO_RegisterMask:
+ continue;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/lib/Target/BPF/BPFMCInstLower.h b/lib/Target/BPF/BPFMCInstLower.h
new file mode 100644
index 0000000..054e894
--- /dev/null
+++ b/lib/Target/BPF/BPFMCInstLower.h
@@ -0,0 +1,43 @@
+//===-- BPFMCInstLower.h - Lower MachineInstr to MCInst ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFMCINSTLOWER_H
+#define LLVM_LIB_TARGET_BPF_BPFMCINSTLOWER_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class AsmPrinter;
+class MCContext;
+class MCInst;
+class MCOperand;
+class MCSymbol;
+class MachineInstr;
+class MachineModuleInfoMachO;
+class MachineOperand;
+class Mangler;
+
+// BPFMCInstLower - This class is used to lower a MachineInstr into an MCInst.
+class LLVM_LIBRARY_VISIBILITY BPFMCInstLower {
+ MCContext &Ctx;
+
+ AsmPrinter &Printer;
+
+public:
+ BPFMCInstLower(MCContext &ctx, AsmPrinter &printer)
+ : Ctx(ctx), Printer(printer) {}
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+ MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/BPFRegisterInfo.cpp b/lib/Target/BPF/BPFRegisterInfo.cpp
new file mode 100644
index 0000000..8f885c3
--- /dev/null
+++ b/lib/Target/BPF/BPFRegisterInfo.cpp
@@ -0,0 +1,88 @@
+//===-- BPFRegisterInfo.cpp - BPF Register Information ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the BPF implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFRegisterInfo.h"
+#include "BPFSubtarget.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "BPFGenRegisterInfo.inc"
+using namespace llvm;
+
+BPFRegisterInfo::BPFRegisterInfo()
+ : BPFGenRegisterInfo(BPF::R0) {}
+
+const MCPhysReg *
+BPFRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ return CSR_SaveList;
+}
+
+BitVector BPFRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+  Reserved.set(BPF::R10); // R10 is the read-only frame pointer
+  Reserved.set(BPF::R11); // R11 is the pseudo stack pointer
+ return Reserved;
+}
+
+void BPFRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
+
+ unsigned i = 0;
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ DebugLoc DL = MI.getDebugLoc();
+
+ while (!MI.getOperand(i).isFI()) {
+ ++i;
+ assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
+ }
+
+ unsigned FrameReg = getFrameRegister(MF);
+ int FrameIndex = MI.getOperand(i).getIndex();
+
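+  // A MOV_rr with a FrameIndex operand materializes the address of a stack
+  // object: rewrite it to use the frame register and add the object offset
+  // with an explicit ADD_ri.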
+ if (MI.getOpcode() == BPF::MOV_rr) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
+
+ MI.getOperand(i).ChangeToRegister(FrameReg, false);
+
+ MachineBasicBlock &MBB = *MI.getParent();
+    unsigned Reg = MI.getOperand(i - 1).getReg();
+    BuildMI(MBB, ++II, DL, TII.get(BPF::ADD_ri), Reg)
+        .addReg(Reg)
+        .addImm(Offset);
+ return;
+ }
+
+ int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
+ MI.getOperand(i + 1).getImm();
+
+ if (!isInt<32>(Offset))
+ llvm_unreachable("bug in frame offset");
+
+ MI.getOperand(i).ChangeToRegister(FrameReg, false);
+ MI.getOperand(i + 1).ChangeToImmediate(Offset);
+}
+
+unsigned BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ return BPF::R10;
+}
diff --git a/lib/Target/BPF/BPFRegisterInfo.h b/lib/Target/BPF/BPFRegisterInfo.h
new file mode 100644
index 0000000..364d6f6
--- /dev/null
+++ b/lib/Target/BPF/BPFRegisterInfo.h
@@ -0,0 +1,41 @@
+//===-- BPFRegisterInfo.h - BPF Register Information Impl -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the BPF implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFREGISTERINFO_H
+#define LLVM_LIB_TARGET_BPF_BPFREGISTERINFO_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "BPFGenRegisterInfo.inc"
+
+namespace llvm {
+
+struct BPFRegisterInfo : public BPFGenRegisterInfo {
+
+ BPFRegisterInfo();
+
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/BPFRegisterInfo.td b/lib/Target/BPF/BPFRegisterInfo.td
new file mode 100644
index 0000000..c8e24f8
--- /dev/null
+++ b/lib/Target/BPF/BPFRegisterInfo.td
@@ -0,0 +1,41 @@
+//===-- BPFRegisterInfo.td - BPF Register defs -------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the BPF register file
+//===----------------------------------------------------------------------===//
+
+// Registers are identified with 4-bit ID numbers.
+// Ri - 64-bit integer registers
+class Ri<bits<16> Enc, string n> : Register<n> {
+ let Namespace = "BPF";
+ let HWEncoding = Enc;
+}
+
+// Integer registers
+def R0 : Ri< 0, "r0">, DwarfRegNum<[0]>;
+def R1 : Ri< 1, "r1">, DwarfRegNum<[1]>;
+def R2 : Ri< 2, "r2">, DwarfRegNum<[2]>;
+def R3 : Ri< 3, "r3">, DwarfRegNum<[3]>;
+def R4 : Ri< 4, "r4">, DwarfRegNum<[4]>;
+def R5 : Ri< 5, "r5">, DwarfRegNum<[5]>;
+def R6 : Ri< 6, "r6">, DwarfRegNum<[6]>;
+def R7 : Ri< 7, "r7">, DwarfRegNum<[7]>;
+def R8 : Ri< 8, "r8">, DwarfRegNum<[8]>;
+def R9 : Ri< 9, "r9">, DwarfRegNum<[9]>;
+def R10 : Ri<10, "r10">, DwarfRegNum<[10]>;
+def R11 : Ri<11, "r11">, DwarfRegNum<[11]>;
+
+// Register classes.
+def GPR : RegisterClass<"BPF", [i64], 64, (add R1, R2, R3, R4, R5,
+ R6, R7, R8, R9, // callee saved
+ R0, // return value
+ R11, // stack ptr
+ R10 // frame ptr
+ )>;
diff --git a/lib/Target/BPF/BPFSubtarget.cpp b/lib/Target/BPF/BPFSubtarget.cpp
new file mode 100644
index 0000000..7f7a262
--- /dev/null
+++ b/lib/Target/BPF/BPFSubtarget.cpp
@@ -0,0 +1,31 @@
+//===-- BPFSubtarget.cpp - BPF Subtarget Information ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BPF specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPFSubtarget.h"
+#include "BPF.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "bpf-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "BPFGenSubtargetInfo.inc"
+
+void BPFSubtarget::anchor() {}
+
+BPFSubtarget::BPFSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM)
+ : BPFGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(*this),
+ TLInfo(TM, *this), TSInfo(TM.getDataLayout()) {}
diff --git a/lib/Target/BPF/BPFSubtarget.h b/lib/Target/BPF/BPFSubtarget.h
new file mode 100644
index 0000000..347cffd8
--- /dev/null
+++ b/lib/Target/BPF/BPFSubtarget.h
@@ -0,0 +1,64 @@
+//===-- BPFSubtarget.h - Define Subtarget for the BPF -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the BPF specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFSUBTARGET_H
+#define LLVM_LIB_TARGET_BPF_BPFSUBTARGET_H
+
+#include "BPFFrameLowering.h"
+#include "BPFISelLowering.h"
+#include "BPFInstrInfo.h"
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "BPFGenSubtargetInfo.inc"
+
+namespace llvm {
+class StringRef;
+
+class BPFSubtarget : public BPFGenSubtargetInfo {
+ virtual void anchor();
+ BPFInstrInfo InstrInfo;
+ BPFFrameLowering FrameLowering;
+ BPFTargetLowering TLInfo;
+ TargetSelectionDAGInfo TSInfo;
+
+public:
+  // This constructor initializes the data members to match those
+  // of the specified triple.
+ BPFSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM);
+
+ // ParseSubtargetFeatures - Parses features string setting specified
+ // subtarget options. Definition of function is auto generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ const BPFInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+ const BPFFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const BPFTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+ const TargetRegisterInfo *getRegisterInfo() const override {
+ return &InstrInfo.getRegisterInfo();
+ }
+};
+} // End llvm namespace
+
+#endif
diff --git a/lib/Target/BPF/BPFTargetMachine.cpp b/lib/Target/BPF/BPFTargetMachine.cpp
new file mode 100644
index 0000000..5245395
--- /dev/null
+++ b/lib/Target/BPF/BPFTargetMachine.cpp
@@ -0,0 +1,69 @@
+//===-- BPFTargetMachine.cpp - Define TargetMachine for BPF ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the info about BPF target spec.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFTargetMachine.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+extern "C" void LLVMInitializeBPFTarget() {
+ // Register the target.
+ RegisterTargetMachine<BPFTargetMachine> X(TheBPFTarget);
+}
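+
+// Once registered, the backend can be exercised in the usual way, e.g.:
+//   llc -march=bpf -filetype=obj foo.ll -o foo.o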
+
+// DataLayout: little-endian, 64-bit pointer/ABI/alignment.
+// The stack is always 8-byte aligned.
+// In the function prologue, the stack frame is created by decrementing
+// the stack pointer. Once decremented, all references are done with a
+// positive offset from the stack/frame pointer.
+BPFTargetMachine::BPFTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ TLOF(make_unique<TargetLoweringObjectFileELF>()),
+ DL("e-m:e-p:64:64-i64:64-n32:64-S128"),
+ Subtarget(TT, CPU, FS, *this) {
+ initAsmInfo();
+}
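+
+// For reference, the layout string above decodes piecewise:
+//   e        little-endian
+//   m:e      ELF-style symbol mangling
+//   p:64:64  64-bit pointers with 64-bit alignment
+//   i64:64   i64 is 64-bit aligned
+//   n32:64   native integer widths are 32 and 64 bits
+//   S128     natural stack alignment is 128 bits (16 bytes)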
+
+namespace {
+// BPF Code Generator Pass Configuration Options.
+class BPFPassConfig : public TargetPassConfig {
+public:
+ BPFPassConfig(BPFTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ BPFTargetMachine &getBPFTargetMachine() const {
+ return getTM<BPFTargetMachine>();
+ }
+
+ bool addInstSelector() override;
+};
+}
+
+TargetPassConfig *BPFTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new BPFPassConfig(this, PM);
+}
+
+// Install an instruction selector pass using
+// the ISelDag to gen BPF code.
+bool BPFPassConfig::addInstSelector() {
+ addPass(createBPFISelDag(getBPFTargetMachine()));
+
+ return false;
+}
diff --git a/lib/Target/BPF/BPFTargetMachine.h b/lib/Target/BPF/BPFTargetMachine.h
new file mode 100644
index 0000000..821cffc
--- /dev/null
+++ b/lib/Target/BPF/BPFTargetMachine.h
@@ -0,0 +1,42 @@
+//===-- BPFTargetMachine.h - Define TargetMachine for BPF ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the BPF specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_BPFTARGETMACHINE_H
+#define LLVM_LIB_TARGET_BPF_BPFTARGETMACHINE_H
+
+#include "BPFSubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class BPFTargetMachine : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL;
+ BPFSubtarget Subtarget;
+
+public:
+ BPFTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+
+ const DataLayout *getDataLayout() const override { return &DL; }
+ const BPFSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/CMakeLists.txt b/lib/Target/BPF/CMakeLists.txt
new file mode 100644
index 0000000..3eac6e9
--- /dev/null
+++ b/lib/Target/BPF/CMakeLists.txt
@@ -0,0 +1,27 @@
+set(LLVM_TARGET_DEFINITIONS BPF.td)
+
+tablegen(LLVM BPFGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM BPFGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM BPFGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM BPFGenAsmMatcher.inc -gen-asm-matcher)
+tablegen(LLVM BPFGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM BPFGenMCCodeEmitter.inc -gen-emitter)
+tablegen(LLVM BPFGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM BPFGenSubtargetInfo.inc -gen-subtarget)
+add_public_tablegen_target(BPFCommonTableGen)
+
+add_llvm_target(BPFCodeGen
+ BPFAsmPrinter.cpp
+ BPFFrameLowering.cpp
+ BPFInstrInfo.cpp
+ BPFISelDAGToDAG.cpp
+ BPFISelLowering.cpp
+ BPFMCInstLower.cpp
+ BPFRegisterInfo.cpp
+ BPFSubtarget.cpp
+ BPFTargetMachine.cpp
+ )
+
+add_subdirectory(InstPrinter)
+add_subdirectory(TargetInfo)
+add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/BPF/InstPrinter/BPFInstPrinter.cpp b/lib/Target/BPF/InstPrinter/BPFInstPrinter.cpp
new file mode 100644
index 0000000..3f09379
--- /dev/null
+++ b/lib/Target/BPF/InstPrinter/BPFInstPrinter.cpp
@@ -0,0 +1,86 @@
+//===-- BPFInstPrinter.cpp - Convert BPF MCInst to asm syntax -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a BPF MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFInstPrinter.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+// Include the auto-generated portion of the assembly writer.
+#include "BPFGenAsmWriter.inc"
+
+void BPFInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ printInstruction(MI, O);
+ printAnnotation(O, Annot);
+}
+
+static void printExpr(const MCExpr *Expr, raw_ostream &O) {
+ const MCSymbolRefExpr *SRE;
+
+ if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr))
+ SRE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
+ else
+ SRE = dyn_cast<MCSymbolRefExpr>(Expr);
+ assert(SRE && "Unexpected MCExpr type.");
+
+  assert(SRE->getKind() == MCSymbolRefExpr::VK_None &&
+         "Unexpected symbol variant kind");
+ O << *Expr;
+}
+
+void BPFInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, const char *Modifier) {
+  assert((Modifier == nullptr || Modifier[0] == 0) &&
+         "No modifiers supported");
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ O << getRegisterName(Op.getReg());
+ } else if (Op.isImm()) {
+ O << (int32_t)Op.getImm();
+ } else {
+ assert(Op.isExpr() && "Expected an expression");
+ printExpr(Op.getExpr(), O);
+ }
+}
+
+void BPFInstPrinter::printMemOperand(const MCInst *MI, int OpNo, raw_ostream &O,
+ const char *Modifier) {
+ const MCOperand &RegOp = MI->getOperand(OpNo);
+ const MCOperand &OffsetOp = MI->getOperand(OpNo + 1);
+ // offset
+ if (OffsetOp.isImm())
+ O << formatDec(OffsetOp.getImm());
+ else
+    llvm_unreachable("Expected an immediate");
+
+ // register
+ assert(RegOp.isReg() && "Register operand not a register");
+ O << '(' << getRegisterName(RegOp.getReg()) << ')';
+}
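+
+// For example, a memory operand with base register R10 and immediate
+// offset -8 prints as "-8(r10)".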
+
+void BPFInstPrinter::printImm64Operand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm())
+ O << (uint64_t)Op.getImm();
+ else
+ O << Op;
+}
diff --git a/lib/Target/BPF/InstPrinter/BPFInstPrinter.h b/lib/Target/BPF/InstPrinter/BPFInstPrinter.h
new file mode 100644
index 0000000..d7c2899
--- /dev/null
+++ b/lib/Target/BPF/InstPrinter/BPFInstPrinter.h
@@ -0,0 +1,41 @@
+//===-- BPFInstPrinter.h - Convert BPF MCInst to asm syntax -------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a BPF MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_INSTPRINTER_BPFINSTPRINTER_H
+#define LLVM_LIB_TARGET_BPF_INSTPRINTER_BPFINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+class MCOperand;
+
+class BPFInstPrinter : public MCInstPrinter {
+public:
+ BPFInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
+ const char *Modifier = nullptr);
+ void printMemOperand(const MCInst *MI, int OpNo, raw_ostream &O,
+ const char *Modifier = nullptr);
+ void printImm64Operand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/InstPrinter/CMakeLists.txt b/lib/Target/BPF/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..f9e9161
--- /dev/null
+++ b/lib/Target/BPF/InstPrinter/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMBPFAsmPrinter
+ BPFInstPrinter.cpp
+ )
diff --git a/lib/Target/BPF/InstPrinter/LLVMBuild.txt b/lib/Target/BPF/InstPrinter/LLVMBuild.txt
new file mode 100644
index 0000000..88a937a
--- /dev/null
+++ b/lib/Target/BPF/InstPrinter/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/BPF/InstPrinter/LLVMBuild.txt ---------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = BPFAsmPrinter
+parent = BPF
+required_libraries = MC Support
+add_to_library_groups = BPF
diff --git a/lib/Target/BPF/InstPrinter/Makefile b/lib/Target/BPF/InstPrinter/Makefile
new file mode 100644
index 0000000..f46af83
--- /dev/null
+++ b/lib/Target/BPF/InstPrinter/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/BPF/InstPrinter/Makefile -----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMBPFAsmPrinter
+
+# Hack: we need to include 'main' BPF target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/LLVMBuild.txt b/lib/Target/BPF/LLVMBuild.txt
new file mode 100644
index 0000000..11578c8
--- /dev/null
+++ b/lib/Target/BPF/LLVMBuild.txt
@@ -0,0 +1,32 @@
+;===- ./lib/Target/BPF/LLVMBuild.txt ---------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[common]
+subdirectories = InstPrinter MCTargetDesc TargetInfo
+
+[component_0]
+type = TargetGroup
+name = BPF
+parent = Target
+has_asmprinter = 1
+
+[component_1]
+type = Library
+name = BPFCodeGen
+parent = BPF
+required_libraries = AsmPrinter CodeGen Core MC BPFAsmPrinter BPFDesc BPFInfo SelectionDAG Support Target
+add_to_library_groups = BPF
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
new file mode 100644
index 0000000..87c8077
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -0,0 +1,83 @@
+//===-- BPFAsmBackend.cpp - BPF Assembler Backend -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/BPFMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class BPFAsmBackend : public MCAsmBackend {
+public:
+ BPFAsmBackend() : MCAsmBackend() {}
+ ~BPFAsmBackend() override {}
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override;
+
+ // No instruction requires relaxation
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ return false;
+ }
+
+ unsigned getNumFixupKinds() const override { return 1; }
+
+ bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
+
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {}
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+};
+
+bool BPFAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ if ((Count % 8) != 0)
+ return false;
+
+ for (uint64_t i = 0; i < Count; i += 8)
+ OW->Write64(0x15000000);
+
+ return true;
+}
+
+void BPFAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+
+ if (Fixup.getKind() == FK_SecRel_4 || Fixup.getKind() == FK_SecRel_8) {
+ assert(Value == 0);
+ return;
+ }
+ assert(Fixup.getKind() == FK_PCRel_2);
+ *(uint16_t *)&Data[Fixup.getOffset() + 2] = (uint16_t)((Value - 8) / 8);
+}
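+
+// A worked example for the PC-relative case above: BPF instructions are
+// 8 bytes wide, and the 16-bit offset field at byte offset 2 counts
+// instructions relative to the instruction *after* the branch. Assuming
+// Value is the byte distance from the start of the branch to its target,
+// a target 24 bytes ahead encodes (24 - 8) / 8 == 2, i.e. "skip the next
+// two instructions".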
+
+MCObjectWriter *BPFAsmBackend::createObjectWriter(raw_ostream &OS) const {
+ return createBPFELFObjectWriter(OS, 0);
+}
+}
+
+MCAsmBackend *llvm::createBPFAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU) {
+ return new BPFAsmBackend();
+}
diff --git a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
new file mode 100644
index 0000000..169a8a7
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
@@ -0,0 +1,53 @@
+//===-- BPFELFObjectWriter.cpp - BPF ELF Writer ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/BPFMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+class BPFELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ BPFELFObjectWriter(uint8_t OSABI);
+
+ ~BPFELFObjectWriter() override;
+
+protected:
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
+};
+}
+
+BPFELFObjectWriter::BPFELFObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_NONE,
+ /*HasRelocationAddend*/ false) {}
+
+BPFELFObjectWriter::~BPFELFObjectWriter() {}
+
+unsigned BPFELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+  // Determine the type of the relocation.
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("invalid fixup kind!");
+ case FK_SecRel_8:
+ return ELF::R_X86_64_64;
+ case FK_SecRel_4:
+ return ELF::R_X86_64_PC32;
+ }
+}
+
+MCObjectWriter *llvm::createBPFELFObjectWriter(raw_ostream &OS, uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new BPFELFObjectWriter(OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
+}
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
new file mode 100644
index 0000000..ab61ae7
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
@@ -0,0 +1,36 @@
+//===-- BPFMCAsmInfo.h - BPF asm properties -------------------*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the BPFMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H
+#define LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+class Target;
+
+class BPFMCAsmInfo : public MCAsmInfo {
+public:
+ explicit BPFMCAsmInfo(StringRef TT) {
+ PrivateGlobalPrefix = ".L";
+ WeakRefDirective = "\t.weak\t";
+
+ UsesELFSectionDirectiveForBSS = true;
+ HasSingleParameterDotFile = false;
+ HasDotTypeDotSizeDirective = false;
+ }
+};
+}
+
+#endif
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
new file mode 100644
index 0000000..b94693a
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
@@ -0,0 +1,167 @@
+//===-- BPFMCCodeEmitter.cpp - Convert BPF code to machine code -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BPFMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/BPFMCTargetDesc.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+namespace {
+class BPFMCCodeEmitter : public MCCodeEmitter {
+ BPFMCCodeEmitter(const BPFMCCodeEmitter &) = delete;
+ void operator=(const BPFMCCodeEmitter &) = delete;
+ const MCRegisterInfo &MRI;
+
+public:
+ BPFMCCodeEmitter(const MCRegisterInfo &mri) : MRI(mri) {}
+
+ ~BPFMCCodeEmitter() {}
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+  // getMachineOpValue - Return binary encoding of operand. If the machine
+  // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint64_t getMemoryOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+};
+}
+
+MCCodeEmitter *llvm::createBPFMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new BPFMCCodeEmitter(MRI);
+}
+
+unsigned BPFMCCodeEmitter::getMachineOpValue(const MCInst &MI,
+ const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ if (MO.isReg())
+ return MRI.getEncodingValue(MO.getReg());
+ if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+
+ assert(MO.isExpr());
+
+ const MCExpr *Expr = MO.getExpr();
+ MCExpr::ExprKind Kind = Expr->getKind();
+
+ assert(Kind == MCExpr::SymbolRef);
+
+ if (MI.getOpcode() == BPF::JAL)
+ // func call name
+ Fixups.push_back(MCFixup::Create(0, Expr, FK_SecRel_4));
+ else if (MI.getOpcode() == BPF::LD_imm64)
+ Fixups.push_back(MCFixup::Create(0, Expr, FK_SecRel_8));
+ else
+ // bb label
+ Fixups.push_back(MCFixup::Create(0, Expr, FK_PCRel_2));
+
+ return 0;
+}
+
+// Emit one byte through the output stream.
+void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) {
+ OS << (char)C;
+ ++CurByte;
+}
+
+// Emit a series of bytes (little endian)
+void EmitLEConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
+ raw_ostream &OS) {
+ assert(Size <= 8 && "size too big in emit constant");
+
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, CurByte, OS);
+ Val >>= 8;
+ }
+}
+
+// Emit a series of bytes (big endian)
+void EmitBEConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
+ raw_ostream &OS) {
+ assert(Size <= 8 && "size too big in emit constant");
+
+ for (int i = (Size - 1) * 8; i >= 0; i -= 8)
+ EmitByte((Val >> i) & 255, CurByte, OS);
+}
+
+void BPFMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned Opcode = MI.getOpcode();
+ // Keep track of the current byte being emitted
+ unsigned CurByte = 0;
+
+ if (Opcode == BPF::LD_imm64) {
+ uint64_t Value = getBinaryCodeForInstr(MI, Fixups, STI);
+ EmitByte(Value >> 56, CurByte, OS);
+ EmitByte(((Value >> 48) & 0xff), CurByte, OS);
+ EmitLEConstant(0, 2, CurByte, OS);
+ EmitLEConstant(Value & 0xffffFFFF, 4, CurByte, OS);
+
+ const MCOperand &MO = MI.getOperand(1);
+ uint64_t Imm = MO.isImm() ? MO.getImm() : 0;
+ EmitByte(0, CurByte, OS);
+ EmitByte(0, CurByte, OS);
+ EmitLEConstant(0, 2, CurByte, OS);
+ EmitLEConstant(Imm >> 32, 4, CurByte, OS);
+ } else {
+ // Get instruction encoding and emit it
+ uint64_t Value = getBinaryCodeForInstr(MI, Fixups, STI);
+ EmitByte(Value >> 56, CurByte, OS);
+ EmitByte((Value >> 48) & 0xff, CurByte, OS);
+ EmitLEConstant((Value >> 32) & 0xffff, 2, CurByte, OS);
+ EmitLEConstant(Value & 0xffffFFFF, 4, CurByte, OS);
+ }
+}
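+
+// A worked example for the LD_imm64 split above: loading the constant
+// 0x1122334455667788 emits two 8-byte slots. Assuming the imm field sits
+// in the low 32 bits of the TableGen encoding, the first slot carries the
+// low word 0x55667788 and the second (otherwise zeroed) slot carries the
+// high word 0x11223344.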
+
+// Encode BPF Memory Operand
+uint64_t BPFMCCodeEmitter::getMemoryOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Encoding;
+ const MCOperand Op1 = MI.getOperand(1);
+ assert(Op1.isReg() && "First operand is not register.");
+ Encoding = MRI.getEncodingValue(Op1.getReg());
+ Encoding <<= 16;
+ MCOperand Op2 = MI.getOperand(2);
+ assert(Op2.isImm() && "Second operand is not immediate.");
+ Encoding |= Op2.getImm() & 0xffff;
+ return Encoding;
+}
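+
+// A worked example: a base register with encoding value 2 and an offset
+// of -8 yields (2 << 16) | 0xfff8 == 0x2fff8; the register number sits in
+// bits 16 and up, and the sign-extended offset is truncated to 16 bits.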
+
+#include "BPFGenMCCodeEmitter.inc"
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
new file mode 100644
index 0000000..f82f009
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
@@ -0,0 +1,111 @@
+//===-- BPFMCTargetDesc.cpp - BPF Target Descriptions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides BPF specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "BPFMCTargetDesc.h"
+#include "BPFMCAsmInfo.h"
+#include "InstPrinter/BPFInstPrinter.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "BPFGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "BPFGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "BPFGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static MCInstrInfo *createBPFMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitBPFMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createBPFMCRegisterInfo(StringRef TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitBPFMCRegisterInfo(X, BPF::R11 /* RAReg doesn't exist */);
+ return X;
+}
+
+static MCSubtargetInfo *createBPFMCSubtargetInfo(StringRef TT, StringRef CPU,
+ StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
+ InitBPFMCSubtargetInfo(X, TT, CPU, FS);
+ return X;
+}
+
+static MCCodeGenInfo *createBPFMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ MCCodeGenInfo *X = new MCCodeGenInfo();
+ X->InitMCCodeGenInfo(RM, CM, OL);
+ return X;
+}
+
+static MCStreamer *createBPFMCStreamer(const Target &T, StringRef TT,
+                                       MCContext &Ctx, MCAsmBackend &MAB,
+                                       raw_ostream &OS,
+                                       MCCodeEmitter *Emitter,
+                                       const MCSubtargetInfo &STI,
+                                       bool RelaxAll) {
+  return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll);
+}
+
+static MCInstPrinter *
+createBPFMCInstPrinter(const Target &T, unsigned SyntaxVariant,
+ const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, const MCSubtargetInfo &STI) {
+ if (SyntaxVariant == 0)
+ return new BPFInstPrinter(MAI, MII, MRI);
+  return nullptr;
+}
+
+extern "C" void LLVMInitializeBPFTargetMC() {
+ // Register the MC asm info.
+ RegisterMCAsmInfo<BPFMCAsmInfo> X(TheBPFTarget);
+
+ // Register the MC codegen info.
+ TargetRegistry::RegisterMCCodeGenInfo(TheBPFTarget, createBPFMCCodeGenInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(TheBPFTarget, createBPFMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(TheBPFTarget, createBPFMCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(TheBPFTarget,
+ createBPFMCSubtargetInfo);
+
+ // Register the MC code emitter
+ TargetRegistry::RegisterMCCodeEmitter(TheBPFTarget,
+ llvm::createBPFMCCodeEmitter);
+
+ // Register the ASM Backend
+ TargetRegistry::RegisterMCAsmBackend(TheBPFTarget, createBPFAsmBackend);
+
+ // Register the object streamer
+ TargetRegistry::RegisterMCObjectStreamer(TheBPFTarget, createBPFMCStreamer);
+
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(TheBPFTarget, createBPFMCInstPrinter);
+}
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
new file mode 100644
index 0000000..55901cc
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
@@ -0,0 +1,59 @@
+//===-- BPFMCTargetDesc.h - BPF Target Descriptions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides BPF specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCTARGETDESC_H
+#define LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCTARGETDESC_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Config/config.h"
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class Target;
+class StringRef;
+class raw_ostream;
+
+extern Target TheBPFTarget;
+
+MCCodeEmitter *createBPFMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+
+MCAsmBackend *createBPFAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+
+MCObjectWriter *createBPFELFObjectWriter(raw_ostream &OS, uint8_t OSABI);
+}
+
// Defines symbolic names for BPF registers, i.e. a mapping from
// register name to register number.
+//
+#define GET_REGINFO_ENUM
+#include "BPFGenRegisterInfo.inc"
+
+// Defines symbolic names for the BPF instructions.
+//
+#define GET_INSTRINFO_ENUM
+#include "BPFGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "BPFGenSubtargetInfo.inc"
+
+#endif
diff --git a/lib/Target/BPF/MCTargetDesc/CMakeLists.txt b/lib/Target/BPF/MCTargetDesc/CMakeLists.txt
new file mode 100644
index 0000000..5fcd874
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/CMakeLists.txt
@@ -0,0 +1,6 @@
+add_llvm_library(LLVMBPFDesc
+ BPFMCTargetDesc.cpp
+ BPFAsmBackend.cpp
+ BPFMCCodeEmitter.cpp
+ BPFELFObjectWriter.cpp
+ )
diff --git a/lib/Target/BPF/MCTargetDesc/LLVMBuild.txt b/lib/Target/BPF/MCTargetDesc/LLVMBuild.txt
new file mode 100644
index 0000000..209d17c
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/BPF/MCTargetDesc/LLVMBuild.txt --------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = BPFDesc
+parent = BPF
+required_libraries = MC BPFAsmPrinter BPFInfo
+add_to_library_groups = BPF
diff --git a/lib/Target/BPF/MCTargetDesc/Makefile b/lib/Target/BPF/MCTargetDesc/Makefile
new file mode 100644
index 0000000..af70cd0
--- /dev/null
+++ b/lib/Target/BPF/MCTargetDesc/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/BPF/MCTargetDesc/Makefile ----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMBPFDesc
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/Makefile b/lib/Target/BPF/Makefile
new file mode 100644
index 0000000..7492f5e
--- /dev/null
+++ b/lib/Target/BPF/Makefile
@@ -0,0 +1,21 @@
+##===- lib/Target/BPF/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMBPFCodeGen
+TARGET = BPF
+
+# Make sure that tblgen is run, first thing.
+BUILT_SOURCES = BPFGenRegisterInfo.inc BPFGenInstrInfo.inc \
+ BPFGenAsmWriter.inc BPFGenAsmMatcher.inc BPFGenDAGISel.inc \
+ BPFGenMCCodeEmitter.inc BPFGenSubtargetInfo.inc BPFGenCallingConv.inc
+
+DIRS = InstPrinter TargetInfo MCTargetDesc
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp b/lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp
new file mode 100644
index 0000000..818a992
--- /dev/null
+++ b/lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp
@@ -0,0 +1,18 @@
+//===-- BPFTargetInfo.cpp - BPF Target Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BPF.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+Target llvm::TheBPFTarget;
+
+extern "C" void LLVMInitializeBPFTargetInfo() {
+ RegisterTarget<Triple::bpf> X(TheBPFTarget, "bpf", "BPF");
+}
diff --git a/lib/Target/BPF/TargetInfo/CMakeLists.txt b/lib/Target/BPF/TargetInfo/CMakeLists.txt
new file mode 100644
index 0000000..ca08846
--- /dev/null
+++ b/lib/Target/BPF/TargetInfo/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMBPFInfo
+ BPFTargetInfo.cpp
+ )
diff --git a/lib/Target/BPF/TargetInfo/LLVMBuild.txt b/lib/Target/BPF/TargetInfo/LLVMBuild.txt
new file mode 100644
index 0000000..b56a858
--- /dev/null
+++ b/lib/Target/BPF/TargetInfo/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/BPF/TargetInfo/LLVMBuild.txt ----------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = BPFInfo
+parent = BPF
+required_libraries = Support
+add_to_library_groups = BPF
diff --git a/lib/Target/BPF/TargetInfo/Makefile b/lib/Target/BPF/TargetInfo/Makefile
new file mode 100644
index 0000000..02af58e
--- /dev/null
+++ b/lib/Target/BPF/TargetInfo/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/BPF/TargetInfo/Makefile ------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMBPFInfo
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/CMakeLists.txt b/lib/Target/CMakeLists.txt
index c61805b..1805437 100644
--- a/lib/Target/CMakeLists.txt
+++ b/lib/Target/CMakeLists.txt
@@ -1,14 +1,16 @@
+list(APPEND LLVM_COMMON_DEPENDS intrinsics_gen)
+
add_llvm_library(LLVMTarget
Target.cpp
TargetIntrinsicInfo.cpp
- TargetLibraryInfo.cpp
TargetLoweringObjectFile.cpp
TargetMachine.cpp
TargetMachineC.cpp
TargetSubtargetInfo.cpp
- )
-list(APPEND LLVM_COMMON_DEPENDS intrinsics_gen)
+ ADDITIONAL_HEADER_DIRS
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Target
+ )
foreach(t ${LLVM_TARGETS_TO_BUILD})
message(STATUS "Targeting ${t}")
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
index f610fbb..c7fec52 100644
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ b/lib/Target/CppBackend/CPPBackend.cpp
@@ -22,12 +22,12 @@
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Pass.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
@@ -1942,7 +1942,6 @@ void CppWriter::printModuleBody() {
void CppWriter::printProgram(const std::string& fname,
const std::string& mName) {
Out << "#include <llvm/Pass.h>\n";
- Out << "#include <llvm/PassManager.h>\n";
Out << "#include <llvm/ADT/SmallVector.h>\n";
Out << "#include <llvm/Analysis/Verifier.h>\n";
@@ -1956,6 +1955,7 @@ void CppWriter::printProgram(const std::string& fname,
Out << "#include <llvm/IR/InlineAsm.h>\n";
Out << "#include <llvm/IR/Instructions.h>\n";
Out << "#include <llvm/IR/LLVMContext.h>\n";
+ Out << "#include <llvm/IR/LegacyPassManager.h>\n";
Out << "#include <llvm/IR/Module.h>\n";
Out << "#include <llvm/Support/FormattedStream.h>\n";
Out << "#include <llvm/Support/MathExtras.h>\n";
diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt
index af7914f..eaa8bef 100644
--- a/lib/Target/Hexagon/CMakeLists.txt
+++ b/lib/Target/Hexagon/CMakeLists.txt
@@ -13,7 +13,6 @@ add_public_tablegen_target(HexagonCommonTableGen)
add_llvm_target(HexagonCodeGen
HexagonAsmPrinter.cpp
- HexagonCallingConvLower.cpp
HexagonCFGOptimizer.cpp
HexagonCopyToCombine.cpp
HexagonExpandPredSpillCode.cpp
diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index bc64be1..669af8c 100644
--- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/HexagonBaseInfo.h"
+#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
-
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
@@ -18,14 +18,13 @@
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/Endian.h"
-
-#include <vector>
+#include "llvm/Support/raw_ostream.h"
#include <array>
+#include <vector>
using namespace llvm;
@@ -48,6 +47,13 @@ public:
};
}
+static DecodeStatus DecodeModRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeCtrRegs64RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, void const *Decoder);
+
static const uint16_t IntRegDecoderTable[] = {
Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
Hexagon::R5, Hexagon::R6, Hexagon::R7, Hexagon::R8, Hexagon::R9,
@@ -60,6 +66,16 @@ static const uint16_t IntRegDecoderTable[] = {
static const uint16_t PredRegDecoderTable[] = { Hexagon::P0, Hexagon::P1,
Hexagon::P2, Hexagon::P3 };
+static DecodeStatus DecodeRegisterClass(MCInst &Inst, unsigned RegNo,
+ const uint16_t Table[], size_t Size) {
+ if (RegNo < Size) {
+ Inst.addOperand(MCOperand::CreateReg(Table[RegNo]));
+ return MCDisassembler::Success;
+ }
+  return MCDisassembler::Fail;
+}
+
static DecodeStatus DecodeIntRegsRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t /*Address*/,
void const *Decoder) {
@@ -71,6 +87,81 @@ static DecodeStatus DecodeIntRegsRegisterClass(MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t /*Address*/, const void *Decoder) {
+ static const uint16_t CtrlRegDecoderTable[] = {
+ Hexagon::SA0, Hexagon::LC0, Hexagon::SA1, Hexagon::LC1,
+ Hexagon::P3_0, Hexagon::NoRegister, Hexagon::C6, Hexagon::C7,
+ Hexagon::USR, Hexagon::PC, Hexagon::UGP, Hexagon::GP,
+ Hexagon::CS0, Hexagon::CS1, Hexagon::UPCL, Hexagon::UPCH
+ };
+
+ if (RegNo >= sizeof(CtrlRegDecoderTable) / sizeof(CtrlRegDecoderTable[0]))
+ return MCDisassembler::Fail;
+
+ if (CtrlRegDecoderTable[RegNo] == Hexagon::NoRegister)
+ return MCDisassembler::Fail;
+
+ unsigned Register = CtrlRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCtrRegs64RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t /*Address*/, void const *Decoder) {
+ static const uint16_t CtrlReg64DecoderTable[] = {
+ Hexagon::C1_0, Hexagon::NoRegister,
+ Hexagon::C3_2, Hexagon::NoRegister,
+ Hexagon::NoRegister, Hexagon::NoRegister,
+ Hexagon::C7_6, Hexagon::NoRegister,
+ Hexagon::C9_8, Hexagon::NoRegister,
+ Hexagon::C11_10, Hexagon::NoRegister,
+ Hexagon::CS, Hexagon::NoRegister,
+ Hexagon::UPC, Hexagon::NoRegister
+ };
+
+ if (RegNo >= sizeof(CtrlReg64DecoderTable) / sizeof(CtrlReg64DecoderTable[0]))
+ return MCDisassembler::Fail;
+
+ if (CtrlReg64DecoderTable[RegNo] == Hexagon::NoRegister)
+ return MCDisassembler::Fail;
+
+ unsigned Register = CtrlReg64DecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeModRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t /*Address*/, const void *Decoder) {
+ unsigned Register = 0;
+ switch (RegNo) {
+ case 0:
+ Register = Hexagon::M0;
+ break;
+ case 1:
+ Register = Hexagon::M1;
+ break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeDoubleRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t /*Address*/, const void *Decoder) {
+ static const uint16_t DoubleRegDecoderTable[] = {
+ Hexagon::D0, Hexagon::D1, Hexagon::D2, Hexagon::D3,
+ Hexagon::D4, Hexagon::D5, Hexagon::D6, Hexagon::D7,
+ Hexagon::D8, Hexagon::D9, Hexagon::D10, Hexagon::D11,
+ Hexagon::D12, Hexagon::D13, Hexagon::D14, Hexagon::D15
+ };
+
+  return DecodeRegisterClass(Inst, RegNo >> 1, DoubleRegDecoderTable,
+                             sizeof(DoubleRegDecoderTable) /
+                                 sizeof(DoubleRegDecoderTable[0]));
+}
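+
+// For example, under the encoding above a raw value of 6 decodes as
+// 6 >> 1 == 3, i.e. Hexagon::D3 (the R7:R6 pair).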
+
static DecodeStatus DecodePredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t /*Address*/,
void const *Decoder) {
@@ -110,5 +201,7 @@ DecodeStatus HexagonDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// Remove parse bits.
insn &= ~static_cast<uint32_t>(HexagonII::InstParseBits::INST_PARSE_MASK);
- return decodeInstruction(DecoderTable32, MI, insn, Address, this, STI);
+  DecodeStatus Result =
+      decodeInstruction(DecoderTable32, MI, insn, Address, this, STI);
+ HexagonMCInstrInfo::AppendImplicitOperands(MI);
+ return Result;
}
diff --git a/lib/Target/Hexagon/Disassembler/LLVMBuild.txt b/lib/Target/Hexagon/Disassembler/LLVMBuild.txt
index 17ad11b..43bace7 100644
--- a/lib/Target/Hexagon/Disassembler/LLVMBuild.txt
+++ b/lib/Target/Hexagon/Disassembler/LLVMBuild.txt
@@ -19,5 +19,5 @@
type = Library
name = HexagonDisassembler
parent = Hexagon
-required_libraries = HexagonInfo MCDisassembler Support
+required_libraries = HexagonDesc HexagonInfo MCDisassembler Support
add_to_library_groups = Hexagon
diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h
index 64ae69c..e0a3b2f 100644
--- a/lib/Target/Hexagon/Hexagon.h
+++ b/lib/Target/Hexagon/Hexagon.h
@@ -21,26 +21,24 @@
namespace llvm {
class FunctionPass;
- class ModulePass;
- class TargetMachine;
- class MachineInstr;
- class HexagonMCInst;
class HexagonAsmPrinter;
class HexagonTargetMachine;
+ class MachineInstr;
+ class MCInst;
+ class ModulePass;
class raw_ostream;
+ class TargetMachine;
FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
CodeGenOpt::Level OptLevel);
FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM);
FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM);
FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM);
- FunctionPass *createHexagonCFGOptimizer(const HexagonTargetMachine &TM);
+ FunctionPass *createHexagonCFGOptimizer();
- FunctionPass *createHexagonSplitTFRCondSets(const HexagonTargetMachine &TM);
- FunctionPass *createHexagonSplitConst32AndConst64(
- const HexagonTargetMachine &TM);
- FunctionPass *createHexagonExpandPredSpillCode(
- const HexagonTargetMachine &TM);
+ FunctionPass *createHexagonSplitTFRCondSets();
+ FunctionPass *createHexagonSplitConst32AndConst64();
+ FunctionPass *createHexagonExpandPredSpillCode();
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
@@ -58,7 +56,7 @@ namespace llvm {
TargetAsmBackend *createHexagonAsmBackend(const Target &,
const std::string &);
*/
- void HexagonLowerToMC(const MachineInstr *MI, HexagonMCInst &MCI,
+ void HexagonLowerToMC(MachineInstr const *MI, MCInst &MCI,
HexagonAsmPrinter &AP);
} // end namespace llvm;
diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td
index 5f4a6c6..f892c9f 100644
--- a/lib/Target/Hexagon/Hexagon.td
+++ b/lib/Target/Hexagon/Hexagon.td
@@ -21,35 +21,23 @@ include "llvm/Target/Target.td"
// Hexagon Subtarget features.
//===----------------------------------------------------------------------===//
-// Hexagon Archtectures
-def ArchV2 : SubtargetFeature<"v2", "HexagonArchVersion", "V2",
- "Hexagon v2">;
-def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
- "Hexagon v3">;
-def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
- "Hexagon v4">;
-def ArchV5 : SubtargetFeature<"v5", "HexagonArchVersion", "V5",
- "Hexagon v5">;
+// Hexagon Architectures
+def ArchV4: SubtargetFeature<"v4", "HexagonArchVersion", "V4", "Hexagon V4">;
+def ArchV5: SubtargetFeature<"v5", "HexagonArchVersion", "V5", "Hexagon V5">;
//===----------------------------------------------------------------------===//
// Hexagon Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
-def HasV2T : Predicate<"Subtarget.hasV2TOps()">;
-def HasV2TOnly : Predicate<"Subtarget.hasV2TOpsOnly()">;
-def NoV2T : Predicate<"!Subtarget.hasV2TOps()">;
-def HasV3T : Predicate<"Subtarget.hasV3TOps()">;
-def HasV3TOnly : Predicate<"Subtarget.hasV3TOpsOnly()">;
-def NoV3T : Predicate<"!Subtarget.hasV3TOps()">;
-def HasV4T : Predicate<"Subtarget.hasV4TOps()">;
-def NoV4T : Predicate<"!Subtarget.hasV4TOps()">;
-def HasV5T : Predicate<"Subtarget.hasV5TOps()">;
-def NoV5T : Predicate<"!Subtarget.hasV5TOps()">;
-def UseMEMOP : Predicate<"Subtarget.useMemOps()">;
-def IEEERndNearV5T : Predicate<"Subtarget.modeIEEERndNear()">;
+def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
+def NoV5T : Predicate<"!Subtarget->hasV5TOps()">;
+def UseMEMOP : Predicate<"Subtarget->useMemOps()">;
+def IEEERndNearV5T : Predicate<"Subtarget->modeIEEERndNear()">;
//===----------------------------------------------------------------------===//
// Classes used for relation maps.
//===----------------------------------------------------------------------===//
+
+class ImmRegShl;
// PredRel - Filter class used to relate non-predicated instructions with their
// predicated forms.
class PredRel;
@@ -137,7 +125,7 @@ def getPredOldOpcode : InstrMapping {
//
def getNewValueOpcode : InstrMapping {
let FilterClass = "NewValueRel";
- let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
+ let RowFields = ["BaseOpcode", "PredSense", "PNewValue", "addrMode"];
let ColFields = ["NValueST"];
let KeyCol = ["false"];
let ValueCols = [["true"]];
@@ -149,7 +137,7 @@ def getNewValueOpcode : InstrMapping {
//
def getNonNVStore : InstrMapping {
let FilterClass = "NewValueRel";
- let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
+ let RowFields = ["BaseOpcode", "PredSense", "PNewValue", "addrMode"];
let ColFields = ["NValueST"];
let KeyCol = ["true"];
let ValueCols = [["false"]];
@@ -180,6 +168,14 @@ def getRegForm : InstrMapping {
let ValueCols = [["reg"]];
}
+def getRegShlForm : InstrMapping {
+ let FilterClass = "ImmRegShl";
+ let RowFields = ["CextOpcode", "PredSense", "PNewValue", "isNVStore"];
+ let ColFields = ["InputType"];
+ let KeyCol = ["imm"];
+ let ValueCols = [["reg"]];
+}
+
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
@@ -200,8 +196,10 @@ class Proc<string Name, SchedMachineModel Model,
list<SubtargetFeature> Features>
: ProcessorModel<Name, Model, Features>;
-def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>;
-def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
+def : Proc<"hexagonv4", HexagonModelV4,
+ [ArchV4]>;
+def : Proc<"hexagonv5", HexagonModelV4,
+ [ArchV4, ArchV5]>;
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index 9240282..180762f 100644
--- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -19,7 +19,7 @@
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "MCTargetDesc/HexagonInstPrinter.h"
-#include "MCTargetDesc/HexagonMCInst.h"
+#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -61,6 +61,10 @@ static cl::opt<bool> AlignCalls(
"hexagon-align-calls", cl::Hidden, cl::init(true),
cl::desc("Insert falign after call instruction for Hexagon target"));
+HexagonAsmPrinter::HexagonAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), Subtarget(nullptr) {}
+
void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
@@ -174,7 +178,7 @@ bool HexagonAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
///
void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MI->isBundle()) {
- std::vector<const MachineInstr*> BundleMIs;
+ std::vector<MachineInstr const *> BundleMIs;
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_instr_iterator MII = MI;
@@ -183,33 +187,35 @@ void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
while (MII != MBB->end() && MII->isInsideBundle()) {
const MachineInstr *MInst = MII;
if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
- MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
- IgnoreCount++;
- ++MII;
- continue;
+ MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
+ IgnoreCount++;
+ ++MII;
+ continue;
}
- //BundleMIs.push_back(&*MII);
+ // BundleMIs.push_back(&*MII);
BundleMIs.push_back(MInst);
++MII;
}
unsigned Size = BundleMIs.size();
- assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
+ assert((Size + IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
for (unsigned Index = 0; Index < Size; Index++) {
- HexagonMCInst MCI;
- MCI.setPacketStart(Index == 0);
- MCI.setPacketEnd(Index == (Size-1));
+ MCInst MCI;
HexagonLowerToMC(BundleMIs[Index], MCI, *this);
+ HexagonMCInstrInfo::AppendImplicitOperands(MCI);
+ HexagonMCInstrInfo::setPacketBegin(MCI, Index == 0);
+ HexagonMCInstrInfo::setPacketEnd(MCI, Index == (Size - 1));
EmitToStreamer(OutStreamer, MCI);
}
}
else {
- HexagonMCInst MCI;
+ MCInst MCI;
+ HexagonLowerToMC(MI, MCI, *this);
+ HexagonMCInstrInfo::AppendImplicitOperands(MCI);
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
- MCI.setPacketStart(true);
- MCI.setPacketEnd(true);
+ HexagonMCInstrInfo::setPacketBegin(MCI, true);
+ HexagonMCInstrInfo::setPacketEnd(MCI, true);
}
- HexagonLowerToMC(MI, MCI, *this);
EmitToStreamer(OutStreamer, MCI);
}
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.h b/lib/Target/Hexagon/HexagonAsmPrinter.h
index 5f4c162..792fc8b 100644
--- a/lib/Target/Hexagon/HexagonAsmPrinter.h
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.h
@@ -25,9 +25,12 @@ namespace llvm {
const HexagonSubtarget *Subtarget;
public:
- explicit HexagonAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
- Subtarget = &TM.getSubtarget<HexagonSubtarget>();
+ explicit HexagonAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer);
+
+ bool runOnMachineFunction(MachineFunction &Fn) override {
+ Subtarget = &Fn.getSubtarget<HexagonSubtarget>();
+ return AsmPrinter::runOnMachineFunction(Fn);
}
const char *getPassName() const override {
diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index 8a4e02c..703e691 100644
--- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -37,15 +37,11 @@ namespace {
class HexagonCFGOptimizer : public MachineFunctionPass {
private:
- const HexagonTargetMachine& QTM;
- const HexagonSubtarget &QST;
-
void InvertAndChangeJumpTarget(MachineInstr*, MachineBasicBlock*);
public:
static char ID;
- HexagonCFGOptimizer(const HexagonTargetMachine& TM)
- : MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {
+ HexagonCFGOptimizer() : MachineFunctionPass(ID) {
initializeHexagonCFGOptimizerPass(*PassRegistry::getPassRegistry());
}
@@ -59,49 +55,49 @@ private:
char HexagonCFGOptimizer::ID = 0;
static bool IsConditionalBranch(int Opc) {
- return (Opc == Hexagon::JMP_t) || (Opc == Hexagon::JMP_f)
- || (Opc == Hexagon::JMP_tnew_t) || (Opc == Hexagon::JMP_fnew_t);
+ return (Opc == Hexagon::J2_jumpt) || (Opc == Hexagon::J2_jumpf)
+ || (Opc == Hexagon::J2_jumptnewpt) || (Opc == Hexagon::J2_jumpfnewpt);
}
static bool IsUnconditionalJump(int Opc) {
- return (Opc == Hexagon::JMP);
+ return (Opc == Hexagon::J2_jump);
}
void
HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
MachineBasicBlock* NewTarget) {
- const HexagonInstrInfo *QII = QTM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII =
+ MI->getParent()->getParent()->getSubtarget().getInstrInfo();
int NewOpcode = 0;
switch(MI->getOpcode()) {
- case Hexagon::JMP_t:
- NewOpcode = Hexagon::JMP_f;
+ case Hexagon::J2_jumpt:
+ NewOpcode = Hexagon::J2_jumpf;
break;
- case Hexagon::JMP_f:
- NewOpcode = Hexagon::JMP_t;
+ case Hexagon::J2_jumpf:
+ NewOpcode = Hexagon::J2_jumpt;
break;
- case Hexagon::JMP_tnew_t:
- NewOpcode = Hexagon::JMP_fnew_t;
+ case Hexagon::J2_jumptnewpt:
+ NewOpcode = Hexagon::J2_jumpfnewpt;
break;
- case Hexagon::JMP_fnew_t:
- NewOpcode = Hexagon::JMP_tnew_t;
+ case Hexagon::J2_jumpfnewpt:
+ NewOpcode = Hexagon::J2_jumptnewpt;
break;
default:
llvm_unreachable("Cannot handle this case");
}
- MI->setDesc(QII->get(NewOpcode));
+ MI->setDesc(TII->get(NewOpcode));
MI->getOperand(1).setMBB(NewTarget);
}
bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
-
// Loop over all of the basic blocks.
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
MBBb != MBBe; ++MBBb) {
@@ -163,8 +159,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// The target of the unconditional branch must be JumpAroundTarget.
// TODO: If not, we should not invert the unconditional branch.
MachineBasicBlock* CondBranchTarget = nullptr;
- if ((MI->getOpcode() == Hexagon::JMP_t) ||
- (MI->getOpcode() == Hexagon::JMP_f)) {
+ if ((MI->getOpcode() == Hexagon::J2_jumpt) ||
+ (MI->getOpcode() == Hexagon::J2_jumpf)) {
CondBranchTarget = MI->getOperand(1).getMBB();
}
@@ -248,6 +244,6 @@ void llvm::initializeHexagonCFGOptimizerPass(PassRegistry &Registry) {
CALL_ONCE_INITIALIZATION(initializePassOnce)
}
-FunctionPass *llvm::createHexagonCFGOptimizer(const HexagonTargetMachine &TM) {
- return new HexagonCFGOptimizer(TM);
+FunctionPass *llvm::createHexagonCFGOptimizer() {
+ return new HexagonCFGOptimizer();
}
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp
deleted file mode 100644
index 8d78409..0000000
--- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-//===-- llvm/CallingConvLower.cpp - Calling Convention lowering -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Hexagon_CCState class, used for lowering and
-// implementing calling conventions. Adapted from the machine independent
-// version of the class (CCState) but this handles calls to varargs functions
-//
-//===----------------------------------------------------------------------===//
-
-#include "HexagonCallingConvLower.h"
-#include "Hexagon.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-using namespace llvm;
-
-Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
- const TargetMachine &tm,
- SmallVectorImpl<CCValAssign> &locs,
- LLVMContext &c)
- : CallingConv(CC), IsVarArg(isVarArg), TM(tm), Locs(locs), Context(c) {
- // No stack is used.
- StackOffset = 0;
-
- UsedRegs.resize(
- (TM.getSubtargetImpl()->getRegisterInfo()->getNumRegs() + 31) / 32);
-}
-
-// HandleByVal - Allocate a stack slot large enough to pass an argument by
-// value. The size and alignment information of the argument is encoded in its
-// parameter attribute.
-void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- int MinSize, int MinAlign,
- ISD::ArgFlagsTy ArgFlags) {
- unsigned Align = ArgFlags.getByValAlign();
- unsigned Size = ArgFlags.getByValSize();
- if (MinSize > (int)Size)
- Size = MinSize;
- if (MinAlign > (int)Align)
- Align = MinAlign;
- unsigned Offset = AllocateStack(Size, Align);
-
- addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset,
- LocVT.getSimpleVT(), LocInfo));
-}
-
-/// MarkAllocated - Mark a register and all of its aliases as allocated.
-void Hexagon_CCState::MarkAllocated(unsigned Reg) {
- const TargetRegisterInfo &TRI = *TM.getSubtargetImpl()->getRegisterInfo();
- for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
- UsedRegs[*AI/32] |= 1 << (*AI&31);
-}
-
-/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
-/// incorporating info about the formals into this state.
-void
-Hexagon_CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg>
- &Ins,
- Hexagon_CCAssignFn Fn,
- unsigned SretValueInRegs) {
- unsigned NumArgs = Ins.size();
- unsigned i = 0;
-
- // If the function returns a small struct in registers, skip
- // over the first (dummy) argument.
- if (SretValueInRegs != 0) {
- ++i;
- }
-
-
- for (; i != NumArgs; ++i) {
- EVT ArgVT = Ins[i].VT;
- ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, 0, 0, false)) {
- dbgs() << "Formal argument #" << i << " has unhandled type "
- << ArgVT.getEVTString() << "\n";
- abort();
- }
- }
-}
-
-/// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
-/// incorporating info about the result values into this state.
-void
-Hexagon_CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
- Hexagon_CCAssignFn Fn,
- unsigned SretValueInRegs) {
-
- // For Hexagon, Return small structures in registers.
- if (SretValueInRegs != 0) {
- if (SretValueInRegs <= 32) {
- unsigned Reg = Hexagon::R0;
- addLoc(CCValAssign::getReg(0, MVT::i32, Reg, MVT::i32,
- CCValAssign::Full));
- return;
- }
- if (SretValueInRegs <= 64) {
- unsigned Reg = Hexagon::D0;
- addLoc(CCValAssign::getReg(0, MVT::i64, Reg, MVT::i64,
- CCValAssign::Full));
- return;
- }
- }
-
-
- // Determine which register each value should be copied into.
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this, -1, -1, false)){
- dbgs() << "Return operand #" << i << " has unhandled type "
- << VT.getEVTString() << "\n";
- abort();
- }
- }
-}
-
-
-/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
-/// about the passed values into this state.
-void
-Hexagon_CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg>
- &Outs,
- Hexagon_CCAssignFn Fn,
- int NonVarArgsParams,
- unsigned SretValueSize) {
- unsigned NumOps = Outs.size();
-
- unsigned i = 0;
- // If the called function returns a small struct in registers, skip
- // the first actual parameter. We do not want to pass a pointer to
- // the stack location.
- if (SretValueSize != 0) {
- ++i;
- }
-
- for (; i != NumOps; ++i) {
- EVT ArgVT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this,
- NonVarArgsParams, i+1, false)) {
- dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString() << "\n";
- abort();
- }
- }
-}
-
-/// AnalyzeCallOperands - Same as above except it takes vectors of types
-/// and argument flags.
-void
-Hexagon_CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
- SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
- Hexagon_CCAssignFn Fn) {
- unsigned NumOps = ArgVTs.size();
- for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = ArgVTs[i];
- ISD::ArgFlagsTy ArgFlags = Flags[i];
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, -1, -1,
- false)) {
- dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString() << "\n";
- abort();
- }
- }
-}
-
-/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
-/// incorporating info about the passed values into this state.
-void
-Hexagon_CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
- Hexagon_CCAssignFn Fn,
- unsigned SretValueInRegs) {
-
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- EVT VT = Ins[i].VT;
- ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this, -1, -1, false)) {
- dbgs() << "Call result #" << i << " has unhandled type "
- << VT.getEVTString() << "\n";
- abort();
- }
- }
-}
-
-/// AnalyzeCallResult - Same as above except it's specialized for calls which
-/// produce a single value.
-void Hexagon_CCState::AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn) {
- if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this, -1, -1,
- false)) {
- dbgs() << "Call result has unhandled type "
- << VT.getEVTString() << "\n";
- abort();
- }
-}
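The removed MarkAllocated/isAllocated machinery tracks allocated registers in a packed bitmask: word Reg/32, bit Reg&31, sized with the (NumRegs + 31) / 32 rounding seen in the constructor. The same arithmetic as a standalone sketch, in plain C++ with no LLVM types:

    #include <cstdint>
    #include <vector>

    // One bit per register number.
    struct RegBitmask {
      std::vector<uint32_t> Words;
      explicit RegBitmask(unsigned NumRegs)
          : Words((NumRegs + 31) / 32, 0) {}   // round up to whole words
      void mark(unsigned Reg) { Words[Reg / 32] |= 1u << (Reg & 31); }
      bool test(unsigned Reg) const {
        return (Words[Reg / 32] >> (Reg & 31)) & 1u;
      }
    };

In the deleted code the marking loop additionally walks MCRegAliasIterator, so allocating one register claims all of its aliases.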
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.h b/lib/Target/Hexagon/HexagonCallingConvLower.h
deleted file mode 100644
index 738ed1a..0000000
--- a/lib/Target/Hexagon/HexagonCallingConvLower.h
+++ /dev/null
@@ -1,187 +0,0 @@
-//===-- HexagonCallingConvLower.h - Calling Conventions ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Hexagon_CCState class, used for lowering
-// and implementing calling conventions. Adapted from the target independent
-// version but this handles calls to varargs functions
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONCALLINGCONVLOWER_H
-#define LLVM_LIB_TARGET_HEXAGON_HEXAGONCALLINGCONVLOWER_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-
-//
-// Need to handle varargs.
-//
-namespace llvm {
- class TargetRegisterInfo;
- class TargetMachine;
- class Hexagon_CCState;
- class SDNode;
- struct EVT;
-
-/// Hexagon_CCAssignFn - This function assigns a location for Val, updating
-/// State to reflect the change.
-typedef bool Hexagon_CCAssignFn(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, Hexagon_CCState &State,
- int NonVarArgsParams,
- int CurrentParam,
- bool ForceMem);
-
-
-/// CCState - This class holds information needed while lowering arguments and
-/// return values. It captures which registers are already assigned and which
-/// stack slots are used. It provides accessors to allocate these values.
-class Hexagon_CCState {
- CallingConv::ID CallingConv;
- bool IsVarArg;
- const TargetMachine &TM;
- SmallVectorImpl<CCValAssign> &Locs;
- LLVMContext &Context;
-
- unsigned StackOffset;
- SmallVector<uint32_t, 16> UsedRegs;
-public:
- Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
- SmallVectorImpl<CCValAssign> &locs, LLVMContext &c);
-
- void addLoc(const CCValAssign &V) {
- Locs.push_back(V);
- }
-
- LLVMContext &getContext() const { return Context; }
- const TargetMachine &getTarget() const { return TM; }
- unsigned getCallingConv() const { return CallingConv; }
- bool isVarArg() const { return IsVarArg; }
-
- unsigned getNextStackOffset() const { return StackOffset; }
-
- /// isAllocated - Return true if the specified register (or an alias) is
- /// allocated.
- bool isAllocated(unsigned Reg) const {
- return UsedRegs[Reg/32] & (1 << (Reg&31));
- }
-
- /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
- /// incorporating info about the formals into this state.
- void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
- Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
-
- /// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
- /// incorporating info about the result values into this state.
- void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
- Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
-
- /// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
- /// about the passed values into this state.
- void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
- Hexagon_CCAssignFn Fn, int NonVarArgsParams,
- unsigned SretValueSize);
-
- /// AnalyzeCallOperands - Same as above except it takes vectors of types
- /// and argument flags.
- void AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
- SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
- Hexagon_CCAssignFn Fn);
-
- /// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
- /// incorporating info about the passed values into this state.
- void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
- Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
-
- /// AnalyzeCallResult - Same as above except it's specialized for calls which
- /// produce a single value.
- void AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn);
-
- /// getFirstUnallocated - Return the first unallocated register in the set, or
- /// NumRegs if they are all allocated.
- unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const {
- for (unsigned i = 0; i != NumRegs; ++i)
- if (!isAllocated(Regs[i]))
- return i;
- return NumRegs;
- }
-
- /// AllocateReg - Attempt to allocate one register. If it is not available,
- /// return zero. Otherwise, return the register, marking it and any aliases
- /// as allocated.
- unsigned AllocateReg(unsigned Reg) {
- if (isAllocated(Reg)) return 0;
- MarkAllocated(Reg);
- return Reg;
- }
-
- /// Version of AllocateReg with extra register to be shadowed.
- unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
- if (isAllocated(Reg)) return 0;
- MarkAllocated(Reg);
- MarkAllocated(ShadowReg);
- return Reg;
- }
-
- /// AllocateReg - Attempt to allocate one of the specified registers. If none
- /// are available, return zero. Otherwise, return the first one available,
- /// marking it and any aliases as allocated.
- unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) {
- unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
- if (FirstUnalloc == NumRegs)
- return 0; // Didn't find the reg.
-
- // Mark the register and any aliases as allocated.
- unsigned Reg = Regs[FirstUnalloc];
- MarkAllocated(Reg);
- return Reg;
- }
-
- /// Version of AllocateReg with list of registers to be shadowed.
- unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs,
- unsigned NumRegs) {
- unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
- if (FirstUnalloc == NumRegs)
- return 0; // Didn't find the reg.
-
- // Mark the register and any aliases as allocated.
- unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
- MarkAllocated(Reg);
- MarkAllocated(ShadowReg);
- return Reg;
- }
-
- /// AllocateStack - Allocate a chunk of stack space with the specified size
- /// and alignment.
- unsigned AllocateStack(unsigned Size, unsigned Align) {
- assert(Align && ((Align-1) & Align) == 0); // Align is power of 2.
- StackOffset = ((StackOffset + Align-1) & ~(Align-1));
- unsigned Result = StackOffset;
- StackOffset += Size;
- return Result;
- }
-
- // HandleByVal - Allocate a stack slot large enough to pass an argument by
- // value. The size and alignment information of the argument is encoded in its
- // parameter attribute.
- void HandleByVal(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
-
-private:
- /// MarkAllocated - Mark a register and all of its aliases as allocated.
- void MarkAllocated(unsigned Reg);
-};
-
-
-
-} // end namespace llvm
-
-#endif
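Before it allocates, AllocateStack in this deleted header rounds the running offset up to the requested alignment with the usual power-of-two trick, (Off + Align-1) & ~(Align-1). A checked sketch of just that step:

    #include <cassert>

    // Round StackOffset up to a power-of-two Align, then reserve Size bytes.
    unsigned allocateStack(unsigned &StackOffset, unsigned Size,
                           unsigned Align) {
      assert(Align && ((Align - 1) & Align) == 0 && "Align must be 2^k");
      StackOffset = (StackOffset + Align - 1) & ~(Align - 1);  // align up
      unsigned Result = StackOffset;
      StackOffset += Size;
      return Result;
    }
    // Example: StackOffset=5, Size=4, Align=8 -> returns 8, offset becomes 12.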
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 4e76698..dd193f9 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -114,7 +114,7 @@ static bool isCombinableInstType(MachineInstr *MI,
const HexagonInstrInfo *TII,
bool ShouldCombineAggressively) {
switch(MI->getOpcode()) {
- case Hexagon::TFR: {
+ case Hexagon::A2_tfr: {
// A COPY instruction can be combined if its arguments are IntRegs (32-bit).
assert(MI->getOperand(0).isReg() && MI->getOperand(1).isReg());
@@ -124,7 +124,7 @@ static bool isCombinableInstType(MachineInstr *MI,
Hexagon::IntRegsRegClass.contains(SrcReg);
}
- case Hexagon::TFRI: {
+ case Hexagon::A2_tfrsi: {
// A transfer-immediate can be combined if its argument is a signed 8-bit
// value.
assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
@@ -158,11 +158,11 @@ static bool isCombinableInstType(MachineInstr *MI,
}
static bool isGreaterThan8BitTFRI(MachineInstr *I) {
- return I->getOpcode() == Hexagon::TFRI &&
+ return I->getOpcode() == Hexagon::A2_tfrsi &&
!isInt<8>(I->getOperand(1).getImm());
}
static bool isGreaterThan6BitTFRI(MachineInstr *I) {
- return I->getOpcode() == Hexagon::TFRI &&
+ return I->getOpcode() == Hexagon::A2_tfrsi &&
!isUInt<6>(I->getOperand(1).getImm());
}
@@ -171,26 +171,14 @@ static bool isGreaterThan6BitTFRI(MachineInstr *I) {
static bool areCombinableOperations(const TargetRegisterInfo *TRI,
MachineInstr *HighRegInst,
MachineInstr *LowRegInst) {
- assert((HighRegInst->getOpcode() == Hexagon::TFR ||
- HighRegInst->getOpcode() == Hexagon::TFRI ||
+ assert((HighRegInst->getOpcode() == Hexagon::A2_tfr ||
+ HighRegInst->getOpcode() == Hexagon::A2_tfrsi ||
HighRegInst->getOpcode() == Hexagon::TFRI_V4) &&
- (LowRegInst->getOpcode() == Hexagon::TFR ||
- LowRegInst->getOpcode() == Hexagon::TFRI ||
+ (LowRegInst->getOpcode() == Hexagon::A2_tfr ||
+ LowRegInst->getOpcode() == Hexagon::A2_tfrsi ||
LowRegInst->getOpcode() == Hexagon::TFRI_V4) &&
"Assume individual instructions are of a combinable type");
- const HexagonRegisterInfo *QRI =
- static_cast<const HexagonRegisterInfo *>(TRI);
-
- // V4 added some combine variations (mixed immediate and register source
- // operands), if we are on < V4 we can only combine 2 register-to-register
- // moves and 2 immediate-to-register moves. We also don't have
- // constant-extenders.
- if (!QRI->Subtarget.hasV4TOps())
- return HighRegInst->getOpcode() == LowRegInst->getOpcode() &&
- !isGreaterThan8BitTFRI(HighRegInst) &&
- !isGreaterThan6BitTFRI(LowRegInst);
-
// There is no combine of two constant extended values.
if ((HighRegInst->getOpcode() == Hexagon::TFRI_V4 ||
isGreaterThan8BitTFRI(HighRegInst)) &&
@@ -418,7 +406,7 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
// Get target info.
TRI = MF.getSubtarget().getRegisterInfo();
- TII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
// Combine aggressively (for code size)
ShouldCombineAggressively =
@@ -563,14 +551,14 @@ void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
// Handle globals.
if (HiOperand.isGlobal()) {
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
.addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
HiOperand.getTargetFlags())
.addImm(LoOperand.getImm());
return;
}
if (LoOperand.isGlobal()) {
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_iI_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
.addImm(HiOperand.getImm())
.addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
LoOperand.getTargetFlags());
@@ -580,7 +568,7 @@ void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
// Handle constant extended immediates.
if (!isInt<8>(HiOperand.getImm())) {
assert(isInt<8>(LoOperand.getImm()));
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
.addImm(HiOperand.getImm())
.addImm(LoOperand.getImm());
return;
@@ -588,7 +576,7 @@ void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
if (!isUInt<6>(LoOperand.getImm())) {
assert(isInt<8>(HiOperand.getImm()));
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_iI_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
.addImm(HiOperand.getImm())
.addImm(LoOperand.getImm());
return;
@@ -596,7 +584,7 @@ void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
// Insert new combine instruction.
// DoubleRegDest = combine #HiImm, #LoImm
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ii), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
.addImm(HiOperand.getImm())
.addImm(LoOperand.getImm());
}
@@ -613,7 +601,7 @@ void HexagonCopyToCombine::emitCombineIR(MachineBasicBlock::iterator &InsertPt,
// Handle global.
if (HiOperand.isGlobal()) {
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ir_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineir), DoubleDestReg)
.addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
HiOperand.getTargetFlags())
.addReg(LoReg, LoRegKillFlag);
@@ -621,7 +609,7 @@ void HexagonCopyToCombine::emitCombineIR(MachineBasicBlock::iterator &InsertPt,
}
// Insert new combine instruction.
// DoubleRegDest = combine #HiImm, LoReg
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_Ir_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineir), DoubleDestReg)
.addImm(HiOperand.getImm())
.addReg(LoReg, LoRegKillFlag);
}
@@ -638,7 +626,7 @@ void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt,
// Handle global.
if (LoOperand.isGlobal()) {
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rI_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
.addReg(HiReg, HiRegKillFlag)
.addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
LoOperand.getTargetFlags());
@@ -647,7 +635,7 @@ void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt,
// Insert new combine instruction.
// DoubleRegDest = combine HiReg, #LoImm
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rI_V4), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
.addReg(HiReg, HiRegKillFlag)
.addImm(LoOperand.getImm());
}
@@ -666,7 +654,7 @@ void HexagonCopyToCombine::emitCombineRR(MachineBasicBlock::iterator &InsertPt,
// Insert new combine instruction.
// DoubleRegDest = combine HiReg, LoReg
- BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::COMBINE_rr), DoubleDestReg)
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combinew), DoubleDestReg)
.addReg(HiReg, HiRegKillFlag)
.addReg(LoReg, LoRegKillFlag);
}
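The emitCombineII hunks above encode a constraint worth spelling out: only one of the two immediates may need a constant extender, and the choice of combine opcode decides which operand gets it. A hypothetical helper condensing that decision (isInt/isUInt are from llvm/Support/MathExtras.h):

    // Sketch: pick the combine opcode for two plain immediates.
    static unsigned pickCombineII(int64_t Hi, int64_t Lo) {
      if (!isInt<8>(Hi)) {
        assert(isInt<8>(Lo) && "cannot constant-extend both immediates");
        return Hexagon::A2_combineii;   // extender goes to #Hi
      }
      if (!isUInt<6>(Lo))
        return Hexagon::A4_combineii;   // extender goes to #Lo
      return Hexagon::A2_combineii;     // both fit without an extender
    }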
diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
index 8ef4c3a..8176598 100644
--- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
+++ b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
@@ -20,7 +20,6 @@
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
-#include "HexagonTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -49,13 +48,9 @@ namespace llvm {
namespace {
class HexagonExpandPredSpillCode : public MachineFunctionPass {
- const HexagonTargetMachine& QTM;
- const HexagonSubtarget &QST;
-
public:
static char ID;
- HexagonExpandPredSpillCode(const HexagonTargetMachine& TM) :
- MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {
+ HexagonExpandPredSpillCode() : MachineFunctionPass(ID) {
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeHexagonExpandPredSpillCodePass(Registry);
}
@@ -72,7 +67,8 @@ char HexagonExpandPredSpillCode::ID = 0;
bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
- const HexagonInstrInfo *TII = QTM.getSubtargetImpl()->getInstrInfo();
+ const HexagonSubtarget &QST = Fn.getSubtarget<HexagonSubtarget>();
+ const HexagonInstrInfo *TII = QST.getInstrInfo();
// Loop over all of the basic blocks.
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
@@ -86,45 +82,43 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
if (Opc == Hexagon::STriw_pred) {
// STriw_pred [R30], ofst, SrcReg;
unsigned FP = MI->getOperand(0).getReg();
- assert(
- FP ==
- QTM.getSubtargetImpl()->getRegisterInfo()->getFrameRegister() &&
- "Not a Frame Pointer, Nor a Spill Slot");
+ assert(FP == QST.getRegisterInfo()->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
assert(MI->getOperand(1).isImm() && "Not an offset");
int Offset = MI->getOperand(1).getImm();
int SrcReg = MI->getOperand(2).getReg();
assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
"Not a predicate register");
- if (!TII->isValidOffset(Hexagon::STriw_indexed, Offset)) {
- if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ if (!TII->isValidOffset(Hexagon::S2_storeri_io, Offset)) {
+ if (!TII->isValidOffset(Hexagon::A2_addi, Offset)) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::CONST32_Int_Real),
HEXAGON_RESERVED_REG_1).addImm(Offset);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_add),
HEXAGON_RESERVED_REG_1)
.addReg(FP).addReg(HEXAGON_RESERVED_REG_1);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::STriw_indexed))
+ TII->get(Hexagon::S2_storeri_io))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0).addReg(HEXAGON_RESERVED_REG_2);
} else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_addi),
HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::STriw_indexed))
+ TII->get(Hexagon::S2_storeri_io))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0)
.addReg(HEXAGON_RESERVED_REG_2);
}
} else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::STriw_indexed)).
+ TII->get(Hexagon::S2_storeri_io)).
addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
}
MII = MBB->erase(MI);
@@ -135,14 +129,12 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
assert(Hexagon::PredRegsRegClass.contains(DstReg) &&
"Not a predicate register");
unsigned FP = MI->getOperand(1).getReg();
- assert(
- FP ==
- QTM.getSubtargetImpl()->getRegisterInfo()->getFrameRegister() &&
- "Not a Frame Pointer, Nor a Spill Slot");
+ assert(FP == QST.getRegisterInfo()->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
assert(MI->getOperand(2).isImm() && "Not an offset");
int Offset = MI->getOperand(2).getImm();
- if (!TII->isValidOffset(Hexagon::LDriw, Offset)) {
- if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ if (!TII->isValidOffset(Hexagon::L2_loadri_io, Offset)) {
+ if (!TII->isValidOffset(Hexagon::A2_addi, Offset)) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::CONST32_Int_Real),
HEXAGON_RESERVED_REG_1).addImm(Offset);
@@ -150,26 +142,26 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
HEXAGON_RESERVED_REG_1)
.addReg(FP)
.addReg(HEXAGON_RESERVED_REG_1);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
HEXAGON_RESERVED_REG_2)
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
DstReg).addReg(HEXAGON_RESERVED_REG_2);
} else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_addi),
HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
HEXAGON_RESERVED_REG_2)
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
DstReg).addReg(HEXAGON_RESERVED_REG_2);
}
} else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
HEXAGON_RESERVED_REG_2).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
DstReg).addReg(HEXAGON_RESERVED_REG_2);
}
MII = MBB->erase(MI);
@@ -200,6 +192,6 @@ void llvm::initializeHexagonExpandPredSpillCodePass(PassRegistry &Registry) {
}
FunctionPass*
-llvm::createHexagonExpandPredSpillCode(const HexagonTargetMachine &TM) {
- return new HexagonExpandPredSpillCode(TM);
+llvm::createHexagonExpandPredSpillCode() {
+ return new HexagonExpandPredSpillCode();
}
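Stripped of the CONST32 fallback for very large offsets, the store-side expansion above reduces to: transfer the predicate into a scratch GPR with C2_tfrpr, then store it, materializing fp + #Offset first when the offset does not fit S2_storeri_io. A condensed, hypothetical helper, with the pass's reserved registers assumed in scope:

    static void expandPredSpill(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MII,
                                const HexagonInstrInfo *TII, unsigned FP,
                                int Offset, unsigned SrcReg) {
      DebugLoc DL = MII->getDebugLoc();
      if (!TII->isValidOffset(Hexagon::S2_storeri_io, Offset)) {
        // r1 = add(fp, #Offset); then store at offset 0 from r1.
        BuildMI(MBB, MII, DL, TII->get(Hexagon::A2_addi),
                HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
        BuildMI(MBB, MII, DL, TII->get(Hexagon::C2_tfrpr),
                HEXAGON_RESERVED_REG_2).addReg(SrcReg);
        BuildMI(MBB, MII, DL, TII->get(Hexagon::S2_storeri_io))
            .addReg(HEXAGON_RESERVED_REG_1).addImm(0)
            .addReg(HEXAGON_RESERVED_REG_2);
      } else {
        // Offset fits: transfer, then a plain base+offset store.
        BuildMI(MBB, MII, DL, TII->get(Hexagon::C2_tfrpr),
                HEXAGON_RESERVED_REG_2).addReg(SrcReg);
        BuildMI(MBB, MII, DL, TII->get(Hexagon::S2_storeri_io))
            .addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
      }
    }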
diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index 5f9b927..e8d8f14 100644
--- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -81,8 +81,8 @@ FunctionPass *llvm::createHexagonFixupHwLoops() {
/// \brief Returns true if the instruction is a hardware loop instruction.
static bool isHardwareLoop(const MachineInstr *MI) {
- return MI->getOpcode() == Hexagon::LOOP0_r ||
- MI->getOpcode() == Hexagon::LOOP0_i;
+ return MI->getOpcode() == Hexagon::J2_loop0r ||
+ MI->getOpcode() == Hexagon::J2_loop0i;
}
@@ -168,18 +168,18 @@ void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF,
// First, set the LC0 with the trip count.
if (MII->getOperand(1).isReg()) {
// Trip count is a register.
- BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::A2_tfrrcr), Hexagon::LC0)
.addReg(MII->getOperand(1).getReg());
} else {
// Trip count is an immediate.
- BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFRI), Scratch)
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::A2_tfrsi), Scratch)
.addImm(MII->getOperand(1).getImm());
- BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::A2_tfrrcr), Hexagon::LC0)
.addReg(Scratch);
}
// Then, set the SA0 with the loop start address.
BuildMI(*MBB, MII, DL, TII->get(Hexagon::CONST32_Label), Scratch)
.addMBB(MII->getOperand(0).getMBB());
- BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::SA0)
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::A2_tfrrcr), Hexagon::SA0)
.addReg(Scratch);
}
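convertLoopInstr above writes the loop-control registers LC0 and SA0 only through A2_tfrrcr (GPR to control register); an immediate trip count therefore detours through a scratch GPR via A2_tfrsi. A hypothetical condensation of the LC0 half, under the same in-scope assumptions as the pass:

    static void setLC0(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MII, DebugLoc DL,
                       const TargetInstrInfo *TII,
                       const MachineOperand &TripCount, unsigned Scratch) {
      if (TripCount.isReg()) {
        BuildMI(MBB, MII, DL, TII->get(Hexagon::A2_tfrrcr), Hexagon::LC0)
            .addReg(TripCount.getReg());
        return;
      }
      // LC0 cannot take an immediate directly: GPR <- #imm, then LC0 <- GPR.
      BuildMI(MBB, MII, DL, TII->get(Hexagon::A2_tfrsi), Scratch)
          .addImm(TripCount.getImm());
      BuildMI(MBB, MII, DL, TII->get(Hexagon::A2_tfrrcr), Hexagon::LC0)
          .addReg(Scratch);
    }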
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 356f279..2b1992f 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -50,10 +50,8 @@ void HexagonFrameLowering::determineFrameLayout(MachineFunction &MF) const {
unsigned FrameSize = MFI->getStackSize();
// Get the alignments provided by the target.
- unsigned TargetAlign = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned TargetAlign =
+ MF.getSubtarget().getFrameLowering()->getStackAlignment();
// Get the maximum call frame size of all the calls.
unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
@@ -80,8 +78,8 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineBasicBlock::iterator MBBI = MBB.begin();
- const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ const HexagonRegisterInfo *QRI =
+ MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
determineFrameLayout(MF);
@@ -122,17 +120,17 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
if (NumBytes >= ALLOCFRAME_MAX) {
// Emit allocframe(#0).
- BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(0);
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::S2_allocframe)).addImm(0);
// Subtract offset from frame pointer.
BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::CONST32_Int_Real),
HEXAGON_RESERVED_REG_1).addImm(NumBytes);
- BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::SUB_rr),
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::A2_sub),
QRI->getStackRegister()).
addReg(QRI->getStackRegister()).
addReg(HEXAGON_RESERVED_REG_1);
} else {
- BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(NumBytes);
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::S2_allocframe)).addImm(NumBytes);
}
}
}
@@ -161,15 +159,14 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
// Handle EH_RETURN.
if (MBBI->getOpcode() == Hexagon::EH_RETURN_JMPR) {
assert(MBBI->getOperand(0).isReg() && "Offset should be in register!");
- BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
BuildMI(MBB, MBBI, dl, TII.get(Hexagon::A2_add),
Hexagon::R29).addReg(Hexagon::R29).addReg(Hexagon::R28);
return;
}
// Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
// versions.
- if (MF.getTarget().getSubtarget<HexagonSubtarget>().hasV4TOps() &&
- MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
+ if (MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
// Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
// instruction if we encounter it.
MachineBasicBlock::iterator BeforeJMPR =
@@ -183,7 +180,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
// Add dealloc_return.
MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4));
+ BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::L4_return));
// Transfer the function live-out registers.
MIB->copyImplicitOps(*MBB.getParent(), &*MBBI);
// Remove the JUMPR node.
@@ -198,7 +195,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
return;
- BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
}
}
}
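The prologue logic above works around the limited immediate field of S2_allocframe: frames of ALLOCFRAME_MAX bytes or more allocate zero and then subtract the real size from the stack register explicitly. In outline, with SP standing for QRI->getStackRegister() (a sketch of the branch, not new behavior):

    if (NumBytes >= ALLOCFRAME_MAX) {
      // allocframe(#0), then SP -= NumBytes via a materialized constant.
      BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::S2_allocframe)).addImm(0);
      BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::CONST32_Int_Real),
              HEXAGON_RESERVED_REG_1).addImm(NumBytes);
      BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::A2_sub), SP)
          .addReg(SP).addReg(HEXAGON_RESERVED_REG_1);
    } else {
      BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::S2_allocframe))
          .addImm(NumBytes);
    }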
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index e2062a3..1577c33 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -28,7 +28,7 @@
#include "llvm/ADT/SmallSet.h"
#include "Hexagon.h"
-#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -64,9 +64,7 @@ namespace {
MachineLoopInfo *MLI;
MachineRegisterInfo *MRI;
MachineDominatorTree *MDT;
- const HexagonTargetMachine *TM;
const HexagonInstrInfo *TII;
- const HexagonRegisterInfo *TRI;
#ifndef NDEBUG
static int Counter;
#endif
@@ -265,9 +263,7 @@ namespace {
return Contents.ImmVal;
}
- void print(raw_ostream &OS, const TargetMachine *TM = nullptr) const {
- const TargetRegisterInfo *TRI =
- TM ? TM->getSubtargetImpl()->getRegisterInfo() : nullptr;
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI = nullptr) const {
if (isReg()) { OS << PrintReg(Contents.R.Reg, TRI, Contents.R.Sub); }
if (isImm()) { OS << Contents.ImmVal; }
}
@@ -285,8 +281,8 @@ INITIALIZE_PASS_END(HexagonHardwareLoops, "hwloops",
/// \brief Returns true if the instruction is a hardware loop instruction.
static bool isHardwareLoop(const MachineInstr *MI) {
- return MI->getOpcode() == Hexagon::LOOP0_r ||
- MI->getOpcode() == Hexagon::LOOP0_i;
+ return MI->getOpcode() == Hexagon::J2_loop0r ||
+ MI->getOpcode() == Hexagon::J2_loop0i;
}
FunctionPass *llvm::createHexagonHardwareLoops() {
@@ -302,11 +298,7 @@ bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
MLI = &getAnalysis<MachineLoopInfo>();
MRI = &MF.getRegInfo();
MDT = &getAnalysis<MachineDominatorTree>();
- TM = static_cast<const HexagonTargetMachine*>(&MF.getTarget());
- TII = static_cast<const HexagonInstrInfo *>(
- TM->getSubtargetImpl()->getInstrInfo());
- TRI = static_cast<const HexagonRegisterInfo *>(
- TM->getSubtargetImpl()->getRegisterInfo());
+ TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
I != E; ++I) {
@@ -357,7 +349,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
unsigned PhiOpReg = Phi->getOperand(i).getReg();
MachineInstr *DI = MRI->getVRegDef(PhiOpReg);
unsigned UpdOpc = DI->getOpcode();
- bool isAdd = (UpdOpc == Hexagon::ADD_ri);
+ bool isAdd = (UpdOpc == Hexagon::A2_addi);
if (isAdd) {
// If the register operand to the add is the PHI we're
@@ -540,21 +532,21 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
return nullptr;
switch (CondOpc) {
- case Hexagon::CMPEQri:
- case Hexagon::CMPEQrr:
+ case Hexagon::C2_cmpeqi:
+ case Hexagon::C2_cmpeq:
Cmp = !Negated ? Comparison::EQ : Comparison::NE;
break;
- case Hexagon::CMPGTUri:
- case Hexagon::CMPGTUrr:
+ case Hexagon::C2_cmpgtui:
+ case Hexagon::C2_cmpgtu:
Cmp = !Negated ? Comparison::GTu : Comparison::LEu;
break;
- case Hexagon::CMPGTri:
- case Hexagon::CMPGTrr:
+ case Hexagon::C2_cmpgti:
+ case Hexagon::C2_cmpgt:
Cmp = !Negated ? Comparison::GTs : Comparison::LEs;
break;
// Very limited support for byte/halfword compares.
- case Hexagon::CMPbEQri_V4:
- case Hexagon::CMPhEQri_V4: {
+ case Hexagon::A4_cmpbeqi:
+ case Hexagon::A4_cmpheqi: {
if (IVBump != 1)
return nullptr;
@@ -574,7 +566,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
}
if (InitV >= EndV)
return nullptr;
- if (CondOpc == Hexagon::CMPbEQri_V4) {
+ if (CondOpc == Hexagon::A4_cmpbeqi) {
if (!isInt<8>(InitV) || !isInt<8>(EndV))
return nullptr;
} else { // Hexagon::A4_cmpheqi
@@ -626,12 +618,12 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// If so, use the immediate value rather than the register.
if (Start->isReg()) {
const MachineInstr *StartValInstr = MRI->getVRegDef(Start->getReg());
- if (StartValInstr && StartValInstr->getOpcode() == Hexagon::TFRI)
+ if (StartValInstr && StartValInstr->getOpcode() == Hexagon::A2_tfrsi)
Start = &StartValInstr->getOperand(1);
}
if (End->isReg()) {
const MachineInstr *EndValInstr = MRI->getVRegDef(End->getReg());
- if (EndValInstr && EndValInstr->getOpcode() == Hexagon::TFRI)
+ if (EndValInstr && EndValInstr->getOpcode() == Hexagon::A2_tfrsi)
End = &EndValInstr->getOperand(1);
}
@@ -781,9 +773,9 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
DistR = End->getReg();
DistSR = End->getSubReg();
} else {
- const MCInstrDesc &SubD = RegToReg ? TII->get(Hexagon::SUB_rr) :
- (RegToImm ? TII->get(Hexagon::SUB_ri) :
- TII->get(Hexagon::ADD_ri));
+ const MCInstrDesc &SubD = RegToReg ? TII->get(Hexagon::A2_sub) :
+ (RegToImm ? TII->get(Hexagon::A2_subri) :
+ TII->get(Hexagon::A2_addi));
unsigned SubR = MRI->createVirtualRegister(IntRC);
MachineInstrBuilder SubIB =
BuildMI(*PH, InsertPos, DL, SubD, SubR);
@@ -811,7 +803,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
} else {
// Generate CountR = ADD DistR, AdjVal
unsigned AddR = MRI->createVirtualRegister(IntRC);
- const MCInstrDesc &AddD = TII->get(Hexagon::ADD_ri);
+ const MCInstrDesc &AddD = TII->get(Hexagon::A2_addi);
BuildMI(*PH, InsertPos, DL, AddD, AddR)
.addReg(DistR, 0, DistSR)
.addImm(AdjV);
@@ -832,7 +824,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// Generate NormR = LSR DistR, Shift.
unsigned LsrR = MRI->createVirtualRegister(IntRC);
- const MCInstrDesc &LsrD = TII->get(Hexagon::LSR_ri);
+ const MCInstrDesc &LsrD = TII->get(Hexagon::S2_lsr_i_r);
BuildMI(*PH, InsertPos, DL, LsrD, LsrR)
.addReg(AdjR, 0, AdjSR)
.addImm(Shift);
@@ -1086,7 +1078,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
BuildMI(*Preheader, InsertPos, DL, TII->get(TargetOpcode::COPY), CountReg)
.addReg(TripCount->getReg(), 0, TripCount->getSubReg());
// Add the Loop instruction to the beginning of the loop.
- BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::LOOP0_r))
+ BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::J2_loop0r))
.addMBB(LoopStart)
.addReg(CountReg);
} else {
@@ -1095,14 +1087,14 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
// if the immediate fits in the instructions. Otherwise, we need to
// create a new virtual register.
int64_t CountImm = TripCount->getImm();
- if (!TII->isValidOffset(Hexagon::LOOP0_i, CountImm)) {
+ if (!TII->isValidOffset(Hexagon::J2_loop0i, CountImm)) {
unsigned CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
- BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::TFRI), CountReg)
+ BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::A2_tfrsi), CountReg)
.addImm(CountImm);
- BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::LOOP0_r))
+ BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::J2_loop0r))
.addMBB(LoopStart).addReg(CountReg);
} else
- BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::LOOP0_i))
+ BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::J2_loop0i))
.addMBB(LoopStart).addImm(CountImm);
}
@@ -1122,8 +1114,8 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
// The loop ends with either:
// - a conditional branch followed by an unconditional branch, or
// - a conditional branch to the loop start.
- if (LastI->getOpcode() == Hexagon::JMP_t ||
- LastI->getOpcode() == Hexagon::JMP_f) {
+ if (LastI->getOpcode() == Hexagon::J2_jumpt ||
+ LastI->getOpcode() == Hexagon::J2_jumpf) {
// Delete one and change/add an uncond. branch to out of the loop.
MachineBasicBlock *BranchTarget = LastI->getOperand(1).getMBB();
LastI = LastMBB->erase(LastI);
@@ -1194,8 +1186,8 @@ MachineInstr *HexagonHardwareLoops::defWithImmediate(unsigned R) {
MachineInstr *DI = MRI->getVRegDef(R);
unsigned DOpc = DI->getOpcode();
switch (DOpc) {
- case Hexagon::TFRI:
- case Hexagon::TFRI64:
+ case Hexagon::A2_tfrsi:
+ case Hexagon::A2_tfrpi:
case Hexagon::CONST32_Int_Real:
case Hexagon::CONST64_Int_Real:
return DI;
@@ -1277,7 +1269,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
unsigned PhiReg = Phi->getOperand(i).getReg();
MachineInstr *DI = MRI->getVRegDef(PhiReg);
unsigned UpdOpc = DI->getOpcode();
- bool isAdd = (UpdOpc == Hexagon::ADD_ri);
+ bool isAdd = (UpdOpc == Hexagon::A2_addi);
if (isAdd) {
// If the register operand to the add/sub is the PHI we are looking
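getLoopTripCount/computeCount above materialize the count with A2_sub, A2_addi, and S2_lsr_i_r when the induction bump is a power of two; the underlying arithmetic is count = ceil((End - Start) / Bump). A plain sketch of that formula, ignoring the overflow and direction checks the real pass performs:

    #include <cstdint>

    // Shift = log2(Bump); Bump is assumed to be a power of two.
    uint32_t tripCount(int32_t Start, int32_t End, uint32_t Bump,
                       unsigned Shift) {
      uint32_t Dist = (uint32_t)(End - Start);  // A2_sub / A2_subri
      uint32_t Adj  = Dist + (Bump - 1);        // A2_addi: round up
      return Adj >> Shift;                      // S2_lsr_i_r: divide by Bump
    }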
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index dc58c42..fb056b5 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -47,7 +47,7 @@ namespace {
class HexagonDAGToDAGISel : public SelectionDAGISel {
/// Subtarget - Keep a pointer to the Hexagon Subtarget around so that we can
/// make the right decision when generating code for different targets.
- const HexagonSubtarget &Subtarget;
+ const HexagonSubtarget *Subtarget;
// Keep a reference to HexagonTargetMachine.
const HexagonTargetMachine& TM;
@@ -55,9 +55,7 @@ class HexagonDAGToDAGISel : public SelectionDAGISel {
public:
explicit HexagonDAGToDAGISel(HexagonTargetMachine &targetmachine,
CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(targetmachine, OptLevel),
- Subtarget(targetmachine.getSubtarget<HexagonSubtarget>()),
- TM(targetmachine) {
+ : SelectionDAGISel(targetmachine, OptLevel), TM(targetmachine) {
initializeHexagonDAGToDAGISelPass(*PassRegistry::getPassRegistry());
}
bool hasNumUsesBelowThresGA(SDNode *N) const;
@@ -79,10 +77,21 @@ public:
bool SelectADDRriU6_1(SDValue& N, SDValue &R1, SDValue &R2);
bool SelectADDRriU6_2(SDValue& N, SDValue &R1, SDValue &R2);
+ // Complex Pattern Selectors.
+ inline bool SelectAddrGA(SDValue &N, SDValue &R);
+ inline bool SelectAddrGP(SDValue &N, SDValue &R);
+ bool SelectGlobalAddress(SDValue &N, SDValue &R, bool UseGP);
+ bool SelectAddrFI(SDValue &N, SDValue &R);
+
const char *getPassName() const override {
return "Hexagon DAG->DAG Pattern Instruction Selection";
}
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<HexagonSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
@@ -138,9 +147,7 @@ SDValue XformMskToBitPosU3Imm(uint8_t Imm) {
// Return true if there is exactly one bit set in V, i.e., if V is one of the
// following integers: 2^0, 2^1, ..., 2^31.
bool ImmIsSingleBit(uint32_t v) const {
- uint32_t c = CountPopulation_64(v);
- // Only return true if we counted 1 bit.
- return c == 1;
+ return isPowerOf2_32(v);
}
// XformM5ToU5Imm - Return a target constant with the specified value, of type
@@ -170,8 +177,21 @@ inline SDValue XformUToUM1Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm - 1, MVT::i32);
}
+// XformSToSM2Imm - Return a target constant decremented by 2.
+inline SDValue XformSToSM2Imm(unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm - 2, MVT::i32);
+}
+
+// XformSToSM3Imm - Return a target constant decremented by 3.
+inline SDValue XformSToSM3Imm(unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm - 3, MVT::i32);
+}
+
// Include the pieces autogenerated from the target description.
#include "HexagonGenDAGISel.inc"
+
+private:
+ bool isValueExtension(const SDValue &Val, unsigned FromBits, SDValue &Src);
};
} // end anonymous namespace
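The ImmIsSingleBit rewrite above is a pure simplification: isPowerOf2_32 performs the same test as counting set bits and comparing against one, since both reject zero and accept exactly 2^0 through 2^31. The bit trick it relies on:

    // Equivalent to isPowerOf2_32(v): a power of two shares no set bit
    // with its predecessor.
    bool immIsSingleBit(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }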
@@ -312,56 +332,6 @@ static unsigned doesIntrinsicReturnPredicate(unsigned ID)
}
}
-
-// Intrinsics that have predicate operands.
-static unsigned doesIntrinsicContainPredicate(unsigned ID)
-{
- switch (ID) {
- default:
- return 0;
- case Intrinsic::hexagon_C2_tfrpr:
- return Hexagon::TFR_RsPd;
- case Intrinsic::hexagon_C2_and:
- return Hexagon::AND_pp;
- case Intrinsic::hexagon_C2_xor:
- return Hexagon::XOR_pp;
- case Intrinsic::hexagon_C2_or:
- return Hexagon::OR_pp;
- case Intrinsic::hexagon_C2_not:
- return Hexagon::NOT_p;
- case Intrinsic::hexagon_C2_any8:
- return Hexagon::ANY_pp;
- case Intrinsic::hexagon_C2_all8:
- return Hexagon::ALL_pp;
- case Intrinsic::hexagon_C2_vitpack:
- return Hexagon::VITPACK_pp;
- case Intrinsic::hexagon_C2_mask:
- return Hexagon::MASK_p;
- case Intrinsic::hexagon_C2_mux:
- return Hexagon::MUX_rr;
-
- // Mapping hexagon_C2_muxir to MUX_pri. This is pretty weird - but
- // that's how it's mapped in q6protos.h.
- case Intrinsic::hexagon_C2_muxir:
- return Hexagon::MUX_ri;
-
- // Mapping hexagon_C2_muxri to MUX_pir. This is pretty weird - but
- // that's how it's mapped in q6protos.h.
- case Intrinsic::hexagon_C2_muxri:
- return Hexagon::MUX_ir;
-
- case Intrinsic::hexagon_C2_muxii:
- return Hexagon::MUX_ii;
- case Intrinsic::hexagon_C2_vmux:
- return Hexagon::VMUX_prr64;
- case Intrinsic::hexagon_S2_valignrb:
- return Hexagon::VALIGN_rrp;
- case Intrinsic::hexagon_S2_vsplicerb:
- return Hexagon::VSPLICE_rrp;
- }
-}
-
-
static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
if (MemType == MVT::i64 && isShiftedInt<11,3>(Offset)) {
return true;
@@ -404,10 +374,10 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl) {
dl, PointerTy,
TargAddr);
// Figure out base + offset opcode
- if (LoadedVT == MVT::i64) Opcode = Hexagon::LDrid_indexed;
- else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
- else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed;
- else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed;
+ if (LoadedVT == MVT::i64) Opcode = Hexagon::L2_loadrd_io;
+ else if (LoadedVT == MVT::i32) Opcode = Hexagon::L2_loadri_io;
+ else if (LoadedVT == MVT::i16) Opcode = Hexagon::L2_loadrh_io;
+ else if (LoadedVT == MVT::i8) Opcode = Hexagon::L2_loadrb_io;
else llvm_unreachable("unknown memory type");
// Build indexed load.
@@ -446,14 +416,13 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
N1.getNode()->getValueType(0) == MVT::i32) {
- const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const HexagonInstrInfo *TII = Subtarget->getInstrInfo();
if (TII->isValidAutoIncImm(LoadedVT, Val)) {
SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
MVT::Other, Base, TargetConst,
Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl, MVT::i64,
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
SDValue(Result_1, 0));
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = LD->getMemOperand();
@@ -474,9 +443,9 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
MVT::Other, Base, TargetConst0,
Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl,
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_sxtw, dl,
MVT::i64, SDValue(Result_1, 0));
- SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl,
+ SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::A2_addi, dl,
MVT::i32, Base, TargetConstVal,
SDValue(Result_1, 1));
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
@@ -513,17 +482,16 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
N1.getNode()->getValueType(0) == MVT::i32) {
- const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const HexagonInstrInfo *TII = Subtarget->getInstrInfo();
if (TII->isValidAutoIncImm(LoadedVT, Val)) {
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
MVT::i32, MVT::Other, Base,
TargetConstVal, Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32,
TargetConst0);
- SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::A2_combinew, dl,
MVT::i64, MVT::Other,
SDValue(Result_2,0),
SDValue(Result_1,0));
@@ -548,14 +516,14 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
MVT::Other,
Base, TargetConst0, Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32,
TargetConst0);
- SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::A2_combinew, dl,
MVT::i64, MVT::Other,
SDValue(Result_2,0),
SDValue(Result_1,0));
// Add offset to base.
- SDNode* Result_4 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ SDNode* Result_4 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
Base, TargetConstVal,
SDValue(Result_1, 1));
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
@@ -591,28 +559,27 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
bool zextval = (LD->getExtensionType() == ISD::ZEXTLOAD);
// Figure out the opcode.
- const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const HexagonInstrInfo *TII = Subtarget->getInstrInfo();
if (LoadedVT == MVT::i64) {
if (TII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = Hexagon::POST_LDrid;
+ Opcode = Hexagon::L2_loadrd_pi;
else
- Opcode = Hexagon::LDrid;
+ Opcode = Hexagon::L2_loadrd_io;
} else if (LoadedVT == MVT::i32) {
if (TII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = Hexagon::POST_LDriw;
+ Opcode = Hexagon::L2_loadri_pi;
else
- Opcode = Hexagon::LDriw;
+ Opcode = Hexagon::L2_loadri_io;
} else if (LoadedVT == MVT::i16) {
if (TII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = zextval ? Hexagon::POST_LDriuh : Hexagon::POST_LDrih;
+ Opcode = zextval ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadrh_pi;
else
- Opcode = zextval ? Hexagon::LDriuh : Hexagon::LDrih;
+ Opcode = zextval ? Hexagon::L2_loadruh_io : Hexagon::L2_loadrh_io;
} else if (LoadedVT == MVT::i8) {
if (TII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = zextval ? Hexagon::POST_LDriub : Hexagon::POST_LDrib;
+ Opcode = zextval ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrb_pi;
else
- Opcode = zextval ? Hexagon::LDriub : Hexagon::LDrib;
+ Opcode = zextval ? Hexagon::L2_loadrub_io : Hexagon::L2_loadrb_io;
} else
llvm_unreachable("unknown memory type");
@@ -652,7 +619,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
LD->getValueType(0),
MVT::Other, Base, TargetConst0,
Chain);
- SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
Base, TargetConstVal,
SDValue(Result_1, 1));
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
@@ -701,18 +668,17 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
// Offset value must be within representable range
// and must have correct alignment properties.
- const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
+ const HexagonInstrInfo *TII = Subtarget->getInstrInfo();
if (TII->isValidAutoIncImm(StoredVT, Val)) {
SDValue Ops[] = {Base, CurDAG->getTargetConstant(Val, MVT::i32), Value,
Chain};
unsigned Opcode = 0;
// Figure out the post-increment version of the opcode.
- if (StoredVT == MVT::i64) Opcode = Hexagon::POST_STdri;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::POST_STwri;
- else if (StoredVT == MVT::i16) Opcode = Hexagon::POST_SThri;
- else if (StoredVT == MVT::i8) Opcode = Hexagon::POST_STbri;
+ if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_pi;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_pi;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_pi;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_pi;
else llvm_unreachable("unknown memory type");
// Build post increment store.
@@ -735,17 +701,17 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
unsigned Opcode = 0;
// Figure out the opcode.
- if (StoredVT == MVT::i64) Opcode = Hexagon::STrid;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
- else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih;
- else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib;
+ if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_io;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_io;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_io;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_io;
else llvm_unreachable("unknown memory type");
// Build regular store.
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
// Build the split increment instruction.
- SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
Base,
TargetConstVal,
SDValue(Result_1, 0));
@@ -788,10 +754,10 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
TargAddr);
// Figure out base + offset opcode
- if (StoredVT == MVT::i64) Opcode = Hexagon::STrid_indexed;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
- else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed;
- else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed;
+ if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_io;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_io;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_io;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_io;
else llvm_unreachable("unknown memory type");
SDValue Ops[] = {SDValue(NewBase,0),
@@ -865,7 +831,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ OP0 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(), TargetConst0,
Chain), 0);
@@ -891,7 +857,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ OP1 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(), TargetConst0,
Chain), 0);
@@ -900,7 +866,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
}
// Generate an mpy instruction.
- SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY64, dl, MVT::i64,
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_dpmpyss_s0, dl, MVT::i64,
OP0, OP1);
ReplaceUses(N, Result);
return Result;
@@ -934,9 +900,9 @@ SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
if (N000 == N2 &&
N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
- SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::A2_sxth, dl,
MVT::i32, N000);
- SDNode *Result = CurDAG->getMachineNode(Hexagon::MAXw_rr, dl,
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::A2_max, dl,
MVT::i32,
SDValue(SextNode, 0),
N1);
@@ -958,9 +924,9 @@ SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
if (N000 == N2 &&
N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
- SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::A2_sxth, dl,
MVT::i32, N000);
- SDNode *Result = CurDAG->getMachineNode(Hexagon::MINw_rr, dl,
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::A2_min, dl,
MVT::i32,
SDValue(SextNode, 0),
N1);
@@ -1045,7 +1011,7 @@ SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ OP0 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(),
TargetConst0, Chain), 0);
@@ -1070,7 +1036,7 @@ SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ OP1 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(),
TargetConst0, Chain), 0);
@@ -1079,7 +1045,7 @@ SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
}
// Generate a mpy instruction.
- SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY, dl, MVT::i32,
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpy_up, dl, MVT::i32,
OP0, OP1);
ReplaceUses(N, Result);
return Result;
@@ -1112,7 +1078,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val.getNode()))
if (isInt<9>(CN->getSExtValue())) {
SDNode* Result =
- CurDAG->getMachineNode(Hexagon::MPYI_ri, dl,
+ CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
MVT::i32, Mul_0, Val);
ReplaceUses(N, Result);
return Result;
@@ -1140,7 +1106,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
dyn_cast<ConstantSDNode>(Val.getNode()))
if (isInt<9>(CN->getSExtValue())) {
SDNode* Result =
- CurDAG->getMachineNode(Hexagon::MPYI_ri, dl, MVT::i32,
+ CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl, MVT::i32,
Shl2_0, Val);
ReplaceUses(N, Result);
return Result;
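
Both call sites above guard the fold with isInt<9>, because M2_mpysmi (Rd = mpyi(Rs, #s9)) encodes a 9-bit signed immediate. A minimal model of that range test (llvm::isInt is the real helper; this sketch only shows what it admits):

#include <cassert>
#include <cstdint>

// Sketch of the isInt<N> range test: x fits in an N-bit signed immediate.
// For N = 9 that is [-256, 255], the range M2_mpysmi can encode.
template <unsigned N> static bool fitsSignedBits(int64_t x) {
  return x >= -(INT64_C(1) << (N - 1)) && x < (INT64_C(1) << (N - 1));
}

int main() {
  assert(fitsSignedBits<9>(255) && fitsSignedBits<9>(-256));
  assert(!fitsSignedBits<9>(256) && !fitsSignedBits<9>(-257));
  return 0;
}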
@@ -1177,13 +1143,13 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
if (N->getValueType(0) == MVT::i64) {
// Convert the zero_extend to Rs = Pd followed by COMBINE_rr(0,Rs).
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- SDNode *Result_1 = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ SDNode *Result_1 = CurDAG->getMachineNode(Hexagon::C2_tfrpr, dl,
MVT::i32,
SDValue(IsIntrinsic, 0));
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl,
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl,
MVT::i32,
TargetConst0);
- SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::A2_combinew, dl,
MVT::i64, MVT::Other,
SDValue(Result_2, 0),
SDValue(Result_1, 0));
@@ -1192,7 +1158,7 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
}
if (N->getValueType(0) == MVT::i32) {
// Convert the zero_extend to Rs = Pd
- SDNode* RsPd = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ SDNode* RsPd = CurDAG->getMachineNode(Hexagon::C2_tfrpr, dl,
MVT::i32,
SDValue(IsIntrinsic, 0));
ReplaceUses(N, RsPd);
@@ -1204,56 +1170,30 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
return SelectCode(N);
}
-
//
// Checking for intrinsics which have predicate registers as operand(s)
// and lowering to the actual intrinsic.
//
SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
- SDLoc dl(N);
- unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
- unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID);
-
- // We are concerned with only those intrinsics that have predicate registers
- // as at least one of the operands.
- if (IntrinsicWithPred) {
- SmallVector<SDValue, 8> Ops;
- const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- const MCInstrDesc &MCID = TII->get(IntrinsicWithPred);
- const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
-
- // Iterate over all the operands of the intrinsics.
- // For PredRegs, do the transfer.
- // For Double/Int Regs, just preserve the value
- // For immediates, lower it.
- for (unsigned i = 1; i < N->getNumOperands(); ++i) {
- SDNode *Arg = N->getOperand(i).getNode();
- const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI, *MF);
-
- if (RC == &Hexagon::IntRegsRegClass ||
- RC == &Hexagon::DoubleRegsRegClass) {
- Ops.push_back(SDValue(Arg, 0));
- } else if (RC == &Hexagon::PredRegsRegClass) {
- // Do the transfer.
- SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
- SDValue(Arg, 0));
- Ops.push_back(SDValue(PdRs,0));
- } else if (!RC && (dyn_cast<ConstantSDNode>(Arg) != nullptr)) {
- // This is immediate operand. Lower it here making sure that we DO have
- // const SDNode for immediate value.
- int32_t Val = cast<ConstantSDNode>(Arg)->getSExtValue();
- SDValue SDVal = CurDAG->getTargetConstant(Val, MVT::i32);
- Ops.push_back(SDVal);
- } else {
- llvm_unreachable("Unimplemented");
- }
- }
- EVT ReturnValueVT = N->getValueType(0);
- SDNode *Result = CurDAG->getMachineNode(IntrinsicWithPred, dl,
- ReturnValueVT, Ops);
- ReplaceUses(N, Result);
- return Result;
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned Bits;
+ switch (IID) {
+ case Intrinsic::hexagon_S2_vsplatrb:
+ Bits = 8;
+ break;
+ case Intrinsic::hexagon_S2_vsplatrh:
+ Bits = 16;
+ break;
+ default:
+ return SelectCode(N);
+ }
+
+  const SDValue &V = N->getOperand(1);
+ SDValue U;
+ if (isValueExtension(V, Bits, U)) {
+ SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
+ N->getOperand(0), U);
+ return SelectCode(R.getNode());
}
return SelectCode(N);
}
@@ -1289,19 +1229,19 @@ SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
if (Val == -1) {
// Create the IntReg = 1 node.
SDNode* IntRegTFR =
- CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32,
CurDAG->getTargetConstant(0, MVT::i32));
// Pd = IntReg
- SDNode* Pd = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
+ SDNode* Pd = CurDAG->getMachineNode(Hexagon::C2_tfrrp, dl, MVT::i1,
SDValue(IntRegTFR, 0));
// not(Pd)
- SDNode* NotPd = CurDAG->getMachineNode(Hexagon::NOT_p, dl, MVT::i1,
+ SDNode* NotPd = CurDAG->getMachineNode(Hexagon::C2_not, dl, MVT::i1,
SDValue(Pd, 0));
// xor(not(Pd))
- Result = CurDAG->getMachineNode(Hexagon::XOR_pp, dl, MVT::i1,
+ Result = CurDAG->getMachineNode(Hexagon::C2_xor, dl, MVT::i1,
SDValue(Pd, 0), SDValue(NotPd, 0));
// We have just built:
@@ -1334,7 +1274,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
// Build Rd = Rd' + asr(Rs, Rt). The machine constraints will ensure that
// Rd and Rd' are assigned to the same register
- SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_ADD_rr, dl, MVT::i32,
+ SDNode* Result = CurDAG->getMachineNode(Hexagon::S2_asr_r_r_acc, dl, MVT::i32,
N->getOperand(1),
Src1->getOperand(0),
Src1->getOperand(1));
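
The comment above describes the fused form: S2_asr_r_r_acc implements Rx += asr(Rs, Rt), an arithmetic right shift accumulated into the destination. A rough behavioral sketch for shift amounts 0..31 (the hardware takes the amount from a wider field; modeling that is out of scope here):

#include <cassert>
#include <cstdint>

// Model of S2_asr_r_r_acc (Rx += asr(Rs, Rt)) for shift amounts 0..31.
// Right-shifting a negative int is arithmetic on mainstream compilers
// and is guaranteed to be from C++20 on.
static int32_t asr_r_r_acc(int32_t Rx, int32_t Rs, uint32_t Rt) {
  return Rx + (Rs >> (Rt & 31));
}

int main() {
  assert(asr_r_r_acc(10, -64, 3) == 2); // 10 + (-64 >> 3) = 10 - 8
  return 0;
}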
@@ -1683,3 +1623,126 @@ bool HexagonDAGToDAGISel::foldGlobalAddressImpl(SDValue &N, SDValue &R,
}
return false;
}
+
+bool HexagonDAGToDAGISel::SelectAddrFI(SDValue& N, SDValue &R) {
+ if (N.getOpcode() != ISD::FrameIndex)
+ return false;
+ FrameIndexSDNode *FX = cast<FrameIndexSDNode>(N);
+ R = CurDAG->getTargetFrameIndex(FX->getIndex(), MVT::i32);
+ return true;
+}
+
+inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
+ return SelectGlobalAddress(N, R, false);
+}
+
+inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
+ return SelectGlobalAddress(N, R, true);
+}
+
+bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
+ bool UseGP) {
+ switch (N.getOpcode()) {
+ case ISD::ADD: {
+ SDValue N0 = N.getOperand(0);
+ SDValue N1 = N.getOperand(1);
+ unsigned GAOpc = N0.getOpcode();
+ if (UseGP && GAOpc != HexagonISD::CONST32_GP)
+ return false;
+ if (!UseGP && GAOpc != HexagonISD::CONST32)
+ return false;
+ if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
+ SDValue Addr = N0.getOperand(0);
+ if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
+ if (GA->getOpcode() == ISD::TargetGlobalAddress) {
+ uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();
+ R = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(Const),
+ N.getValueType(), NewOff);
+ return true;
+ }
+ }
+ }
+ break;
+ }
+ case HexagonISD::CONST32:
+ // The operand(0) of CONST32 is TargetGlobalAddress, which is what we
+ // want in the instruction.
+ if (!UseGP)
+ R = N.getOperand(0);
+ return !UseGP;
+ case HexagonISD::CONST32_GP:
+ if (UseGP)
+ R = N.getOperand(0);
+ return UseGP;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+bool HexagonDAGToDAGISel::isValueExtension(SDValue const &Val,
+ unsigned FromBits, SDValue &Src) {
+ unsigned Opc = Val.getOpcode();
+ switch (Opc) {
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND: {
+ SDValue const &Op0 = Val.getOperand(0);
+ EVT T = Op0.getValueType();
+ if (T.isInteger() && T.getSizeInBits() == FromBits) {
+ Src = Op0;
+ return true;
+ }
+ break;
+ }
+ case ISD::SIGN_EXTEND_INREG:
+ case ISD::AssertSext:
+ case ISD::AssertZext:
+ if (Val.getOperand(0).getValueType().isInteger()) {
+ VTSDNode *T = cast<VTSDNode>(Val.getOperand(1));
+ if (T->getVT().getSizeInBits() == FromBits) {
+ Src = Val.getOperand(0);
+ return true;
+ }
+ }
+ break;
+ case ISD::AND: {
+ // Check if this is an AND with "FromBits" of lower bits set to 1.
+ uint64_t FromMask = (1 << FromBits) - 1;
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
+ if (C->getZExtValue() == FromMask) {
+ Src = Val.getOperand(1);
+ return true;
+ }
+ }
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
+ if (C->getZExtValue() == FromMask) {
+ Src = Val.getOperand(0);
+ return true;
+ }
+ }
+ break;
+ }
+ case ISD::OR:
+ case ISD::XOR: {
+ // OR/XOR with the lower "FromBits" bits set to 0.
+ uint64_t FromMask = (1 << FromBits) - 1;
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
+ if ((C->getZExtValue() & FromMask) == 0) {
+ Src = Val.getOperand(1);
+ return true;
+ }
+ }
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
+ if ((C->getZExtValue() & FromMask) == 0) {
+ Src = Val.getOperand(0);
+ return true;
+ }
+ }
+ }
+ default:
+ break;
+ }
+ return false;
+}
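
The rewritten SelectIntrinsicWOChain plus isValueExtension implement one specific fold: S2_vsplatrb/S2_vsplatrh read only the low 8 or 16 bits of their operand, so any sign/zero extension (or an equivalent AND/OR/XOR against the low-bit mask) feeding them can be stripped and the raw source used instead. A small self-contained check of why the AND case is sound, assuming the usual vsplatrb semantics:

#include <cassert>
#include <cstdint>

// S2_vsplatrb replicates the low byte of Rs into all four bytes of Rd.
static uint32_t vsplatrb(uint32_t Rs) {
  uint32_t B = Rs & 0xff;
  return B | (B << 8) | (B << 16) | (B << 24);
}

int main() {
  uint32_t V = 0x12345678;
  // Masking with (1 << FromBits) - 1 before the splat changes nothing,
  // which is exactly what lets isValueExtension return the raw source.
  assert(vsplatrb(V & 0xff) == vsplatrb(V));
  return 0;
}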
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 7646088..0072994 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -188,7 +188,7 @@ static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
Hexagon::R5
};
- if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+ if (unsigned Reg = State.AllocateReg(RegList)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -213,7 +213,7 @@ static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
static const MCPhysReg RegList2[] = {
Hexagon::R1, Hexagon::R3
};
- if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
+ if (unsigned Reg = State.AllocateReg(RegList1, RegList2)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -404,6 +404,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &isTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
+ bool doesNotReturn = CLI.DoesNotReturn;
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
@@ -462,8 +463,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
- const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
SDValue StackPtr =
DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());
@@ -597,7 +597,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (isTailCall)
return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
- Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
+ int OpCode = doesNotReturn ? HexagonISD::CALLv3nr : HexagonISD::CALLv3;
+ Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
@@ -720,9 +721,7 @@ SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
cast<RegisterSDNode>(Node->getOperand(i))->getReg();
// Check it to be lr
- const HexagonRegisterInfo *QRI =
- static_cast<const HexagonRegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
if (Reg == QRI->getRARegister()) {
FuncInfo->setHasClobberLR(true);
break;
@@ -815,8 +814,7 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
- const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);
SDValue Ops[2] = { ArgAdjust, CopyChain };
@@ -875,7 +873,7 @@ const {
RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
- } else if (RegVT == MVT::i64) {
+ } else if (RegVT == MVT::i64 || RegVT == MVT::f64) {
unsigned VReg =
RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
@@ -963,7 +961,7 @@ HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
- const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
@@ -989,8 +987,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
- const HexagonRegisterInfo *TRI = static_cast<const HexagonRegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const HexagonRegisterInfo *TRI = Subtarget->getRegisterInfo();
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
@@ -1021,9 +1018,10 @@ SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
SDLoc dl(Op);
Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
- const HexagonTargetObjectFile &TLOF =
- static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
- if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
+ const HexagonTargetObjectFile *TLOF =
+ static_cast<const HexagonTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
+ if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine())) {
return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
}
@@ -1042,24 +1040,22 @@ HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
-HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
- : TargetLowering(targetmachine),
- TM(targetmachine) {
-
- const HexagonSubtarget &Subtarget = TM.getSubtarget<HexagonSubtarget>();
+HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
+ const HexagonSubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
// Set up the register classes.
addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
- if (Subtarget.hasV5TOps()) {
+ if (Subtarget->hasV5TOps()) {
addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
}
addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget->getRegisterInfo());
// Align loop entry
setPrefLoopAlignment(4);
@@ -1109,15 +1105,22 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
setOperationAction(ISD::FDIV, MVT::f64, Expand);
+ setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
+ setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
+ setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
+
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f64, Expand);
- if (Subtarget.hasV5TOps()) {
+ if (Subtarget->hasV5TOps()) {
// Hexagon V5 Support.
setOperationAction(ISD::FADD, MVT::f32, Legal);
- setOperationAction(ISD::FADD, MVT::f64, Legal);
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
+ setOperationAction(ISD::FSUB, MVT::f32, Legal);
+ setOperationAction(ISD::FSUB, MVT::f64, Expand);
+ setOperationAction(ISD::FMUL, MVT::f64, Expand);
setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
@@ -1202,11 +1205,14 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
- setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
- setOperationAction(ISD::FADD, MVT::f64, Expand);
setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
setOperationAction(ISD::FADD, MVT::f32, Expand);
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
+ setOperationAction(ISD::FSUB, MVT::f32, Expand);
+ setOperationAction(ISD::FSUB, MVT::f64, Expand);
setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
@@ -1247,7 +1253,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
- setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
setOperationAction(ISD::FMUL, MVT::f64, Expand);
setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
@@ -1301,9 +1306,11 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
// Turn FP extload into load/fextend.
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ for (MVT VT : MVT::fp_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
// Hexagon has a i1 sign extending load.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
// Turn FP truncstore into trunc + store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -1333,7 +1340,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- if (Subtarget.hasV5TOps()) {
+ if (Subtarget->hasV5TOps()) {
// We need to make the operation type of SELECT node to be Custom,
// such that we don't go into the infinite loop of
@@ -1422,19 +1429,15 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-
+
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
- if (Subtarget.isSubtargetV2()) {
- setExceptionPointerRegister(Hexagon::R20);
- setExceptionSelectorRegister(Hexagon::R21);
- } else {
- setExceptionPointerRegister(Hexagon::R0);
- setExceptionSelectorRegister(Hexagon::R1);
- }
+ setExceptionPointerRegister(Hexagon::R0);
+ setExceptionSelectorRegister(Hexagon::R1);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -1452,8 +1455,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
setMinFunctionAlignment(2);
// Needed for DYNAMIC_STACKALLOC expansion.
- const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const HexagonRegisterInfo *QRI = Subtarget->getRegisterInfo();
setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
setSchedulingPreference(Sched::VLIW);
}
@@ -1476,7 +1478,9 @@ HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
case HexagonISD::Lo: return "HexagonISD::Lo";
case HexagonISD::FTOI: return "HexagonISD::FTOI";
case HexagonISD::ITOF: return "HexagonISD::ITOF";
- case HexagonISD::CALL: return "HexagonISD::CALL";
+ case HexagonISD::CALLv3: return "HexagonISD::CALLv3";
+ case HexagonISD::CALLv3nr: return "HexagonISD::CALLv3nr";
+ case HexagonISD::CALLR: return "HexagonISD::CALLR";
case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
@@ -1591,10 +1595,10 @@ const {
// Inline Assembly Support
//===----------------------------------------------------------------------===//
-std::pair<unsigned, const TargetRegisterClass*>
-HexagonTargetLowering::getRegForInlineAsmConstraint(const
- std::string &Constraint,
- MVT VT) const {
+std::pair<unsigned, const TargetRegisterClass *>
+HexagonTargetLowering::getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r': // R0-R31
@@ -1615,14 +1619,14 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- return TM.getSubtarget<HexagonSubtarget>().hasV5TOps();
+ return Subtarget->hasV5TOps();
}
/// isLegalAddressingMode - Return true if the addressing mode represented by
@@ -1705,3 +1709,17 @@ bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
// information is not available.
return true;
}
+
+// Return true when the given node fits in a positive half word.
+bool llvm::isPositiveHalfWord(SDNode *N) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
+ if (CN && CN->getSExtValue() > 0 && isInt<16>(CN->getSExtValue()))
+ return true;
+
+ switch (N->getOpcode()) {
+ default:
+ return false;
+ case ISD::SIGN_EXTEND_INREG:
+ return true;
+ }
+}
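
isPositiveHalfWord, added above, answers a narrow question: is the node known to be a positive value that fits in a signed halfword? For constants that means the range 1..32767; SIGN_EXTEND_INREG nodes are accepted unconditionally. A sketch of the constant path only:

#include <cassert>
#include <cstdint>

// The constant path of isPositiveHalfWord: strictly positive and within
// a signed 16-bit immediate, i.e. 1..32767.
static bool fitsPositiveHalfWord(int64_t V) {
  return V > 0 && V <= 32767;
}

int main() {
  assert(fitsPositiveHalfWord(1) && fitsPositiveHalfWord(32767));
  assert(!fitsPositiveHalfWord(0) && !fitsPositiveHalfWord(32768));
  assert(!fitsPositiveHalfWord(-5));
  return 0;
}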
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index 63e4392..151c28f 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -21,6 +21,10 @@
#include "llvm/Target/TargetLowering.h"
namespace llvm {
+
+// Return true when the given node fits in a positive half word.
+bool isPositiveHalfWord(SDNode *N);
+
namespace HexagonISD {
enum {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
@@ -45,10 +49,15 @@ namespace llvm {
FTOI, // FP to Int within a FP register.
ITOF, // Int to FP within a FP register.
- CALL, // A call instruction.
+ CALLv3, // A V3+ call instruction.
+ CALLv3nr, // A V3+ call instruction that doesn't return.
+ CALLR,
+
RET_FLAG, // Return with a flag operand.
BR_JT, // Jump table.
- BARRIER, // Memory barrier.
+ BARRIER, // Memory barrier
+ POPCOUNT,
+ COMBINE,
WrapperJT,
WrapperCP,
WrapperCombineII,
@@ -63,10 +72,13 @@ namespace llvm {
WrapperShuffOB,
WrapperShuffOH,
TC_RETURN,
- EH_RETURN
+ EH_RETURN,
+ DCFETCH
};
}
+ class HexagonSubtarget;
+
class HexagonTargetLowering : public TargetLowering {
int VarArgsFrameOffset; // Frame offset to start of varargs area.
@@ -74,8 +86,9 @@ namespace llvm {
unsigned& RetSize) const;
public:
- const TargetMachine &TM;
- explicit HexagonTargetLowering(const TargetMachine &targetmachine);
+ const HexagonSubtarget *Subtarget;
+ explicit HexagonTargetLowering(const TargetMachine &TM,
+ const HexagonSubtarget &Subtarget);
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
@@ -152,8 +165,9 @@ namespace llvm {
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const override;
// Intrinsics
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index cc27c4c..3d04678 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -28,20 +28,12 @@ def TypeXTYPE : IType<8>;
def TypeENDLOOP: IType<31>;
// Maintain list of valid subtargets for each instruction.
-class SubTarget<bits<4> value> {
- bits<4> Value = value;
+class SubTarget<bits<6> value> {
+ bits<6> Value = value;
}
-def HasV2SubT : SubTarget<0xf>;
-def HasV2SubTOnly : SubTarget<0x1>;
-def NoV2SubT : SubTarget<0x0>;
-def HasV3SubT : SubTarget<0xe>;
-def HasV3SubTOnly : SubTarget<0x2>;
-def NoV3SubT : SubTarget<0x1>;
-def HasV4SubT : SubTarget<0xc>;
-def NoV4SubT : SubTarget<0x3>;
-def HasV5SubT : SubTarget<0x8>;
-def NoV5SubT : SubTarget<0x7>;
+def HasAnySubT : SubTarget<0x3f>; // 111111
+def HasV5SubT : SubTarget<0x3e>; // 111110
// Addressing modes for load/store instructions
class AddrModeType<bits<3> value> {
@@ -56,8 +48,8 @@ def BaseLongOffset : AddrModeType<4>; // Indirect with long offset
def BaseRegOffset : AddrModeType<5>; // Indirect with register offset
def PostInc : AddrModeType<6>; // Post increment addressing mode
-class MemAccessSize<bits<3> value> {
- bits<3> Value = value;
+class MemAccessSize<bits<4> value> {
+ bits<4> Value = value;
}
def NoMemAccess : MemAccessSize<0>; // Not a memory access instruction.
@@ -157,11 +149,11 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
bits<2> opExtentAlign = 0;
let TSFlags{33-32} = opExtentAlign; // Alignment exponent before extending.
- // If an instruction is valid on a subtarget (v2-v5), set the corresponding
- // bit from validSubTargets. v2 is the least significant bit.
+ // If an instruction is valid on a subtarget, set the corresponding
+  // bit in validSubTargets.
// By default, instruction is valid on all subtargets.
- SubTarget validSubTargets = HasV2SubT;
- let TSFlags{37-34} = validSubTargets.Value;
+ SubTarget validSubTargets = HasAnySubT;
+ let TSFlags{39-34} = validSubTargets.Value;
// Addressing mode for load/store instructions.
AddrModeType addrMode = NoAddrMode;
@@ -169,7 +161,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// Memory access size for mem access instructions (load/store)
MemAccessSize accessSize = NoMemAccess;
- let TSFlags{45-43} = accessSize.Value;
+ let TSFlags{46-43} = accessSize.Value;
bits<1> isTaken = 0;
let TSFlags {47} = isTaken; // Branch prediction.
@@ -186,13 +178,12 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
string InputType = ""; // Input is "imm" or "reg" type.
string isMEMri = "false"; // Set to "true" for load/store with MEMri operand.
string isFloat = "false"; // Set to "true" for the floating-point load/store.
- string isBrTaken = ""; // Set to "true"/"false" for jump instructions
+ string isBrTaken = !if(isTaken, "true", "false"); // Set to "true"/"false" for jump instructions
let PredSense = !if(isPredicated, !if(isPredicatedFalse, "false", "true"),
"");
let PNewValue = !if(isPredicatedNew, "new", "");
let NValueST = !if(isNVStore, "true", "false");
- let isCodeGenOnly = 1;
// *** Must match MCTargetDesc/HexagonBaseInfo.h ***
}
@@ -203,6 +194,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
+let mayLoad = 1 in
class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
@@ -365,7 +357,6 @@ class ALU32_ii<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
: ALU32Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
//
// ALU64 patterns.
//
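
The .td changes above widen two TSFlags fields: validSubTargets grows from 4 bits at {37-34} to 6 bits at {39-34}, and accessSize from 3 bits at {45-43} to 4 bits at {46-43}. Decoders read these fields with a position/mask pair that has to match the new layout; the in-tree constants live in MCTargetDesc/HexagonBaseInfo.h, and the names below are illustrative, not the real ones:

#include <cassert>
#include <cstdint>

// Hypothetical pos/mask pairs mirroring the widened field layout.
enum : uint64_t {
  ValidSubTargetsPos = 34, ValidSubTargetsMask = 0x3f, // now 6 bits
  AccessSizePos      = 43, AccessSizeMask      = 0xf,  // now 4 bits
};

static unsigned getField(uint64_t TSFlags, unsigned Pos, uint64_t Mask) {
  return (TSFlags >> Pos) & Mask;
}

int main() {
  uint64_t Flags = (uint64_t)0x3e << ValidSubTargetsPos; // HasV5SubT
  assert(getField(Flags, ValidSubTargetsPos, ValidSubTargetsMask) == 0x3e);
  return 0;
}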
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index d92f97b..5fec80b 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -19,6 +19,7 @@
def TypeMEMOP : IType<9>;
def TypeNV : IType<10>;
+def TypeCOMPOUND : IType<12>;
def TypePREFIX : IType<30>;
//----------------------------------------------------------------------------//
@@ -65,3 +66,7 @@ let isCodeGenOnly = 1 in
class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
: InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
TypePREFIX>;
+
+class CJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "">
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND, TypeCOMPOUND>;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 1688c4a..9bae12c 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -78,11 +78,11 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
switch (MI->getOpcode()) {
default: break;
- case Hexagon::LDriw:
- case Hexagon::LDrid:
- case Hexagon::LDrih:
- case Hexagon::LDrib:
- case Hexagon::LDriub:
+ case Hexagon::L2_loadri_io:
+ case Hexagon::L2_loadrd_io:
+ case Hexagon::L2_loadrh_io:
+ case Hexagon::L2_loadrb_io:
+ case Hexagon::L2_loadrub_io:
if (MI->getOperand(2).isFI() &&
MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
FrameIndex = MI->getOperand(2).getIndex();
@@ -103,10 +103,10 @@ unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
- case Hexagon::STriw:
- case Hexagon::STrid:
- case Hexagon::STrih:
- case Hexagon::STrib:
+ case Hexagon::S2_storeri_io:
+ case Hexagon::S2_storerd_io:
+ case Hexagon::S2_storerh_io:
+ case Hexagon::S2_storerb_io:
if (MI->getOperand(2).isFI() &&
MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
FrameIndex = MI->getOperand(0).getIndex();
@@ -124,8 +124,8 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const{
- int BOpc = Hexagon::JMP;
- int BccOpc = Hexagon::JMP_t;
+ int BOpc = Hexagon::J2_jump;
+ int BccOpc = Hexagon::J2_jumpt;
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -134,7 +134,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
// If we want to reverse the branch an odd number of times, we want
// J2_jumpf.
if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
- BccOpc = Hexagon::JMP_f;
+ BccOpc = Hexagon::J2_jumpf;
regPos = 1;
}
@@ -213,7 +213,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Delete the JMP if it's equivalent to a fall-through.
- if (AllowModify && I->getOpcode() == Hexagon::JMP &&
+ if (AllowModify && I->getOpcode() == Hexagon::J2_jump &&
MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
I->eraseFromParent();
@@ -249,7 +249,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If there is only one terminator instruction, process it.
if (LastInst && !SecondLastInst) {
- if (LastOpcode == Hexagon::JMP) {
+ if (LastOpcode == Hexagon::J2_jump) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
@@ -274,7 +274,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
bool SecLastOpcodeHasNot = PredOpcodeHasNot(SecLastOpcode);
- if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::JMP)) {
+ if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
TBB = SecondLastInst->getOperand(1).getMBB();
if (SecLastOpcodeHasNot)
Cond.push_back(MachineOperand::CreateImm(0));
@@ -285,7 +285,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If the block ends with two Hexagon::J2_jump branches, handle it. The second one is not
// executed, so remove it.
- if (SecLastOpcode == Hexagon::JMP && LastOpcode == Hexagon::JMP) {
+ if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
@@ -295,7 +295,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If the block ends with an ENDLOOP and a J2_jump, handle it.
if (SecLastOpcode == Hexagon::ENDLOOP0 &&
- LastOpcode == Hexagon::JMP) {
+ LastOpcode == Hexagon::J2_jump) {
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(SecondLastInst->getOperand(0));
FBB = LastInst->getOperand(0).getMBB();
@@ -308,9 +308,9 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- int BOpc = Hexagon::JMP;
- int BccOpc = Hexagon::JMP_t;
- int BccOpcNot = Hexagon::JMP_f;
+ int BOpc = Hexagon::J2_jump;
+ int BccOpc = Hexagon::J2_jumpt;
+ int BccOpcNot = Hexagon::J2_jumpf;
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
@@ -346,33 +346,31 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
// Set mask and the first source register.
switch (Opc) {
- case Hexagon::CMPEHexagon4rr:
- case Hexagon::CMPEQri:
- case Hexagon::CMPEQrr:
- case Hexagon::CMPGT64rr:
- case Hexagon::CMPGTU64rr:
- case Hexagon::CMPGTUri:
- case Hexagon::CMPGTUrr:
- case Hexagon::CMPGTri:
- case Hexagon::CMPGTrr:
+ case Hexagon::C2_cmpeqp:
+ case Hexagon::C2_cmpeqi:
+ case Hexagon::C2_cmpeq:
+ case Hexagon::C2_cmpgtp:
+ case Hexagon::C2_cmpgtup:
+ case Hexagon::C2_cmpgtui:
+ case Hexagon::C2_cmpgtu:
+ case Hexagon::C2_cmpgti:
+ case Hexagon::C2_cmpgt:
SrcReg = MI->getOperand(1).getReg();
Mask = ~0;
break;
- case Hexagon::CMPbEQri_V4:
- case Hexagon::CMPbEQrr_sbsb_V4:
- case Hexagon::CMPbEQrr_ubub_V4:
- case Hexagon::CMPbGTUri_V4:
- case Hexagon::CMPbGTUrr_V4:
- case Hexagon::CMPbGTrr_V4:
+ case Hexagon::A4_cmpbeqi:
+ case Hexagon::A4_cmpbeq:
+ case Hexagon::A4_cmpbgtui:
+ case Hexagon::A4_cmpbgtu:
+ case Hexagon::A4_cmpbgt:
SrcReg = MI->getOperand(1).getReg();
Mask = 0xFF;
break;
- case Hexagon::CMPhEQri_V4:
- case Hexagon::CMPhEQrr_shl_V4:
- case Hexagon::CMPhEQrr_xor_V4:
- case Hexagon::CMPhGTUri_V4:
- case Hexagon::CMPhGTUrr_V4:
- case Hexagon::CMPhGTrr_shl_V4:
+ case Hexagon::A4_cmpheqi:
+ case Hexagon::A4_cmpheq:
+ case Hexagon::A4_cmphgtui:
+ case Hexagon::A4_cmphgtu:
+ case Hexagon::A4_cmphgt:
SrcReg = MI->getOperand(1).getReg();
Mask = 0xFFFF;
break;
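
analyzeCompare reports, alongside the source operands, a Mask describing how many low bits the compare actually examines: ~0 for the word/doubleword forms, 0xFF for the A4_cmpb* byte forms, 0xFFFF for the A4_cmph* halfword forms. A toy illustration of how a consumer of that interface would apply the mask (the helper below is hypothetical, not an LLVM API):

#include <cassert>
#include <cstdint>

// Compare only the bits the instruction would compare.
static bool maskedEqual(uint32_t A, uint32_t B, uint32_t Mask) {
  return (A & Mask) == (B & Mask);
}

int main() {
  assert(maskedEqual(0x1234, 0xAB34, 0xFF));    // byte forms: equal
  assert(!maskedEqual(0x1234, 0xAB34, 0xFFFF)); // halfword forms: differ
  return 0;
}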
@@ -380,30 +378,28 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
// Set the value/second source register.
switch (Opc) {
- case Hexagon::CMPEHexagon4rr:
- case Hexagon::CMPEQrr:
- case Hexagon::CMPGT64rr:
- case Hexagon::CMPGTU64rr:
- case Hexagon::CMPGTUrr:
- case Hexagon::CMPGTrr:
- case Hexagon::CMPbEQrr_sbsb_V4:
- case Hexagon::CMPbEQrr_ubub_V4:
- case Hexagon::CMPbGTUrr_V4:
- case Hexagon::CMPbGTrr_V4:
- case Hexagon::CMPhEQrr_shl_V4:
- case Hexagon::CMPhEQrr_xor_V4:
- case Hexagon::CMPhGTUrr_V4:
- case Hexagon::CMPhGTrr_shl_V4:
+ case Hexagon::C2_cmpeqp:
+ case Hexagon::C2_cmpeq:
+ case Hexagon::C2_cmpgtp:
+ case Hexagon::C2_cmpgtup:
+ case Hexagon::C2_cmpgtu:
+ case Hexagon::C2_cmpgt:
+ case Hexagon::A4_cmpbeq:
+ case Hexagon::A4_cmpbgtu:
+ case Hexagon::A4_cmpbgt:
+ case Hexagon::A4_cmpheq:
+ case Hexagon::A4_cmphgtu:
+ case Hexagon::A4_cmphgt:
SrcReg2 = MI->getOperand(2).getReg();
return true;
- case Hexagon::CMPEQri:
- case Hexagon::CMPGTUri:
- case Hexagon::CMPGTri:
- case Hexagon::CMPbEQri_V4:
- case Hexagon::CMPbGTUri_V4:
- case Hexagon::CMPhEQri_V4:
- case Hexagon::CMPhGTUri_V4:
+ case Hexagon::C2_cmpeqi:
+ case Hexagon::C2_cmpgtui:
+ case Hexagon::C2_cmpgti:
+ case Hexagon::A4_cmpbeqi:
+ case Hexagon::A4_cmpbgtui:
+ case Hexagon::A4_cmpheqi:
+ case Hexagon::A4_cmphgtui:
SrcReg2 = 0;
Value = MI->getOperand(2).getImm();
return true;
@@ -418,16 +414,16 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::TFR), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg).addReg(SrcReg);
return;
}
if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::TFR64), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg).addReg(SrcReg);
return;
}
if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
// Map Pd = Ps to Pd = or(Ps, Ps).
- BuildMI(MBB, I, DL, get(Hexagon::OR_pp),
+ BuildMI(MBB, I, DL, get(Hexagon::C2_or),
DestReg).addReg(SrcReg).addReg(SrcReg);
return;
}
@@ -436,31 +432,31 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// We can have an overlap between single and double reg: r1:0 = r0.
if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
// r1:0 = r0
- BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
Hexagon::subreg_hireg))).addImm(0);
} else {
// r1:0 = r1 or no overlap.
- BuildMI(MBB, I, DL, get(Hexagon::TFR), (RI.getSubReg(DestReg,
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), (RI.getSubReg(DestReg,
Hexagon::subreg_loreg))).addReg(SrcReg);
- BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
Hexagon::subreg_hireg))).addImm(0);
}
return;
}
- if (Hexagon::CRRegsRegClass.contains(DestReg) &&
+ if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
Hexagon::IntRegsRegClass.contains(SrcReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg).addReg(SrcReg);
return;
}
if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
Hexagon::IntRegsRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::TFR_RsPd), DestReg).
+ BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg).
addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
Hexagon::PredRegsRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::TFR_PdRs), DestReg).
+ BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg).
addReg(SrcReg, getKillRegState(KillSrc));
return;
}
@@ -488,11 +484,11 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
Align);
if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::STriw))
+ BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
} else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::STrid))
+ BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
} else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
@@ -533,10 +529,10 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MFI.getObjectSize(FI),
Align);
if (RC == &Hexagon::IntRegsRegClass) {
- BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
+ BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else if (RC == &Hexagon::DoubleRegsRegClass) {
- BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
+ BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else if (RC == &Hexagon::PredRegsRegClass) {
BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
@@ -582,10 +578,6 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
}
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const MCInstrDesc &MID = MI->getDesc();
const uint64_t F = MID.TSFlags;
if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
@@ -648,78 +640,68 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
const int Opc = MI->getOpcode();
switch(Opc) {
- case Hexagon::TFRI:
+ case Hexagon::A2_tfrsi:
return isInt<12>(MI->getOperand(1).getImm());
- case Hexagon::STrid:
- case Hexagon::STrid_indexed:
+ case Hexagon::S2_storerd_io:
return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
- case Hexagon::STriw:
- case Hexagon::STriw_indexed:
- case Hexagon::STriw_nv_V4:
+ case Hexagon::S2_storeri_io:
+ case Hexagon::S2_storerinew_io:
return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
- case Hexagon::STrih:
- case Hexagon::STrih_indexed:
- case Hexagon::STrih_nv_V4:
+ case Hexagon::S2_storerh_io:
+ case Hexagon::S2_storerhnew_io:
return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
- case Hexagon::STrib:
- case Hexagon::STrib_indexed:
- case Hexagon::STrib_nv_V4:
+ case Hexagon::S2_storerb_io:
+ case Hexagon::S2_storerbnew_io:
return isUInt<6>(MI->getOperand(1).getImm());
- case Hexagon::LDrid:
- case Hexagon::LDrid_indexed:
+ case Hexagon::L2_loadrd_io:
return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
- case Hexagon::LDriw:
- case Hexagon::LDriw_indexed:
+ case Hexagon::L2_loadri_io:
return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
- case Hexagon::LDrih:
- case Hexagon::LDriuh:
- case Hexagon::LDrih_indexed:
- case Hexagon::LDriuh_indexed:
+ case Hexagon::L2_loadrh_io:
+ case Hexagon::L2_loadruh_io:
return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
- case Hexagon::LDrib:
- case Hexagon::LDriub:
- case Hexagon::LDrib_indexed:
- case Hexagon::LDriub_indexed:
+ case Hexagon::L2_loadrb_io:
+ case Hexagon::L2_loadrub_io:
return isUInt<6>(MI->getOperand(2).getImm());
- case Hexagon::POST_LDrid:
+ case Hexagon::L2_loadrd_pi:
return isShiftedInt<4,3>(MI->getOperand(3).getImm());
- case Hexagon::POST_LDriw:
+ case Hexagon::L2_loadri_pi:
return isShiftedInt<4,2>(MI->getOperand(3).getImm());
- case Hexagon::POST_LDrih:
- case Hexagon::POST_LDriuh:
+ case Hexagon::L2_loadrh_pi:
+ case Hexagon::L2_loadruh_pi:
return isShiftedInt<4,1>(MI->getOperand(3).getImm());
- case Hexagon::POST_LDrib:
- case Hexagon::POST_LDriub:
+ case Hexagon::L2_loadrb_pi:
+ case Hexagon::L2_loadrub_pi:
return isInt<4>(MI->getOperand(3).getImm());
- case Hexagon::STrib_imm_V4:
- case Hexagon::STrih_imm_V4:
- case Hexagon::STriw_imm_V4:
+ case Hexagon::S4_storeirb_io:
+ case Hexagon::S4_storeirh_io:
+ case Hexagon::S4_storeiri_io:
return (isUInt<6>(MI->getOperand(1).getImm()) &&
isInt<6>(MI->getOperand(2).getImm()));
- case Hexagon::ADD_ri:
+ case Hexagon::A2_addi:
return isInt<8>(MI->getOperand(2).getImm());
- case Hexagon::ASLH:
- case Hexagon::ASRH:
- case Hexagon::SXTB:
- case Hexagon::SXTH:
- case Hexagon::ZXTB:
- case Hexagon::ZXTH:
- return Subtarget.hasV4TOps();
+ case Hexagon::A2_aslh:
+ case Hexagon::A2_asrh:
+ case Hexagon::A2_sxtb:
+ case Hexagon::A2_sxth:
+ case Hexagon::A2_zxtb:
+ case Hexagon::A2_zxth:
+ return true;
}
return true;
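
The predication checks above lean on isShiftedUInt<N, S> and isShiftedInt<N, S>: the offset must be an N-bit value pre-scaled by the access size (S = 3 for doublewords, 2 for words, 1 for halfwords). A minimal model of the unsigned variant, under the same semantics as the llvm::isShiftedUInt helper:

#include <cassert>
#include <cstdint>

// X is representable as an N-bit unsigned value shifted left by S:
// aligned to 1 << S and, once scaled down, within N unsigned bits.
template <unsigned N, unsigned S> static bool shiftedUInt(uint64_t X) {
  return (X & ((1u << S) - 1)) == 0 && (X >> S) < (1u << N);
}

int main() {
  assert(shiftedUInt<6, 2>(252));  // 63 * 4, the largest word offset
  assert(!shiftedUInt<6, 2>(254)); // misaligned
  assert(!shiftedUInt<6, 2>(256)); // 64 * 4, out of range
  return 0;
}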
@@ -739,16 +721,16 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
switch(Opc) {
default: llvm_unreachable("Unexpected predicated instruction");
- case Hexagon::COMBINE_rr_cPt:
- return Hexagon::COMBINE_rr_cNotPt;
- case Hexagon::COMBINE_rr_cNotPt:
- return Hexagon::COMBINE_rr_cPt;
+ case Hexagon::C2_ccombinewt:
+ return Hexagon::C2_ccombinewf;
+ case Hexagon::C2_ccombinewf:
+ return Hexagon::C2_ccombinewt;
// Dealloc_return.
- case Hexagon::DEALLOC_RET_cPt_V4:
- return Hexagon::DEALLOC_RET_cNotPt_V4;
- case Hexagon::DEALLOC_RET_cNotPt_V4:
- return Hexagon::DEALLOC_RET_cPt_V4;
+ case Hexagon::L4_return_t:
+ return Hexagon::L4_return_f;
+ case Hexagon::L4_return_f:
+ return Hexagon::L4_return_t;
}
}
@@ -780,22 +762,14 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::TFRI_f:
return !invertPredicate ? Hexagon::TFRI_cPt_f :
Hexagon::TFRI_cNotPt_f;
- case Hexagon::COMBINE_rr:
- return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
- Hexagon::COMBINE_rr_cNotPt;
-
- // Word.
- case Hexagon::STriw_f:
- return !invertPredicate ? Hexagon::STriw_cPt :
- Hexagon::STriw_cNotPt;
- case Hexagon::STriw_indexed_f:
- return !invertPredicate ? Hexagon::STriw_indexed_cPt :
- Hexagon::STriw_indexed_cNotPt;
+ case Hexagon::A2_combinew:
+ return !invertPredicate ? Hexagon::C2_ccombinewt :
+ Hexagon::C2_ccombinewf;
// DEALLOC_RETURN.
- case Hexagon::DEALLOC_RET_V4:
- return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
- Hexagon::DEALLOC_RET_cNotPt_V4;
+ case Hexagon::L4_return:
+ return !invertPredicate ? Hexagon::L4_return_t:
+ Hexagon::L4_return_f;
}
llvm_unreachable("Unexpected predicable instruction");
}
@@ -901,7 +875,7 @@ PredicateInstruction(MachineInstr *MI,
continue;
}
else {
- assert(false && "Unexpected operand type");
+ llvm_unreachable("Unexpected operand type");
}
}
}
@@ -1024,12 +998,10 @@ bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
// Returns true, if a ST insn can be promoted to a new-value store.
bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
- const HexagonRegisterInfo& QRI = getRegisterInfo();
const uint64_t F = MI->getDesc().TSFlags;
return ((F >> HexagonII::mayNVStorePos) &
- HexagonII::mayNVStoreMask &
- QRI.Subtarget.hasV4TOps());
+ HexagonII::mayNVStoreMask);
}
bool
@@ -1082,13 +1054,13 @@ isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs,
bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: return false;
- case Hexagon::DEALLOC_RET_V4 :
- case Hexagon::DEALLOC_RET_cPt_V4 :
- case Hexagon::DEALLOC_RET_cNotPt_V4 :
- case Hexagon::DEALLOC_RET_cdnPnt_V4 :
- case Hexagon::DEALLOC_RET_cNotdnPnt_V4 :
- case Hexagon::DEALLOC_RET_cdnPt_V4 :
- case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
+ case Hexagon::L4_return:
+ case Hexagon::L4_return_t:
+ case Hexagon::L4_return_f:
+ case Hexagon::L4_return_tnew_pnt:
+ case Hexagon::L4_return_fnew_pnt:
+ case Hexagon::L4_return_tnew_pt:
+ case Hexagon::L4_return_fnew_pt:
return true;
}
}
@@ -1107,63 +1079,55 @@ isValidOffset(const int Opcode, const int Offset) const {
switch(Opcode) {
- case Hexagon::LDriw:
- case Hexagon::LDriw_indexed:
- case Hexagon::LDriw_f:
- case Hexagon::STriw_indexed:
- case Hexagon::STriw:
- case Hexagon::STriw_f:
+ case Hexagon::L2_loadri_io:
+ case Hexagon::S2_storeri_io:
return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
(Offset <= Hexagon_MEMW_OFFSET_MAX);
- case Hexagon::LDrid:
- case Hexagon::LDrid_indexed:
- case Hexagon::LDrid_f:
- case Hexagon::STrid:
- case Hexagon::STrid_indexed:
- case Hexagon::STrid_f:
+ case Hexagon::L2_loadrd_io:
+ case Hexagon::S2_storerd_io:
return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
(Offset <= Hexagon_MEMD_OFFSET_MAX);
- case Hexagon::LDrih:
- case Hexagon::LDriuh:
- case Hexagon::STrih:
+ case Hexagon::L2_loadrh_io:
+ case Hexagon::L2_loadruh_io:
+ case Hexagon::S2_storerh_io:
return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
(Offset <= Hexagon_MEMH_OFFSET_MAX);
- case Hexagon::LDrib:
- case Hexagon::STrib:
- case Hexagon::LDriub:
+ case Hexagon::L2_loadrb_io:
+ case Hexagon::S2_storerb_io:
+ case Hexagon::L2_loadrub_io:
return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
(Offset <= Hexagon_MEMB_OFFSET_MAX);
- case Hexagon::ADD_ri:
+ case Hexagon::A2_addi:
case Hexagon::TFR_FI:
return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
(Offset <= Hexagon_ADDI_OFFSET_MAX);
- case Hexagon::MemOPw_ADDi_V4 :
- case Hexagon::MemOPw_SUBi_V4 :
- case Hexagon::MemOPw_ADDr_V4 :
- case Hexagon::MemOPw_SUBr_V4 :
- case Hexagon::MemOPw_ANDr_V4 :
- case Hexagon::MemOPw_ORr_V4 :
+ case Hexagon::L4_iadd_memopw_io:
+ case Hexagon::L4_isub_memopw_io:
+ case Hexagon::L4_add_memopw_io:
+ case Hexagon::L4_sub_memopw_io:
+ case Hexagon::L4_and_memopw_io:
+ case Hexagon::L4_or_memopw_io:
return (0 <= Offset && Offset <= 255);
- case Hexagon::MemOPh_ADDi_V4 :
- case Hexagon::MemOPh_SUBi_V4 :
- case Hexagon::MemOPh_ADDr_V4 :
- case Hexagon::MemOPh_SUBr_V4 :
- case Hexagon::MemOPh_ANDr_V4 :
- case Hexagon::MemOPh_ORr_V4 :
+ case Hexagon::L4_iadd_memoph_io:
+ case Hexagon::L4_isub_memoph_io:
+ case Hexagon::L4_add_memoph_io:
+ case Hexagon::L4_sub_memoph_io:
+ case Hexagon::L4_and_memoph_io:
+ case Hexagon::L4_or_memoph_io:
return (0 <= Offset && Offset <= 127);
- case Hexagon::MemOPb_ADDi_V4 :
- case Hexagon::MemOPb_SUBi_V4 :
- case Hexagon::MemOPb_ADDr_V4 :
- case Hexagon::MemOPb_SUBr_V4 :
- case Hexagon::MemOPb_ANDr_V4 :
- case Hexagon::MemOPb_ORr_V4 :
+ case Hexagon::L4_iadd_memopb_io:
+ case Hexagon::L4_isub_memopb_io:
+ case Hexagon::L4_add_memopb_io:
+ case Hexagon::L4_sub_memopb_io:
+ case Hexagon::L4_and_memopb_io:
+ case Hexagon::L4_or_memopb_io:
return (0 <= Offset && Offset <= 63);
// LDri_pred and STriw_pred are pseudo operations, so they have to take offset of
@@ -1172,7 +1136,7 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::LDriw_pred:
return true;
- case Hexagon::LOOP0_i:
+ case Hexagon::J2_loop0i:
return isUInt<10>(Offset);
// INLINEASM is very special.
@@ -1220,31 +1184,31 @@ isMemOp(const MachineInstr *MI) const {
switch (MI->getOpcode())
{
- default: return false;
- case Hexagon::MemOPw_ADDi_V4 :
- case Hexagon::MemOPw_SUBi_V4 :
- case Hexagon::MemOPw_ADDr_V4 :
- case Hexagon::MemOPw_SUBr_V4 :
- case Hexagon::MemOPw_ANDr_V4 :
- case Hexagon::MemOPw_ORr_V4 :
- case Hexagon::MemOPh_ADDi_V4 :
- case Hexagon::MemOPh_SUBi_V4 :
- case Hexagon::MemOPh_ADDr_V4 :
- case Hexagon::MemOPh_SUBr_V4 :
- case Hexagon::MemOPh_ANDr_V4 :
- case Hexagon::MemOPh_ORr_V4 :
- case Hexagon::MemOPb_ADDi_V4 :
- case Hexagon::MemOPb_SUBi_V4 :
- case Hexagon::MemOPb_ADDr_V4 :
- case Hexagon::MemOPb_SUBr_V4 :
- case Hexagon::MemOPb_ANDr_V4 :
- case Hexagon::MemOPb_ORr_V4 :
- case Hexagon::MemOPb_SETBITi_V4:
- case Hexagon::MemOPh_SETBITi_V4:
- case Hexagon::MemOPw_SETBITi_V4:
- case Hexagon::MemOPb_CLRBITi_V4:
- case Hexagon::MemOPh_CLRBITi_V4:
- case Hexagon::MemOPw_CLRBITi_V4:
+ default: return false;
+ case Hexagon::L4_iadd_memopw_io:
+ case Hexagon::L4_isub_memopw_io:
+ case Hexagon::L4_add_memopw_io:
+ case Hexagon::L4_sub_memopw_io:
+ case Hexagon::L4_and_memopw_io:
+ case Hexagon::L4_or_memopw_io:
+ case Hexagon::L4_iadd_memoph_io:
+ case Hexagon::L4_isub_memoph_io:
+ case Hexagon::L4_add_memoph_io:
+ case Hexagon::L4_sub_memoph_io:
+ case Hexagon::L4_and_memoph_io:
+ case Hexagon::L4_or_memoph_io:
+ case Hexagon::L4_iadd_memopb_io:
+ case Hexagon::L4_isub_memopb_io:
+ case Hexagon::L4_add_memopb_io:
+ case Hexagon::L4_sub_memopb_io:
+ case Hexagon::L4_and_memopb_io:
+ case Hexagon::L4_or_memopb_io:
+ case Hexagon::L4_ior_memopb_io:
+ case Hexagon::L4_ior_memoph_io:
+ case Hexagon::L4_ior_memopw_io:
+ case Hexagon::L4_iand_memopb_io:
+ case Hexagon::L4_iand_memoph_io:
+ case Hexagon::L4_iand_memopw_io:
return true;
}
return false;
@@ -1264,12 +1228,12 @@ isSpillPredRegOp(const MachineInstr *MI) const {
bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: return false;
- case Hexagon::CMPEQrr:
- case Hexagon::CMPEQri:
- case Hexagon::CMPGTrr:
- case Hexagon::CMPGTri:
- case Hexagon::CMPGTUrr:
- case Hexagon::CMPGTUri:
+ case Hexagon::C2_cmpeq:
+ case Hexagon::C2_cmpeqi:
+ case Hexagon::C2_cmpgt:
+ case Hexagon::C2_cmpgti:
+ case Hexagon::C2_cmpgtu:
+ case Hexagon::C2_cmpgtui:
return true;
}
}
@@ -1278,20 +1242,19 @@ bool HexagonInstrInfo::
isConditionalTransfer (const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: return false;
- case Hexagon::TFR_cPt:
- case Hexagon::TFR_cNotPt:
- case Hexagon::TFRI_cPt:
- case Hexagon::TFRI_cNotPt:
- case Hexagon::TFR_cdnPt:
- case Hexagon::TFR_cdnNotPt:
- case Hexagon::TFRI_cdnPt:
- case Hexagon::TFRI_cdnNotPt:
+ case Hexagon::A2_tfrt:
+ case Hexagon::A2_tfrf:
+ case Hexagon::C2_cmoveit:
+ case Hexagon::C2_cmoveif:
+ case Hexagon::A2_tfrtnew:
+ case Hexagon::A2_tfrfnew:
+ case Hexagon::C2_cmovenewit:
+ case Hexagon::C2_cmovenewif:
return true;
}
}
bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
- const HexagonRegisterInfo& QRI = getRegisterInfo();
switch (MI->getOpcode())
{
default: return false;
@@ -1303,94 +1266,92 @@ bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
case Hexagon::A2_pandfnew:
case Hexagon::A2_pandt:
case Hexagon::A2_pandtnew:
+ case Hexagon::A4_paslhf:
+ case Hexagon::A4_paslhfnew:
+ case Hexagon::A4_paslht:
+ case Hexagon::A4_paslhtnew:
+ case Hexagon::A4_pasrhf:
+ case Hexagon::A4_pasrhfnew:
+ case Hexagon::A4_pasrht:
+ case Hexagon::A4_pasrhtnew:
case Hexagon::A2_porf:
case Hexagon::A2_porfnew:
case Hexagon::A2_port:
case Hexagon::A2_portnew:
+ case Hexagon::A2_psubf:
+ case Hexagon::A2_psubfnew:
+ case Hexagon::A2_psubt:
+ case Hexagon::A2_psubtnew:
case Hexagon::A2_pxorf:
case Hexagon::A2_pxorfnew:
case Hexagon::A2_pxort:
case Hexagon::A2_pxortnew:
- case Hexagon::ADD_ri_cPt:
- case Hexagon::ADD_ri_cNotPt:
- case Hexagon::SUB_rr_cPt:
- case Hexagon::SUB_rr_cNotPt:
- case Hexagon::COMBINE_rr_cPt:
- case Hexagon::COMBINE_rr_cNotPt:
+ case Hexagon::A4_psxthf:
+ case Hexagon::A4_psxthfnew:
+ case Hexagon::A4_psxtht:
+ case Hexagon::A4_psxthtnew:
+ case Hexagon::A4_psxtbf:
+ case Hexagon::A4_psxtbfnew:
+ case Hexagon::A4_psxtbt:
+ case Hexagon::A4_psxtbtnew:
+ case Hexagon::A4_pzxtbf:
+ case Hexagon::A4_pzxtbfnew:
+ case Hexagon::A4_pzxtbt:
+ case Hexagon::A4_pzxtbtnew:
+ case Hexagon::A4_pzxthf:
+ case Hexagon::A4_pzxthfnew:
+ case Hexagon::A4_pzxtht:
+ case Hexagon::A4_pzxthtnew:
+ case Hexagon::A2_paddit:
+ case Hexagon::A2_paddif:
+ case Hexagon::C2_ccombinewt:
+ case Hexagon::C2_ccombinewf:
return true;
- case Hexagon::ASLH_cPt_V4:
- case Hexagon::ASLH_cNotPt_V4:
- case Hexagon::ASRH_cPt_V4:
- case Hexagon::ASRH_cNotPt_V4:
- case Hexagon::SXTB_cPt_V4:
- case Hexagon::SXTB_cNotPt_V4:
- case Hexagon::SXTH_cPt_V4:
- case Hexagon::SXTH_cNotPt_V4:
- case Hexagon::ZXTB_cPt_V4:
- case Hexagon::ZXTB_cNotPt_V4:
- case Hexagon::ZXTH_cPt_V4:
- case Hexagon::ZXTH_cNotPt_V4:
- return QRI.Subtarget.hasV4TOps();
}
}
bool HexagonInstrInfo::
isConditionalLoad (const MachineInstr* MI) const {
- const HexagonRegisterInfo& QRI = getRegisterInfo();
switch (MI->getOpcode())
{
default: return false;
- case Hexagon::LDrid_cPt :
- case Hexagon::LDrid_cNotPt :
- case Hexagon::LDrid_indexed_cPt :
- case Hexagon::LDrid_indexed_cNotPt :
- case Hexagon::LDriw_cPt :
- case Hexagon::LDriw_cNotPt :
- case Hexagon::LDriw_indexed_cPt :
- case Hexagon::LDriw_indexed_cNotPt :
- case Hexagon::LDrih_cPt :
- case Hexagon::LDrih_cNotPt :
- case Hexagon::LDrih_indexed_cPt :
- case Hexagon::LDrih_indexed_cNotPt :
- case Hexagon::LDrib_cPt :
- case Hexagon::LDrib_cNotPt :
- case Hexagon::LDrib_indexed_cPt :
- case Hexagon::LDrib_indexed_cNotPt :
- case Hexagon::LDriuh_cPt :
- case Hexagon::LDriuh_cNotPt :
- case Hexagon::LDriuh_indexed_cPt :
- case Hexagon::LDriuh_indexed_cNotPt :
- case Hexagon::LDriub_cPt :
- case Hexagon::LDriub_cNotPt :
- case Hexagon::LDriub_indexed_cPt :
- case Hexagon::LDriub_indexed_cNotPt :
+ case Hexagon::L2_ploadrdt_io :
+ case Hexagon::L2_ploadrdf_io:
+ case Hexagon::L2_ploadrit_io:
+ case Hexagon::L2_ploadrif_io:
+ case Hexagon::L2_ploadrht_io:
+ case Hexagon::L2_ploadrhf_io:
+ case Hexagon::L2_ploadrbt_io:
+ case Hexagon::L2_ploadrbf_io:
+ case Hexagon::L2_ploadruht_io:
+ case Hexagon::L2_ploadruhf_io:
+ case Hexagon::L2_ploadrubt_io:
+ case Hexagon::L2_ploadrubf_io:
+ case Hexagon::L2_ploadrdt_pi:
+ case Hexagon::L2_ploadrdf_pi:
+ case Hexagon::L2_ploadrit_pi:
+ case Hexagon::L2_ploadrif_pi:
+ case Hexagon::L2_ploadrht_pi:
+ case Hexagon::L2_ploadrhf_pi:
+ case Hexagon::L2_ploadrbt_pi:
+ case Hexagon::L2_ploadrbf_pi:
+ case Hexagon::L2_ploadruht_pi:
+ case Hexagon::L2_ploadruhf_pi:
+ case Hexagon::L2_ploadrubt_pi:
+ case Hexagon::L2_ploadrubf_pi:
+ case Hexagon::L4_ploadrdt_rr:
+ case Hexagon::L4_ploadrdf_rr:
+ case Hexagon::L4_ploadrbt_rr:
+ case Hexagon::L4_ploadrbf_rr:
+ case Hexagon::L4_ploadrubt_rr:
+ case Hexagon::L4_ploadrubf_rr:
+ case Hexagon::L4_ploadrht_rr:
+ case Hexagon::L4_ploadrhf_rr:
+ case Hexagon::L4_ploadruht_rr:
+ case Hexagon::L4_ploadruhf_rr:
+ case Hexagon::L4_ploadrit_rr:
+ case Hexagon::L4_ploadrif_rr:
return true;
- case Hexagon::POST_LDrid_cPt :
- case Hexagon::POST_LDrid_cNotPt :
- case Hexagon::POST_LDriw_cPt :
- case Hexagon::POST_LDriw_cNotPt :
- case Hexagon::POST_LDrih_cPt :
- case Hexagon::POST_LDrih_cNotPt :
- case Hexagon::POST_LDrib_cPt :
- case Hexagon::POST_LDrib_cNotPt :
- case Hexagon::POST_LDriuh_cPt :
- case Hexagon::POST_LDriuh_cNotPt :
- case Hexagon::POST_LDriub_cPt :
- case Hexagon::POST_LDriub_cNotPt :
- return QRI.Subtarget.hasV4TOps();
- case Hexagon::LDrid_indexed_shl_cPt_V4 :
- case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrib_indexed_shl_cPt_V4 :
- case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriub_indexed_shl_cPt_V4 :
- case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrih_indexed_shl_cPt_V4 :
- case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriw_indexed_shl_cPt_V4 :
- case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
- return QRI.Subtarget.hasV4TOps();
}
}
@@ -1430,55 +1391,50 @@ isConditionalLoad (const MachineInstr* MI) const {
// is not valid for new-value stores.
bool HexagonInstrInfo::
isConditionalStore (const MachineInstr* MI) const {
- const HexagonRegisterInfo& QRI = getRegisterInfo();
switch (MI->getOpcode())
{
default: return false;
- case Hexagon::STrib_imm_cPt_V4 :
- case Hexagon::STrib_imm_cNotPt_V4 :
- case Hexagon::STrib_indexed_shl_cPt_V4 :
- case Hexagon::STrib_indexed_shl_cNotPt_V4 :
- case Hexagon::STrib_cPt :
- case Hexagon::STrib_cNotPt :
- case Hexagon::POST_STbri_cPt :
- case Hexagon::POST_STbri_cNotPt :
- case Hexagon::STrid_indexed_cPt :
- case Hexagon::STrid_indexed_cNotPt :
- case Hexagon::STrid_indexed_shl_cPt_V4 :
- case Hexagon::POST_STdri_cPt :
- case Hexagon::POST_STdri_cNotPt :
- case Hexagon::STrih_cPt :
- case Hexagon::STrih_cNotPt :
- case Hexagon::STrih_indexed_cPt :
- case Hexagon::STrih_indexed_cNotPt :
- case Hexagon::STrih_imm_cPt_V4 :
- case Hexagon::STrih_imm_cNotPt_V4 :
- case Hexagon::STrih_indexed_shl_cPt_V4 :
- case Hexagon::STrih_indexed_shl_cNotPt_V4 :
- case Hexagon::POST_SThri_cPt :
- case Hexagon::POST_SThri_cNotPt :
- case Hexagon::STriw_cPt :
- case Hexagon::STriw_cNotPt :
- case Hexagon::STriw_indexed_cPt :
- case Hexagon::STriw_indexed_cNotPt :
- case Hexagon::STriw_imm_cPt_V4 :
- case Hexagon::STriw_imm_cNotPt_V4 :
- case Hexagon::STriw_indexed_shl_cPt_V4 :
- case Hexagon::STriw_indexed_shl_cNotPt_V4 :
- case Hexagon::POST_STwri_cPt :
- case Hexagon::POST_STwri_cNotPt :
- return QRI.Subtarget.hasV4TOps();
+ case Hexagon::S4_storeirbt_io:
+ case Hexagon::S4_storeirbf_io:
+ case Hexagon::S4_pstorerbt_rr:
+ case Hexagon::S4_pstorerbf_rr:
+ case Hexagon::S2_pstorerbt_io:
+ case Hexagon::S2_pstorerbf_io:
+ case Hexagon::S2_pstorerbt_pi:
+ case Hexagon::S2_pstorerbf_pi:
+ case Hexagon::S2_pstorerdt_io:
+ case Hexagon::S2_pstorerdf_io:
+ case Hexagon::S4_pstorerdt_rr:
+ case Hexagon::S4_pstorerdf_rr:
+ case Hexagon::S2_pstorerdt_pi:
+ case Hexagon::S2_pstorerdf_pi:
+ case Hexagon::S2_pstorerht_io:
+ case Hexagon::S2_pstorerhf_io:
+ case Hexagon::S4_storeirht_io:
+ case Hexagon::S4_storeirhf_io:
+ case Hexagon::S4_pstorerht_rr:
+ case Hexagon::S4_pstorerhf_rr:
+ case Hexagon::S2_pstorerht_pi:
+ case Hexagon::S2_pstorerhf_pi:
+ case Hexagon::S2_pstorerit_io:
+ case Hexagon::S2_pstorerif_io:
+ case Hexagon::S4_storeirit_io:
+ case Hexagon::S4_storeirif_io:
+ case Hexagon::S4_pstorerit_rr:
+ case Hexagon::S4_pstorerif_rr:
+ case Hexagon::S2_pstorerit_pi:
+ case Hexagon::S2_pstorerif_pi:
// V4 global address store before promoting to dot new.
- case Hexagon::STd_GP_cPt_V4 :
- case Hexagon::STd_GP_cNotPt_V4 :
- case Hexagon::STb_GP_cPt_V4 :
- case Hexagon::STb_GP_cNotPt_V4 :
- case Hexagon::STh_GP_cPt_V4 :
- case Hexagon::STh_GP_cNotPt_V4 :
- case Hexagon::STw_GP_cPt_V4 :
- case Hexagon::STw_GP_cNotPt_V4 :
- return QRI.Subtarget.hasV4TOps();
+ case Hexagon::S4_pstorerdt_abs:
+ case Hexagon::S4_pstorerdf_abs:
+ case Hexagon::S4_pstorerbt_abs:
+ case Hexagon::S4_pstorerbf_abs:
+ case Hexagon::S4_pstorerht_abs:
+ case Hexagon::S4_pstorerhf_abs:
+ case Hexagon::S4_pstorerit_abs:
+ case Hexagon::S4_pstorerif_abs:
+ return true;
// Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
// from the "Conditional Store" list. Because a predicated new value store
@@ -1566,20 +1522,14 @@ int HexagonInstrInfo::GetDotNewOp(const MachineInstr* MI) const {
switch (MI->getOpcode()) {
default: llvm_unreachable("Unknown .new type");
// store new value byte
- case Hexagon::STrib_shl_V4:
- return Hexagon::STrib_shl_nv_V4;
-
- case Hexagon::STrih_shl_V4:
- return Hexagon::STrih_shl_nv_V4;
+ case Hexagon::S4_storerb_ur:
+ return Hexagon::S4_storerbnew_ur;
- case Hexagon::STriw_f:
- return Hexagon::STriw_nv_V4;
+ case Hexagon::S4_storerh_ur:
+ return Hexagon::S4_storerhnew_ur;
- case Hexagon::STriw_indexed_f:
- return Hexagon::STriw_indexed_nv_V4;
-
- case Hexagon::STriw_shl_V4:
- return Hexagon::STriw_shl_nv_V4;
+ case Hexagon::S4_storeri_ur:
+ return Hexagon::S4_storerinew_ur;
}
return 0;
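// A sketch of a caller (hypothetical names; the real packetizer logic is
// more involved): rewrite a matched store in place to its new-value form.
//   int NVOpc = HII->GetDotNewOp(MI); // e.g. S4_storerb_ur -> S4_storerbnew_ur
//   MI->setDesc(HII->get(NVOpc));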
@@ -1597,28 +1547,28 @@ int HexagonInstrInfo::GetDotNewPredOp(MachineInstr *MI,
switch (MI->getOpcode()) {
default: llvm_unreachable("Unknown .new type");
// Conditional Jumps
- case Hexagon::JMP_t:
- case Hexagon::JMP_f:
+ case Hexagon::J2_jumpt:
+ case Hexagon::J2_jumpf:
return getDotNewPredJumpOp(MI, MBPI);
- case Hexagon::JMPR_t:
- return Hexagon::JMPR_tnew_tV3;
+ case Hexagon::J2_jumprt:
+ return Hexagon::J2_jumprtnewpt;
- case Hexagon::JMPR_f:
- return Hexagon::JMPR_fnew_tV3;
+ case Hexagon::J2_jumprf:
+ return Hexagon::J2_jumprfnewpt;
- case Hexagon::JMPret_t:
- return Hexagon::JMPret_tnew_tV3;
+ case Hexagon::JMPrett:
+ return Hexagon::J2_jumprtnewpt;
- case Hexagon::JMPret_f:
- return Hexagon::JMPret_fnew_tV3;
+ case Hexagon::JMPretf:
+ return Hexagon::J2_jumprfnewpt;
// Conditional combine
- case Hexagon::COMBINE_rr_cPt :
- return Hexagon::COMBINE_rr_cdnPt;
- case Hexagon::COMBINE_rr_cNotPt :
- return Hexagon::COMBINE_rr_cdnNotPt;
+ case Hexagon::C2_ccombinewt:
+ return Hexagon::C2_ccombinewnewt;
+ case Hexagon::C2_ccombinewf:
+ return Hexagon::C2_ccombinewnewf;
}
}
@@ -1670,11 +1620,6 @@ bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
}
bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
-
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const uint64_t F = MI->getDesc().TSFlags;
unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
if (isExtended) // Instruction must be extended.
@@ -1735,10 +1680,10 @@ HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI,
taken = true;
switch (MI->getOpcode()) {
- case Hexagon::JMP_t:
- return taken ? Hexagon::JMP_tnew_t : Hexagon::JMP_tnew_nt;
- case Hexagon::JMP_f:
- return taken ? Hexagon::JMP_fnew_t : Hexagon::JMP_fnew_nt;
+ case Hexagon::J2_jumpt:
+ return taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
+ case Hexagon::J2_jumpf:
+ return taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
default:
llvm_unreachable("Unexpected jump instruction.");
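// The "...newpt"/"...new" pairs differ only in the static branch-prediction
// hint on the .new jump (illustrative assembly):
//   J2_jumptnewpt:  if (p0.new) jump:t  target   // predicted taken
//   J2_jumptnew:    if (p0.new) jump:nt target   // predicted not taken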
@@ -1747,10 +1692,6 @@ HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI,
// Returns true if a particular operand is extendable for an instruction.
bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
unsigned short OperandNum) const {
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const uint64_t F = MI->getDesc().TSFlags;
return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
@@ -1850,16 +1791,16 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
}
bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const {
- return (Opcode == Hexagon::JMP_t) ||
- (Opcode == Hexagon::JMP_f) ||
- (Opcode == Hexagon::JMP_tnew_t) ||
- (Opcode == Hexagon::JMP_fnew_t) ||
- (Opcode == Hexagon::JMP_tnew_nt) ||
- (Opcode == Hexagon::JMP_fnew_nt);
+ return (Opcode == Hexagon::J2_jumpt) ||
+ (Opcode == Hexagon::J2_jumpf) ||
+ (Opcode == Hexagon::J2_jumptnewpt) ||
+ (Opcode == Hexagon::J2_jumpfnewpt) ||
+ (Opcode == Hexagon::J2_jumptnew) ||
+ (Opcode == Hexagon::J2_jumpfnew);
}
bool HexagonInstrInfo::PredOpcodeHasNot(Opcode_t Opcode) const {
- return (Opcode == Hexagon::JMP_f) ||
- (Opcode == Hexagon::JMP_fnew_t) ||
- (Opcode == Hexagon::JMP_fnew_nt);
+ return (Opcode == Hexagon::J2_jumpf) ||
+ (Opcode == Hexagon::J2_jumpfnewpt) ||
+ (Opcode == Hexagon::J2_jumpfnew);
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index 4090681..60635cf 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -14,83 +14,100 @@
include "HexagonInstrFormats.td"
include "HexagonOperands.td"
-//===----------------------------------------------------------------------===//
+// Pattern fragment that combines the value type and the register class
+// into a single parameter.
+// The pat frags in the definitions below need to have a named register,
+// otherwise i32 will be assumed regardless of the register class. The
+// name of the register does not matter.
+def I1 : PatLeaf<(i1 PredRegs:$R)>;
+def I32 : PatLeaf<(i32 IntRegs:$R)>;
+def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
+def F32 : PatLeaf<(f32 IntRegs:$R)>;
+def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
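+// A sketch of how these leaves are consumed; this is the A2_addi selection
+// pattern that appears further down in this file:
+//   def: Pat<(i32 (add I32:$Rs, s16ExtPred:$s16)),
+//            (i32 (A2_addi I32:$Rs, imm:$s16))>;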
+
+// Pattern fragments to extract the low and high subregisters from a
+// 64-bit value.
+def LoReg: OutPatFrag<(ops node:$Rs),
+ (EXTRACT_SUBREG (i64 $Rs), subreg_loreg)>;
+def HiReg: OutPatFrag<(ops node:$Rs),
+ (EXTRACT_SUBREG (i64 $Rs), subreg_hireg)>;
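+// OutPatFrags expand on the output side of a pattern. Illustrative use
+// (the pattern itself is hypothetical):
+//   def: Pat<(i32 (trunc (i64 DoubleRegs:$Rss))), (LoReg DoubleRegs:$Rss)>;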
-// Multi-class for logical operators.
-multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> {
- def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$b),
- (i32 IntRegs:$c)))]>;
- def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")),
- [(set (i32 IntRegs:$dst), (OpNode s10Imm:$b,
- (i32 IntRegs:$c)))]>;
-}
+// SDNode for converting immediate C to C-1.
+def DEC_CONST_SIGNED : SDNodeXForm<imm, [{
+ // Return the byte immediate const-1 as an SDNode.
+ int32_t imm = N->getSExtValue();
+ return XformSToSM1Imm(imm);
+}]>;
-// Multi-class for compare ops.
-let isCompare = 1 in {
-multiclass CMP64_rr<string OpcStr, PatFrag OpNode> {
- def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set (i1 PredRegs:$dst),
- (OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>;
-}
+// SDNode for converting immediate C to C-2.
+def DEC2_CONST_SIGNED : SDNodeXForm<imm, [{
+ // Return the byte immediate const-2 as an SDNode.
+ int32_t imm = N->getSExtValue();
+ return XformSToSM2Imm(imm);
+}]>;
+
+// SDNode for converting immediate C to C-3.
+def DEC3_CONST_SIGNED : SDNodeXForm<imm, [{
+ // Return the byte immediate const-3 as an SDNode.
+ int32_t imm = N->getSExtValue();
+ return XformSToSM3Imm(imm);
+}]>;
-multiclass CMP32_rr_ri_s10<string OpcStr, string CextOp, PatFrag OpNode> {
- let CextOpcode = CextOp in {
- let InputType = "reg" in
- def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set (i1 PredRegs:$dst),
- (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
+// SDNode for converting immediate C to C-1.
+def DEC_CONST_UNSIGNED : SDNodeXForm<imm, [{
+ // Return the byte immediate const-1 as an SDNode.
+ uint32_t imm = N->getZExtValue();
+ return XformUToUM1Imm(imm);
+}]>;
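+// These transforms apply to the matched immediate on the output side of a
+// Pat. Illustrative use (hypothetical pattern): fold "Rs >= #C" into
+// "Rs > #C-1", which holds for any integer C:
+//   def: Pat<(i1 (setge (i32 IntRegs:$Rs), s10ImmPred:$s10)),
+//            (C2_cmpgti IntRegs:$Rs, (DEC_CONST_SIGNED s10ImmPred:$s10))>;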
- let isExtendable = 1, opExtendable = 2, isExtentSigned = 1,
- opExtentBits = 10, InputType = "imm" in
- def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Ext:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set (i1 PredRegs:$dst),
- (OpNode (i32 IntRegs:$b), s10ExtPred:$c))]>;
+//===----------------------------------------------------------------------===//
+// Compare
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, isCompare = 1, InputType = "imm", isExtendable = 1,
+ opExtendable = 2 in
+class T_CMP <string mnemonic, bits<2> MajOp, bit isNot, Operand ImmOp>
+ : ALU32Inst <(outs PredRegs:$dst),
+ (ins IntRegs:$src1, ImmOp:$src2),
+ "$dst = "#!if(isNot, "!","")#mnemonic#"($src1, #$src2)",
+ [], "",ALU32_2op_tc_2early_SLOT0123 >, ImmRegRel {
+ bits<2> dst;
+ bits<5> src1;
+ bits<10> src2;
+ let CextOpcode = mnemonic;
+ let opExtentBits = !if(!eq(mnemonic, "cmp.gtu"), 9, 10);
+ let isExtentSigned = !if(!eq(mnemonic, "cmp.gtu"), 0, 1);
+
+ let IClass = 0b0111;
+
+ let Inst{27-24} = 0b0101;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = !if(!eq(mnemonic, "cmp.gtu"), 0, src2{9});
+ let Inst{20-16} = src1;
+ let Inst{13-5} = src2{8-0};
+ let Inst{4} = isNot;
+ let Inst{3-2} = 0b00;
+ let Inst{1-0} = dst;
}
-}
-multiclass CMP32_rr_ri_u9<string OpcStr, string CextOp, PatFrag OpNode> {
- let CextOpcode = CextOp in {
- let InputType = "reg" in
- def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set (i1 PredRegs:$dst),
- (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
+def C2_cmpeqi : T_CMP <"cmp.eq", 0b00, 0, s10Ext>;
+def C2_cmpgti : T_CMP <"cmp.gt", 0b01, 0, s10Ext>;
+def C2_cmpgtui : T_CMP <"cmp.gtu", 0b10, 0, u9Ext>;
- let isExtendable = 1, opExtendable = 2, isExtentSigned = 0,
- opExtentBits = 9, InputType = "imm" in
- def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Ext:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set (i1 PredRegs:$dst),
- (OpNode (i32 IntRegs:$b), u9ExtPred:$c))]>;
- }
-}
+class T_CMP_pat <InstHexagon MI, PatFrag OpNode, PatLeaf ImmPred>
+ : Pat<(i1 (OpNode (i32 IntRegs:$src1), ImmPred:$src2)),
+ (MI IntRegs:$src1, ImmPred:$src2)>;
-multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in
- def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Ext:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
- s8ExtPred:$c))]>;
-}
-}
+def : T_CMP_pat <C2_cmpeqi, seteq, s10ImmPred>;
+def : T_CMP_pat <C2_cmpgti, setgt, s10ImmPred>;
+def : T_CMP_pat <C2_cmpgtui, setugt, u9ImmPred>;
//===----------------------------------------------------------------------===//
-// ALU32/ALU (Instructions with register-register form)
+// ALU32/ALU +
//===----------------------------------------------------------------------===//
def SDTHexagonI64I32I32 : SDTypeProfile<1, 2,
[SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>;
-def HexagonWrapperCombineII :
- SDNode<"HexagonISD::WrapperCombineII", SDTHexagonI64I32I32>;
-
-def HexagonWrapperCombineRR :
- SDNode<"HexagonISD::WrapperCombineRR", SDTHexagonI64I32I32>;
+def HexagonCOMBINE : SDNode<"HexagonISD::COMBINE", SDTHexagonI64I32I32>;
let hasSideEffects = 0, hasNewValue = 1, InputType = "reg" in
class T_ALU32_3op<string mnemonic, bits<3> MajOp, bits<3> MinOp, bit OpsRev,
@@ -145,6 +162,41 @@ class T_ALU32_3op_pred<string mnemonic, bits<3> MajOp, bits<3> MinOp,
let Inst{4-0} = Rd;
}
+class T_ALU32_combineh<string Op1, string Op2, bits<3> MajOp, bits<3> MinOp,
+ bit OpsRev>
+ : T_ALU32_3op<"", MajOp, MinOp, OpsRev, 0> {
+ let AsmString = "$Rd = combine($Rs"#Op1#", $Rt"#Op2#")";
+}
+
+def A2_combine_hh : T_ALU32_combineh<".h", ".h", 0b011, 0b100, 1>;
+def A2_combine_hl : T_ALU32_combineh<".h", ".l", 0b011, 0b101, 1>;
+def A2_combine_lh : T_ALU32_combineh<".l", ".h", 0b011, 0b110, 1>;
+def A2_combine_ll : T_ALU32_combineh<".l", ".l", 0b011, 0b111, 1>;
+
+class T_ALU32_3op_sfx<string mnemonic, string suffix, bits<3> MajOp,
+ bits<3> MinOp, bit OpsRev, bit IsComm>
+ : T_ALU32_3op<"", MajOp, MinOp, OpsRev, IsComm> {
+ let AsmString = "$Rd = "#mnemonic#"($Rs, $Rt)"#suffix;
+}
+
+def A2_svaddh : T_ALU32_3op<"vaddh", 0b110, 0b000, 0, 1>;
+def A2_svsubh : T_ALU32_3op<"vsubh", 0b110, 0b100, 1, 0>;
+
+let Defs = [USR_OVF], Itinerary = ALU32_3op_tc_2_SLOT0123 in {
+ def A2_svaddhs : T_ALU32_3op_sfx<"vaddh", ":sat", 0b110, 0b001, 0, 1>;
+ def A2_addsat : T_ALU32_3op_sfx<"add", ":sat", 0b110, 0b010, 0, 1>;
+ def A2_svadduhs : T_ALU32_3op_sfx<"vadduh", ":sat", 0b110, 0b011, 0, 1>;
+ def A2_svsubhs : T_ALU32_3op_sfx<"vsubh", ":sat", 0b110, 0b101, 1, 0>;
+ def A2_subsat : T_ALU32_3op_sfx<"sub", ":sat", 0b110, 0b110, 1, 0>;
+ def A2_svsubuhs : T_ALU32_3op_sfx<"vsubuh", ":sat", 0b110, 0b111, 1, 0>;
+}
+
+let Itinerary = ALU32_3op_tc_2_SLOT0123 in
+def A2_svavghs : T_ALU32_3op_sfx<"vavgh", ":rnd", 0b111, 0b001, 0, 1>;
+
+def A2_svavgh : T_ALU32_3op<"vavgh", 0b111, 0b000, 0, 1>;
+def A2_svnavgh : T_ALU32_3op<"vnavgh", 0b111, 0b011, 1, 0>;
+
multiclass T_ALU32_3op_p<string mnemonic, bits<3> MajOp, bits<3> MinOp,
bit OpsRev> {
def t : T_ALU32_3op_pred<mnemonic, MajOp, MinOp, OpsRev, 0, 0>;
@@ -160,7 +212,6 @@ multiclass T_ALU32_3op_A2<string mnemonic, bits<3> MajOp, bits<3> MinOp,
defm A2_p#NAME : T_ALU32_3op_p<mnemonic, MajOp, MinOp, OpsRev>;
}
-let isCodeGenOnly = 0 in
defm add : T_ALU32_3op_A2<"add", 0b011, 0b000, 0, 1>;
defm and : T_ALU32_3op_A2<"and", 0b001, 0b000, 0, 1>;
defm or : T_ALU32_3op_A2<"or", 0b001, 0b001, 0, 1>;
@@ -178,282 +229,418 @@ def: BinOp32_pat<or, A2_or, i32>;
def: BinOp32_pat<sub, A2_sub, i32>;
def: BinOp32_pat<xor, A2_xor, i32>;
-multiclass ALU32_Pbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : ALU32_rr<(outs RC:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs: $src3),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
- ") $dst = ")#mnemonic#"($src2, $src3)",
- []>;
+// A few special cases producing register pairs:
+let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0 in {
+ def S2_packhl : T_ALU32_3op <"packhl", 0b101, 0b100, 0, 0>;
+
+ let isPredicable = 1 in
+ def A2_combinew : T_ALU32_3op <"combine", 0b101, 0b000, 0, 0>;
+
+ // Conditional combinew uses "newt/f" instead of "t/fnew".
+ def C2_ccombinewt : T_ALU32_3op_pred<"combine", 0b101, 0b000, 0, 0, 0>;
+ def C2_ccombinewf : T_ALU32_3op_pred<"combine", 0b101, 0b000, 0, 1, 0>;
+ def C2_ccombinewnewt : T_ALU32_3op_pred<"combine", 0b101, 0b000, 0, 0, 1>;
+ def C2_ccombinewnewf : T_ALU32_3op_pred<"combine", 0b101, 0b000, 0, 1, 1>;
}
-multiclass ALU32_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ALU32_Pbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ALU32_Pbase<mnemonic, RC, PredNot, 1>;
- }
+let hasSideEffects = 0, hasNewValue = 1, isCompare = 1, InputType = "reg" in
+class T_ALU32_3op_cmp<string mnemonic, bits<2> MinOp, bit IsNeg, bit IsComm>
+ : ALU32_rr<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Pd = "#mnemonic#"($Rs, $Rt)",
+ [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel {
+ let CextOpcode = mnemonic;
+ let isCommutable = IsComm;
+ bits<5> Rs;
+ bits<5> Rt;
+ bits<2> Pd;
+
+ let IClass = 0b1111;
+ let Inst{27-24} = 0b0010;
+ let Inst{22-21} = MinOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{4} = IsNeg;
+ let Inst{3-2} = 0b00;
+ let Inst{1-0} = Pd;
}
-let InputType = "reg" in
-multiclass ALU32_base<string mnemonic, string CextOp, SDNode OpNode> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_rr in {
- let isPredicable = 1 in
- def NAME : ALU32_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = "#mnemonic#"($src1, $src2)",
- [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
- let neverHasSideEffects = 1, isPredicated = 1 in {
- defm Pt : ALU32_Pred<mnemonic, IntRegs, 0>;
- defm NotPt : ALU32_Pred<mnemonic, IntRegs, 1>;
- }
- }
+let Itinerary = ALU32_3op_tc_2early_SLOT0123 in {
+ def C2_cmpeq : T_ALU32_3op_cmp< "cmp.eq", 0b00, 0, 1>;
+ def C2_cmpgt : T_ALU32_3op_cmp< "cmp.gt", 0b10, 0, 0>;
+ def C2_cmpgtu : T_ALU32_3op_cmp< "cmp.gtu", 0b11, 0, 0>;
}
-defm SUB_rr : ALU32_base<"sub", "SUB", sub>, ImmRegRel, PredNewRel;
+// Patfrag to convert the usual comparison patfrags (e.g. setlt) to ones
+// that reverse the order of the operands.
+class RevCmp<PatFrag F> : PatFrag<(ops node:$rhs, node:$lhs), F.Fragment>;
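+// Spelled out: RevCmp<setlt> keeps setlt's fragment but declares its
+// operands as (ops node:$rhs, node:$lhs), so the selected instruction
+// receives them swapped; Rs < Rt thus selects as cmp.gt(Rt, Rs), as the
+// RevCmp<setlt> pattern below shows.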
-// Combines the two integer registers SRC1 and SRC2 into a double register.
-let isPredicable = 1 in
-class T_Combine : ALU32_rr<(outs DoubleRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = combine($src1, $src2)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (HexagonWrapperCombineRR (i32 IntRegs:$src1),
- (i32 IntRegs:$src2))))]>;
-
-multiclass Combine_base {
- let BaseOpcode = "combine" in {
- def NAME : T_Combine;
- let neverHasSideEffects = 1, isPredicated = 1 in {
- defm Pt : ALU32_Pred<"combine", DoubleRegs, 0>;
- defm NotPt : ALU32_Pred<"combine", DoubleRegs, 1>;
- }
- }
-}
+// Pats for compares. They use PatFrags as operands, not SDNodes,
+// since seteq/setgt/etc. are defined as PatFrags.
+class T_cmp32_rr_pat<InstHexagon MI, PatFrag Op, ValueType VT>
+ : Pat<(VT (Op (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))),
+ (VT (MI IntRegs:$Rs, IntRegs:$Rt))>;
-defm COMBINE_rr : Combine_base, PredNewRel;
+def: T_cmp32_rr_pat<C2_cmpeq, seteq, i1>;
+def: T_cmp32_rr_pat<C2_cmpgt, setgt, i1>;
+def: T_cmp32_rr_pat<C2_cmpgtu, setugt, i1>;
-// Combines the two immediates SRC1 and SRC2 into a double register.
-class COMBINE_imm<Operand imm1, Operand imm2, PatLeaf pat1, PatLeaf pat2> :
- ALU32_ii<(outs DoubleRegs:$dst), (ins imm1:$src1, imm2:$src2),
- "$dst = combine(#$src1, #$src2)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (HexagonWrapperCombineII (i32 pat1:$src1), (i32 pat2:$src2))))]>;
+def: T_cmp32_rr_pat<C2_cmpgt, RevCmp<setlt>, i1>;
+def: T_cmp32_rr_pat<C2_cmpgtu, RevCmp<setult>, i1>;
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 8 in
-def COMBINE_Ii : COMBINE_imm<s8Ext, s8Imm, s8ExtPred, s8ImmPred>;
+let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1 in
+def C2_mux: ALU32_rr<(outs IntRegs:$Rd),
+ (ins PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = mux($Pu, $Rs, $Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel {
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let CextOpcode = "mux";
+ let InputType = "reg";
+ let hasSideEffects = 0;
+ let IClass = 0b1111;
+
+ let Inst{27-24} = 0b0100;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{6-5} = Pu;
+ let Inst{4-0} = Rd;
+}
+
+def: Pat<(i32 (select (i1 PredRegs:$Pu), (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))),
+ (C2_mux PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt)>;
+
+// Combines the two immediates into a double register.
+// Increase complexity to make it greater than any complexity of a combine
+// that involves a register.
+
+let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
+ isExtentSigned = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 1,
+ AddedComplexity = 75 in
+def A2_combineii: ALU32Inst <(outs DoubleRegs:$Rdd), (ins s8Ext:$s8, s8Imm:$S8),
+ "$Rdd = combine(#$s8, #$S8)",
+ [(set (i64 DoubleRegs:$Rdd),
+ (i64 (HexagonCOMBINE (i32 s8ExtPred:$s8), (i32 s8ImmPred:$S8))))]> {
+ bits<5> Rdd;
+ bits<8> s8;
+ bits<8> S8;
+
+ let IClass = 0b0111;
+ let Inst{27-23} = 0b11000;
+ let Inst{22-16} = S8{7-1};
+ let Inst{13} = S8{0};
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rdd;
+ }
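+// Why AddedComplexity = 75 (illustrative): for a combine of two constants,
+// both the immediate form and a register route could match, and the extra
+// complexity makes the single-instruction form win:
+//   A2_combineii #1, #2                        // preferred
+//   A2_combinew (A2_tfrsi #1), (A2_tfrsi #2)   // register route, loses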
//===----------------------------------------------------------------------===//
-// ALU32/ALU (ADD with register-immediate form)
+// Template class for predicated ADD of a reg and an Immediate value.
//===----------------------------------------------------------------------===//
-multiclass ALU32ri_Pbase<string mnemonic, bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s8Ext: $src3),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
- ") $dst = ")#mnemonic#"($src2, #$src3)",
- []>;
-}
+let hasNewValue = 1, hasSideEffects = 0 in
+class T_Addri_Pred <bit PredNot, bit PredNew>
+ : ALU32_ri <(outs IntRegs:$Rd),
+ (ins PredRegs:$Pu, IntRegs:$Rs, s8Ext:$s8),
+ !if(PredNot, "if (!$Pu", "if ($Pu")#!if(PredNew,".new) $Rd = ",
+ ") $Rd = ")#"add($Rs, #$s8)"> {
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<5> Rs;
+ bits<8> s8;
+
+ let isPredicatedNew = PredNew;
+ let IClass = 0b0111;
+
+ let Inst{27-24} = 0b0100;
+ let Inst{23} = PredNot;
+ let Inst{22-21} = Pu;
+ let Inst{20-16} = Rs;
+ let Inst{13} = PredNew;
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rd;
+ }
-multiclass ALU32ri_Pred<string mnemonic, bit PredNot> {
+//===----------------------------------------------------------------------===//
+// A2_addi: Add a signed immediate to a register.
+//===----------------------------------------------------------------------===//
+let hasNewValue = 1, hasSideEffects = 0 in
+class T_Addri <Operand immOp>
+ : ALU32_ri <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, immOp:$s16),
+ "$Rd = add($Rs, #$s16)", [], "", ALU32_ADDI_tc_1_SLOT0123> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<16> s16;
+
+ let IClass = 0b1011;
+
+ let Inst{27-21} = s16{15-9};
+ let Inst{20-16} = Rs;
+ let Inst{13-5} = s16{8-0};
+ let Inst{4-0} = Rd;
+ }
+
+//===----------------------------------------------------------------------===//
+// Multiclass for ADD of a register and an immediate value.
+//===----------------------------------------------------------------------===//
+multiclass Addri_Pred<string mnemonic, bit PredNot> {
let isPredicatedFalse = PredNot in {
- defm _c#NAME : ALU32ri_Pbase<mnemonic, PredNot, 0>;
+ def NAME : T_Addri_Pred<PredNot, 0>;
// Predicate new
- defm _cdn#NAME : ALU32ri_Pbase<mnemonic, PredNot, 1>;
+ def NAME#new : T_Addri_Pred<PredNot, 1>;
}
}
-let isExtendable = 1, InputType = "imm" in
-multiclass ALU32ri_base<string mnemonic, string CextOp, SDNode OpNode> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_ri in {
- let opExtendable = 2, isExtentSigned = 1, opExtentBits = 16,
- isPredicable = 1 in
- def NAME : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s16Ext:$src2),
- "$dst = "#mnemonic#"($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
- (s16ExtPred:$src2)))]>;
+let isExtendable = 1, isExtentSigned = 1, InputType = "imm" in
+multiclass Addri_base<string mnemonic, SDNode OpNode> {
+ let CextOpcode = mnemonic, BaseOpcode = mnemonic#_ri in {
+ let opExtendable = 2, opExtentBits = 16, isPredicable = 1 in
+ def A2_#NAME : T_Addri<s16Ext>;
- let opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
- neverHasSideEffects = 1, isPredicated = 1 in {
- defm Pt : ALU32ri_Pred<mnemonic, 0>;
- defm NotPt : ALU32ri_Pred<mnemonic, 1>;
+ let opExtendable = 3, opExtentBits = 8, isPredicated = 1 in {
+ defm A2_p#NAME#t : Addri_Pred<mnemonic, 0>;
+ defm A2_p#NAME#f : Addri_Pred<mnemonic, 1>;
}
}
}
-defm ADD_ri : ALU32ri_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
+defm addi : Addri_base<"add", add>, ImmRegRel, PredNewRel;
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 10,
-CextOpcode = "OR", InputType = "imm" in
-def OR_ri : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s10Ext:$src2),
- "$dst = or($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
- s10ExtPred:$src2))]>, ImmRegRel;
+def: Pat<(i32 (add I32:$Rs, s16ExtPred:$s16)),
+ (i32 (A2_addi I32:$Rs, imm:$s16))>;
+//===----------------------------------------------------------------------===//
+// Template class used for the following ALU32 instructions.
+// Rd=and(Rs,#s10)
+// Rd=or(Rs,#s10)
+//===----------------------------------------------------------------------===//
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 10,
-InputType = "imm", CextOpcode = "AND" in
-def AND_ri : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s10Ext:$src2),
- "$dst = and($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
- s10ExtPred:$src2))]>, ImmRegRel;
+InputType = "imm", hasNewValue = 1 in
+class T_ALU32ri_logical <string mnemonic, SDNode OpNode, bits<2> MinOp>
+ : ALU32_ri <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, s10Ext:$s10),
+ "$Rd = "#mnemonic#"($Rs, #$s10)" ,
+ [(set (i32 IntRegs:$Rd), (OpNode (i32 IntRegs:$Rs), s10ExtPred:$s10))]> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<10> s10;
+ let CextOpcode = mnemonic;
+
+ let IClass = 0b0111;
+
+ let Inst{27-24} = 0b0110;
+ let Inst{23-22} = MinOp;
+ let Inst{21} = s10{9};
+ let Inst{20-16} = Rs;
+ let Inst{13-5} = s10{8-0};
+ let Inst{4-0} = Rd;
+ }
-// Nop.
-let neverHasSideEffects = 1, isCodeGenOnly = 0 in
-def NOP : ALU32_rr<(outs), (ins),
- "nop",
- []>;
+def A2_orir : T_ALU32ri_logical<"or", or, 0b10>, ImmRegRel;
+def A2_andir : T_ALU32ri_logical<"and", and, 0b00>, ImmRegRel;
+// Subtract register from immediate
// Rd32=sub(#s10,Rs32)
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 10,
-CextOpcode = "SUB", InputType = "imm" in
-def SUB_ri : ALU32_ri<(outs IntRegs:$dst),
- (ins s10Ext:$src1, IntRegs:$src2),
- "$dst = sub(#$src1, $src2)",
- [(set IntRegs:$dst, (sub s10ExtPred:$src1, IntRegs:$src2))]>,
- ImmRegRel;
-
-// Rd = not(Rs) gets mapped to Rd=sub(#-1, Rs).
-def : Pat<(not (i32 IntRegs:$src1)),
- (SUB_ri -1, (i32 IntRegs:$src1))>;
-
-// Rd = neg(Rs) gets mapped to Rd=sub(#0, Rs).
-// Pattern definition for 'neg' was not necessary.
-
-multiclass TFR_Pred<bit PredNot> {
- let isPredicatedFalse = PredNot in {
- def _c#NAME : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#") $dst = $src2",
- []>;
- // Predicate new
- let isPredicatedNew = 1 in
- def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = $src2",
- []>;
+let isExtendable = 1, CextOpcode = "sub", opExtendable = 1, isExtentSigned = 1,
+ opExtentBits = 10, InputType = "imm", hasNewValue = 1, hasSideEffects = 0 in
+def A2_subri: ALU32_ri <(outs IntRegs:$Rd), (ins s10Ext:$s10, IntRegs:$Rs),
+ "$Rd = sub(#$s10, $Rs)", []>, ImmRegRel {
+ bits<5> Rd;
+ bits<10> s10;
+ bits<5> Rs;
+
+ let IClass = 0b0111;
+
+ let Inst{27-22} = 0b011001;
+ let Inst{21} = s10{9};
+ let Inst{20-16} = Rs;
+ let Inst{13-5} = s10{8-0};
+ let Inst{4-0} = Rd;
}
+
+// Nop.
+let hasSideEffects = 0 in
+def A2_nop: ALU32Inst <(outs), (ins), "nop" > {
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b1111;
}
-let InputType = "reg", neverHasSideEffects = 1 in
-multiclass TFR_base<string CextOp> {
- let CextOpcode = CextOp, BaseOpcode = CextOp in {
- let isPredicable = 1 in
- def NAME : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
- "$dst = $src1",
- []>;
+def: Pat<(sub s10ExtPred:$s10, IntRegs:$Rs),
+ (A2_subri imm:$s10, IntRegs:$Rs)>;
- let isPredicated = 1 in {
- defm Pt : TFR_Pred<0>;
- defm NotPt : TFR_Pred<1>;
- }
+// Rd = not(Rs) gets mapped to Rd=sub(#-1, Rs).
+def: Pat<(not (i32 IntRegs:$src1)),
+ (A2_subri -1, IntRegs:$src1)>;
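+// This is the two's-complement identity ~Rs == -1 - Rs, so "Rd = not(Rs)"
+// and "Rd = sub(#-1, Rs)" compute the same value.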
+
+let hasSideEffects = 0, hasNewValue = 1 in
+class T_tfr16<bit isHi>
+ : ALU32Inst <(outs IntRegs:$Rx), (ins IntRegs:$src1, u16Imm:$u16),
+ "$Rx"#!if(isHi, ".h", ".l")#" = #$u16",
+ [], "$src1 = $Rx" > {
+ bits<5> Rx;
+ bits<16> u16;
+
+ let IClass = 0b0111;
+ let Inst{27-26} = 0b00;
+ let Inst{25-24} = !if(isHi, 0b10, 0b01);
+ let Inst{23-22} = u16{15-14};
+ let Inst{21} = 0b1;
+ let Inst{20-16} = Rx;
+ let Inst{13-0} = u16{13-0};
}
-}
-class T_TFR64_Pred<bit PredNot, bit isPredNew>
- : ALU32_rr<(outs DoubleRegs:$dst),
- (ins PredRegs:$src1, DoubleRegs:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#
- !if(isPredNew, ".new) ", ") ")#"$dst = $src2", []>
-{
+def A2_tfril: T_tfr16<0>;
+def A2_tfrih: T_tfr16<1>;
+
+// Conditional transfer is an alias to conditional "Rd = add(Rs, #0)".
+let isPredicated = 1, hasNewValue = 1, opNewValue = 0 in
+class T_tfr_pred<bit isPredNot, bit isPredNew>
+ : ALU32Inst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ("#!if(isPredNot, "!", "")#
+ "$src1"#!if(isPredNew, ".new", "")#
+ ") $dst = $src2"> {
bits<5> dst;
bits<2> src1;
bits<5> src2;
- let IClass = 0b1111;
- let Inst{27-24} = 0b1101;
+ let isPredicatedFalse = isPredNot;
+ let isPredicatedNew = isPredNew;
+ let IClass = 0b0111;
+
+ let Inst{27-24} = 0b0100;
+ let Inst{23} = isPredNot;
let Inst{13} = isPredNew;
- let Inst{7} = PredNot;
+ let Inst{12-5} = 0;
let Inst{4-0} = dst;
- let Inst{6-5} = src1;
- let Inst{20-17} = src2{4-1};
- let Inst{16} = 0b1;
- let Inst{12-9} = src2{4-1};
- let Inst{8} = 0b0;
-}
+ let Inst{22-21} = src1;
+ let Inst{20-16} = src2;
+ }
-multiclass TFR64_Pred<bit PredNot> {
- let isPredicatedFalse = PredNot in {
- def _c#NAME : T_TFR64_Pred<PredNot, 0>;
+let isPredicable = 1 in
+class T_tfr : ALU32Inst<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = $src"> {
+ bits<5> dst;
+ bits<5> src;
- let isPredicatedNew = 1 in
- def _cdn#NAME : T_TFR64_Pred<PredNot, 1>; // Predicate new
+ let IClass = 0b0111;
+
+ let Inst{27-21} = 0b0000011;
+ let Inst{20-16} = src;
+ let Inst{13} = 0b0;
+ let Inst{4-0} = dst;
+ }
+
+let InputType = "reg", hasNewValue = 1, hasSideEffects = 0 in
+multiclass tfr_base<string CextOp> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp in {
+ def NAME : T_tfr;
+
+ // Predicate
+ def t : T_tfr_pred<0, 0>;
+ def f : T_tfr_pred<1, 0>;
+ // Predicate new
+ def tnew : T_tfr_pred<0, 1>;
+ def fnew : T_tfr_pred<1, 1>;
}
}
-let neverHasSideEffects = 1 in
+// Assembler mapped to C2_ccombinew[t|f|newt|newf].
+// Please don't add bits to this instruction as it'll be converted into
+// 'combine' before object code emission.
+let isPredicated = 1 in
+class T_tfrp_pred<bit PredNot, bit PredNew>
+ : ALU32_rr <(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2),
+ "if ("#!if(PredNot, "!", "")#"$src1"
+ #!if(PredNew, ".new", "")#") $dst = $src2" > {
+ let isPredicatedFalse = PredNot;
+ let isPredicatedNew = PredNew;
+ }
+
+// Assembler mapped to A2_combinew.
+// Please don't add bits to this instruction as it'll be converted into
+// 'combine' before object code emission.
+class T_tfrp : ALU32Inst <(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src),
+ "$dst = $src">;
+
+let hasSideEffects = 0 in
multiclass TFR64_base<string BaseName> {
let BaseOpcode = BaseName in {
let isPredicable = 1 in
- def NAME : ALU32Inst <(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1),
- "$dst = $src1" > {
- bits<5> dst;
- bits<5> src1;
-
- let IClass = 0b1111;
- let Inst{27-23} = 0b01010;
- let Inst{4-0} = dst;
- let Inst{20-17} = src1{4-1};
- let Inst{16} = 0b1;
- let Inst{12-9} = src1{4-1};
- let Inst{8} = 0b0;
- }
-
- let isPredicated = 1 in {
- defm Pt : TFR64_Pred<0>;
- defm NotPt : TFR64_Pred<1>;
- }
+ def NAME : T_tfrp;
+ // Predicate
+ def t : T_tfrp_pred <0, 0>;
+ def f : T_tfrp_pred <1, 0>;
+ // Predicate new
+ def tnew : T_tfrp_pred <0, 1>;
+ def fnew : T_tfrp_pred <1, 1>;
}
}
-multiclass TFRI_Pred<bit PredNot> {
- let isMoveImm = 1, isPredicatedFalse = PredNot in {
- def _c#NAME : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s12Ext:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#") $dst = #$src2",
- []>;
+let InputType = "imm", isExtendable = 1, isExtentSigned = 1, opExtentBits = 12,
+ isMoveImm = 1, opExtendable = 2, BaseOpcode = "TFRI", CextOpcode = "TFR",
+ hasSideEffects = 0, isPredicated = 1, hasNewValue = 1 in
+class T_TFRI_Pred<bit PredNot, bit PredNew>
+ : ALU32_ri<(outs IntRegs:$Rd), (ins PredRegs:$Pu, s12Ext:$s12),
+ "if ("#!if(PredNot,"!","")#"$Pu"#!if(PredNew,".new","")#") $Rd = #$s12",
+ [], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel, PredNewRel {
+ let isPredicatedFalse = PredNot;
+ let isPredicatedNew = PredNew;
- // Predicate new
- let isPredicatedNew = 1 in
- def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s12Ext:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = #$src2",
- []>;
- }
-}
-
-let InputType = "imm", isExtendable = 1, isExtentSigned = 1 in
-multiclass TFRI_base<string CextOp> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#I in {
- let isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16,
- isMoveImm = 1, isPredicable = 1, isReMaterializable = 1 in
- def NAME : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
- "$dst = #$src1",
- [(set (i32 IntRegs:$dst), s16ExtPred:$src1)]>;
-
- let opExtendable = 2, opExtentBits = 12, neverHasSideEffects = 1,
- isPredicated = 1 in {
- defm Pt : TFRI_Pred<0>;
- defm NotPt : TFRI_Pred<1>;
- }
- }
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<12> s12;
+
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b1110;
+ let Inst{23} = PredNot;
+ let Inst{22-21} = Pu;
+ let Inst{20} = 0b0;
+ let Inst{19-16,12-5} = s12;
+ let Inst{13} = PredNew;
+ let Inst{4-0} = Rd;
}
-defm TFRI : TFRI_base<"TFR">, ImmRegRel, PredNewRel;
-defm TFR : TFR_base<"TFR">, ImmRegRel, PredNewRel;
-defm TFR64 : TFR64_base<"TFR64">, PredNewRel;
+def C2_cmoveit : T_TFRI_Pred<0, 0>;
+def C2_cmoveif : T_TFRI_Pred<1, 0>;
+def C2_cmovenewit : T_TFRI_Pred<0, 1>;
+def C2_cmovenewif : T_TFRI_Pred<1, 1>;
+
+let InputType = "imm", isExtendable = 1, isExtentSigned = 1,
+ CextOpcode = "TFR", BaseOpcode = "TFRI", hasNewValue = 1, opNewValue = 0,
+ isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16, isMoveImm = 1,
+ isPredicated = 0, isPredicable = 1, isReMaterializable = 1 in
+def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16Ext:$s16), "$Rd = #$s16",
+ [(set (i32 IntRegs:$Rd), s16ExtPred:$s16)], "", ALU32_2op_tc_1_SLOT0123>,
+ ImmRegRel, PredRel {
+ bits<5> Rd;
+ bits<16> s16;
+
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b1000;
+ let Inst{23-22,20-16,13-5} = s16;
+ let Inst{4-0} = Rd;
+}
+
+defm A2_tfr : tfr_base<"TFR">, ImmRegRel, PredNewRel;
+let isAsmParserOnly = 1 in
+defm A2_tfrp : TFR64_base<"TFR64">, PredNewRel;
+
+// Assembler mapped
+let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
+ isAsmParserOnly = 1 in
+def A2_tfrpi : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1),
+ "$dst = #$src1",
+ [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>;
+
+// TODO: see if this instruction can be deleted.
+let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
+ isAsmParserOnly = 1 in
+def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u6Ext:$src1),
+ "$dst = #$src1">;
-// Transfer control register.
-let neverHasSideEffects = 1 in
-def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
- "$dst = $src1",
- []>;
//===----------------------------------------------------------------------===//
// ALU32/ALU -
//===----------------------------------------------------------------------===//
@@ -462,159 +649,344 @@ def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
//===----------------------------------------------------------------------===//
// ALU32/PERM +
//===----------------------------------------------------------------------===//
+// Scalar mux register immediate.
+let hasSideEffects = 0, isExtentSigned = 1, CextOpcode = "MUX",
+ InputType = "imm", hasNewValue = 1, isExtendable = 1, opExtentBits = 8 in
+class T_MUX1 <bit MajOp, dag ins, string AsmStr>
+ : ALU32Inst <(outs IntRegs:$Rd), ins, AsmStr>, ImmRegRel {
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<8> s8;
+ bits<5> Rs;
+
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b0011;
+ let Inst{23} = MajOp;
+ let Inst{22-21} = Pu;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rd;
+}
+
+let opExtendable = 2 in
+def C2_muxri : T_MUX1<0b1, (ins PredRegs:$Pu, s8Ext:$s8, IntRegs:$Rs),
+ "$Rd = mux($Pu, #$s8, $Rs)">;
+
+let opExtendable = 3 in
+def C2_muxir : T_MUX1<0b0, (ins PredRegs:$Pu, IntRegs:$Rs, s8Ext:$s8),
+ "$Rd = mux($Pu, $Rs, #$s8)">;
+
+def : Pat<(i32 (select I1:$Pu, s8ExtPred:$s8, I32:$Rs)),
+ (C2_muxri I1:$Pu, s8ExtPred:$s8, I32:$Rs)>;
+
+def : Pat<(i32 (select I1:$Pu, I32:$Rs, s8ExtPred:$s8)),
+ (C2_muxir I1:$Pu, I32:$Rs, s8ExtPred:$s8)>;
+
+// C2_muxii: Scalar mux immediates.
+let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1,
+ opExtentBits = 8, opExtendable = 2 in
+def C2_muxii: ALU32Inst <(outs IntRegs:$Rd),
+ (ins PredRegs:$Pu, s8Ext:$s8, s8Imm:$S8),
+ "$Rd = mux($Pu, #$s8, #$S8)" ,
+ [(set (i32 IntRegs:$Rd),
+ (i32 (select I1:$Pu, s8ExtPred:$s8, s8ImmPred:$S8)))] > {
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<8> s8;
+ bits<8> S8;
+
+ let IClass = 0b0111;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-23} = Pu;
+ let Inst{22-16} = S8{7-1};
+ let Inst{13} = S8{0};
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rd;
+ }
+
+//===----------------------------------------------------------------------===//
+// template class for non-predicated alu32_2op instructions
+// - aslh, asrh, sxtb, sxth, zxth
+//===----------------------------------------------------------------------===//
+let hasNewValue = 1, opNewValue = 0 in
+class T_ALU32_2op <string mnemonic, bits<3> minOp> :
+ ALU32Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs),
+ "$Rd = "#mnemonic#"($Rs)", [] > {
+ bits<5> Rd;
+ bits<5> Rs;
+
+ let IClass = 0b0111;
+
+ let Inst{27-24} = 0b0000;
+ let Inst{23-21} = minOp;
+ let Inst{13} = 0b0;
+ let Inst{4-0} = Rd;
+ let Inst{20-16} = Rs;
+}
+
+//===----------------------------------------------------------------------===//
+// template class for predicated alu32_2op instructions
+// - aslh, asrh, sxtb, sxth, zxtb, zxth
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, hasNewValue = 1, opNewValue = 0 in
+class T_ALU32_2op_Pred <string mnemonic, bits<3> minOp, bit isPredNot,
+ bit isPredNew > :
+ ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs),
+ !if(isPredNot, "if (!$Pu", "if ($Pu")
+ #!if(isPredNew, ".new) ",") ")#"$Rd = "#mnemonic#"($Rs)"> {
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<5> Rs;
+
+ let IClass = 0b0111;
-let neverHasSideEffects = 1 in
-def COMBINE_ii : ALU32_ii<(outs DoubleRegs:$dst),
- (ins s8Imm:$src1, s8Imm:$src2),
- "$dst = combine(#$src1, #$src2)",
- []>;
-
-// Mux.
-def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
- DoubleRegs:$src2,
- DoubleRegs:$src3),
- "$dst = vmux($src1, $src2, $src3)",
- []>;
-
-let CextOpcode = "MUX", InputType = "reg" in
-def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
- IntRegs:$src2, IntRegs:$src3),
- "$dst = mux($src1, $src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))]>, ImmRegRel;
-
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8,
-CextOpcode = "MUX", InputType = "imm" in
-def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
- IntRegs:$src3),
- "$dst = mux($src1, #$src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 PredRegs:$src1), s8ExtPred:$src2,
- (i32 IntRegs:$src3))))]>, ImmRegRel;
-
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
-CextOpcode = "MUX", InputType = "imm" in
-def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2,
- s8Ext:$src3),
- "$dst = mux($src1, $src2, #$src3)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
- s8ExtPred:$src3)))]>, ImmRegRel;
-
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in
-def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
- s8Imm:$src3),
- "$dst = mux($src1, #$src2, #$src3)",
- [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
- s8ExtPred:$src2,
- s8ImmPred:$src3)))]>;
-
-// ALU32 - aslh, asrh, sxtb, sxth, zxtb, zxth
-multiclass ALU32_2op_Pbase<string mnemonic, bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : ALU32Inst<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
- ") $dst = ")#mnemonic#"($src2)">,
- Requires<[HasV4T]>;
-}
-
-multiclass ALU32_2op_Pred<string mnemonic, bit PredNot> {
+ let Inst{27-24} = 0b0000;
+ let Inst{23-21} = minOp;
+ let Inst{13} = 0b1;
+ let Inst{11} = isPredNot;
+ let Inst{10} = isPredNew;
+ let Inst{4-0} = Rd;
+ let Inst{9-8} = Pu;
+ let Inst{20-16} = Rs;
+}
+
+multiclass ALU32_2op_Pred<string mnemonic, bits<3> minOp, bit PredNot> {
let isPredicatedFalse = PredNot in {
- defm _c#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 0>;
+ def NAME : T_ALU32_2op_Pred<mnemonic, minOp, PredNot, 0>;
+
// Predicate new
- defm _cdn#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 1>;
+ let isPredicatedNew = 1 in
+ def NAME#new : T_ALU32_2op_Pred<mnemonic, minOp, PredNot, 1>;
}
}
-multiclass ALU32_2op_base<string mnemonic> {
+multiclass ALU32_2op_base<string mnemonic, bits<3> minOp> {
let BaseOpcode = mnemonic in {
- let isPredicable = 1, neverHasSideEffects = 1 in
- def NAME : ALU32Inst<(outs IntRegs:$dst),
- (ins IntRegs:$src1),
- "$dst = "#mnemonic#"($src1)">;
-
- let Predicates = [HasV4T], validSubTargets = HasV4SubT, isPredicated = 1,
- neverHasSideEffects = 1 in {
- defm Pt_V4 : ALU32_2op_Pred<mnemonic, 0>;
- defm NotPt_V4 : ALU32_2op_Pred<mnemonic, 1>;
+ let isPredicable = 1, hasSideEffects = 0 in
+ def A2_#NAME : T_ALU32_2op<mnemonic, minOp>;
+
+ let isPredicated = 1, hasSideEffects = 0 in {
+ defm A4_p#NAME#t : ALU32_2op_Pred<mnemonic, minOp, 0>;
+ defm A4_p#NAME#f : ALU32_2op_Pred<mnemonic, minOp, 1>;
}
}
}
-defm ASLH : ALU32_2op_base<"aslh">, PredNewRel;
-defm ASRH : ALU32_2op_base<"asrh">, PredNewRel;
-defm SXTB : ALU32_2op_base<"sxtb">, PredNewRel;
-defm SXTH : ALU32_2op_base<"sxth">, PredNewRel;
-defm ZXTB : ALU32_2op_base<"zxtb">, PredNewRel;
-defm ZXTH : ALU32_2op_base<"zxth">, PredNewRel;
+defm aslh : ALU32_2op_base<"aslh", 0b000>, PredNewRel;
+defm asrh : ALU32_2op_base<"asrh", 0b001>, PredNewRel;
+defm sxtb : ALU32_2op_base<"sxtb", 0b101>, PredNewRel;
+defm sxth : ALU32_2op_base<"sxth", 0b111>, PredNewRel;
+defm zxth : ALU32_2op_base<"zxth", 0b110>, PredNewRel;
+
+// Rd=zxtb(Rs): assembler mapped to Rd=and(Rs,#255).
+// The compiler wants to generate 'zxtb' instead of 'and' because 'zxtb' has
+// predicated forms while 'and' doesn't. Since the integrated assembler can't
+// handle 'mapped' instructions, we need to encode 'zxtb' the same as 'and'
+// with the immediate operand set to '255'.
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_ZXTB: ALU32Inst < (outs IntRegs:$Rd), (ins IntRegs:$Rs),
+ "$Rd = zxtb($Rs)", [] > { // Rd = and(Rs,255)
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<10> s10 = 255;
+
+ let IClass = 0b0111;
+
+ let Inst{27-22} = 0b011000;
+ let Inst{4-0} = Rd;
+ let Inst{20-16} = Rs;
+ let Inst{21} = s10{9};
+ let Inst{13-5} = s10{8-0};
+}
-def : Pat <(shl (i32 IntRegs:$src1), (i32 16)),
- (ASLH IntRegs:$src1)>;
+// Rd=zxtb(Rs): assembler mapped to "Rd=and(Rs,#255)".
+multiclass ZXTB_base <string mnemonic, bits<3> minOp> {
+ let BaseOpcode = mnemonic in {
+ let isPredicable = 1, hasSideEffects = 0 in
+ def A2_#NAME : T_ZXTB;
-def : Pat <(sra (i32 IntRegs:$src1), (i32 16)),
- (ASRH IntRegs:$src1)>;
+ let isPredicated = 1, hasSideEffects = 0 in {
+ defm A4_p#NAME#t : ALU32_2op_Pred<mnemonic, minOp, 0>;
+ defm A4_p#NAME#f : ALU32_2op_Pred<mnemonic, minOp, 1>;
+ }
+ }
+}
-def : Pat <(sext_inreg (i32 IntRegs:$src1), i8),
- (SXTB IntRegs:$src1)>;
+defm zxtb : ZXTB_base<"zxtb",0b100>, PredNewRel;
-def : Pat <(sext_inreg (i32 IntRegs:$src1), i16),
- (SXTH IntRegs:$src1)>;
+def: Pat<(shl I32:$src1, (i32 16)), (A2_aslh I32:$src1)>;
+def: Pat<(sra I32:$src1, (i32 16)), (A2_asrh I32:$src1)>;
+def: Pat<(sext_inreg I32:$src1, i8), (A2_sxtb I32:$src1)>;
+def: Pat<(sext_inreg I32:$src1, i16), (A2_sxth I32:$src1)>;
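+// Semantics of these selections, in C terms (illustrative):
+//   A2_aslh: x << 16              A2_asrh: (int32_t)x >> 16
+//   A2_sxtb: (int32_t)(int8_t)x   A2_sxth: (int32_t)(int16_t)x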
//===----------------------------------------------------------------------===//
-// ALU32/PERM -
+// Template class for vector add and avg
//===----------------------------------------------------------------------===//
+class T_VectALU_64 <string opc, bits<3> majOp, bits<3> minOp,
+ bit isSat, bit isRnd, bit isCrnd, bit SwapOps >
+ : ALU64_rr < (outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rdd = "#opc#"($Rss, $Rtt)"#!if(isRnd, ":rnd", "")
+ #!if(isCrnd,":crnd","")
+ #!if(isSat, ":sat", ""),
+ [], "", ALU64_tc_2_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-24} = 0b0011;
+ let Inst{23-21} = majOp;
+ let Inst{20-16} = !if (SwapOps, Rtt, Rss);
+ let Inst{12-8} = !if (SwapOps, Rss, Rtt);
+ let Inst{7-5} = minOp;
+ let Inst{4-0} = Rdd;
+ }
-//===----------------------------------------------------------------------===//
-// ALU32/PRED +
-//===----------------------------------------------------------------------===//
+// ALU64 - Vector add
+// Rdd=vadd[u][bhw](Rss,Rtt)
+let Itinerary = ALU64_tc_1_SLOT23 in {
+ def A2_vaddub : T_VectALU_64 < "vaddub", 0b000, 0b000, 0, 0, 0, 0>;
+ def A2_vaddh : T_VectALU_64 < "vaddh", 0b000, 0b010, 0, 0, 0, 0>;
+ def A2_vaddw : T_VectALU_64 < "vaddw", 0b000, 0b101, 0, 0, 0, 0>;
+}
-// Compare.
-defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", "CMPGTU", setugt>, ImmRegRel;
-defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", "CMPGT", setgt>, ImmRegRel;
-defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", "CMPEQ", seteq>, ImmRegRel;
+// Rdd=vadd[u][bhw](Rss,Rtt):sat
+let Defs = [USR_OVF] in {
+ def A2_vaddubs : T_VectALU_64 < "vaddub", 0b000, 0b001, 1, 0, 0, 0>;
+ def A2_vaddhs : T_VectALU_64 < "vaddh", 0b000, 0b011, 1, 0, 0, 0>;
+ def A2_vadduhs : T_VectALU_64 < "vadduh", 0b000, 0b100, 1, 0, 0, 0>;
+ def A2_vaddws : T_VectALU_64 < "vaddw", 0b000, 0b110, 1, 0, 0, 0>;
+}
-// SDNode for converting immediate C to C-1.
-def DEC_CONST_SIGNED : SDNodeXForm<imm, [{
- // Return the byte immediate const-1 as an SDNode.
- int32_t imm = N->getSExtValue();
- return XformSToSM1Imm(imm);
-}]>;
+// ALU64 - Vector average
+// Rdd=vavg[u][bhw](Rss,Rtt)
+let Itinerary = ALU64_tc_1_SLOT23 in {
+ def A2_vavgub : T_VectALU_64 < "vavgub", 0b010, 0b000, 0, 0, 0, 0>;
+ def A2_vavgh : T_VectALU_64 < "vavgh", 0b010, 0b010, 0, 0, 0, 0>;
+ def A2_vavguh : T_VectALU_64 < "vavguh", 0b010, 0b101, 0, 0, 0, 0>;
+ def A2_vavgw : T_VectALU_64 < "vavgw", 0b011, 0b000, 0, 0, 0, 0>;
+ def A2_vavguw : T_VectALU_64 < "vavguw", 0b011, 0b011, 0, 0, 0, 0>;
+}
-// SDNode for converting immediate C to C-1.
-def DEC_CONST_UNSIGNED : SDNodeXForm<imm, [{
- // Return the byte immediate const-1 as an SDNode.
- uint32_t imm = N->getZExtValue();
- return XformUToUM1Imm(imm);
-}]>;
+// Rdd=vavg[u][bhw](Rss,Rtt)[:rnd|:crnd]
+def A2_vavgubr : T_VectALU_64 < "vavgub", 0b010, 0b001, 0, 1, 0, 0>;
+def A2_vavghr : T_VectALU_64 < "vavgh", 0b010, 0b011, 0, 1, 0, 0>;
+def A2_vavghcr : T_VectALU_64 < "vavgh", 0b010, 0b100, 0, 0, 1, 0>;
+def A2_vavguhr : T_VectALU_64 < "vavguh", 0b010, 0b110, 0, 1, 0, 0>;
+
+def A2_vavgwr : T_VectALU_64 < "vavgw", 0b011, 0b001, 0, 1, 0, 0>;
+def A2_vavgwcr : T_VectALU_64 < "vavgw", 0b011, 0b010, 0, 0, 1, 0>;
+def A2_vavguwr : T_VectALU_64 < "vavguw", 0b011, 0b100, 0, 1, 0, 0>;
+
+// Rdd=vnavg[bh](Rss,Rtt)
+let Itinerary = ALU64_tc_1_SLOT23 in {
+ def A2_vnavgh : T_VectALU_64 < "vnavgh", 0b100, 0b000, 0, 0, 0, 1>;
+ def A2_vnavgw : T_VectALU_64 < "vnavgw", 0b100, 0b011, 0, 0, 0, 1>;
+}
+
+// Rdd=vnavg[bh](Rss,Rtt)[:rnd|:crnd]:sat
+let Defs = [USR_OVF] in {
+ def A2_vnavghr : T_VectALU_64 < "vnavgh", 0b100, 0b001, 1, 1, 0, 1>;
+ def A2_vnavghcr : T_VectALU_64 < "vnavgh", 0b100, 0b010, 1, 0, 1, 1>;
+ def A2_vnavgwr : T_VectALU_64 < "vnavgw", 0b100, 0b100, 1, 1, 0, 1>;
+ def A2_vnavgwcr : T_VectALU_64 < "vnavgw", 0b100, 0b110, 1, 0, 1, 1>;
+}
+
+// Rdd=vsub[u][bh](Rss,Rtt)
+let Itinerary = ALU64_tc_1_SLOT23 in {
+ def A2_vsubub : T_VectALU_64 < "vsubub", 0b001, 0b000, 0, 0, 0, 1>;
+ def A2_vsubh : T_VectALU_64 < "vsubh", 0b001, 0b010, 0, 0, 0, 1>;
+ def A2_vsubw : T_VectALU_64 < "vsubw", 0b001, 0b101, 0, 0, 0, 1>;
+}
+
+// Rdd=vsub[u][bh](Rss,Rtt):sat
+let Defs = [USR_OVF] in {
+ def A2_vsububs : T_VectALU_64 < "vsubub", 0b001, 0b001, 1, 0, 0, 1>;
+ def A2_vsubhs : T_VectALU_64 < "vsubh", 0b001, 0b011, 1, 0, 0, 1>;
+ def A2_vsubuhs : T_VectALU_64 < "vsubuh", 0b001, 0b100, 1, 0, 0, 1>;
+ def A2_vsubws : T_VectALU_64 < "vsubw", 0b001, 0b110, 1, 0, 0, 1>;
+}
-def CTLZ_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
- "$dst = cl0($src1)",
- [(set (i32 IntRegs:$dst), (ctlz (i32 IntRegs:$src1)))]>;
+// Rdd=vmax[u][bhw](Rss,Rtt)
+def A2_vmaxb : T_VectALU_64 < "vmaxb", 0b110, 0b110, 0, 0, 0, 1>;
+def A2_vmaxub : T_VectALU_64 < "vmaxub", 0b110, 0b000, 0, 0, 0, 1>;
+def A2_vmaxh : T_VectALU_64 < "vmaxh", 0b110, 0b001, 0, 0, 0, 1>;
+def A2_vmaxuh : T_VectALU_64 < "vmaxuh", 0b110, 0b010, 0, 0, 0, 1>;
+def A2_vmaxw : T_VectALU_64 < "vmaxw", 0b110, 0b011, 0, 0, 0, 1>;
+def A2_vmaxuw : T_VectALU_64 < "vmaxuw", 0b101, 0b101, 0, 0, 0, 1>;
+
+// Rdd=vmin[u][bhw](Rss,Rtt)
+def A2_vminb : T_VectALU_64 < "vminb", 0b110, 0b111, 0, 0, 0, 1>;
+def A2_vminub : T_VectALU_64 < "vminub", 0b101, 0b000, 0, 0, 0, 1>;
+def A2_vminh : T_VectALU_64 < "vminh", 0b101, 0b001, 0, 0, 0, 1>;
+def A2_vminuh : T_VectALU_64 < "vminuh", 0b101, 0b010, 0, 0, 0, 1>;
+def A2_vminw : T_VectALU_64 < "vminw", 0b101, 0b011, 0, 0, 0, 1>;
+def A2_vminuw : T_VectALU_64 < "vminuw", 0b101, 0b100, 0, 0, 0, 1>;
-def CTTZ_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
- "$dst = ct0($src1)",
- [(set (i32 IntRegs:$dst), (cttz (i32 IntRegs:$src1)))]>;
+//===----------------------------------------------------------------------===//
+// Template class for vector compare
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_vcmp <string Str, bits<4> minOp>
+ : ALU64_rr <(outs PredRegs:$Pd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Pd = "#Str#"($Rss, $Rtt)", [],
+ "", ALU64_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b00100;
+ let Inst{13} = minOp{3};
+ let Inst{7-5} = minOp{2-0};
+ let Inst{1-0} = Pd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
-def CTLZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
- "$dst = cl0($src1)",
- [(set (i32 IntRegs:$dst), (i32 (trunc (ctlz (i64 DoubleRegs:$src1)))))]>;
+class T_vcmp_pat<InstHexagon MI, PatFrag Op, ValueType T>
+ : Pat<(i1 (Op (T DoubleRegs:$Rss), (T DoubleRegs:$Rtt))),
+ (i1 (MI DoubleRegs:$Rss, DoubleRegs:$Rtt))>;
+
+// Vector compare bytes
+def A2_vcmpbeq : T_vcmp <"vcmpb.eq", 0b0110>;
+def A2_vcmpbgtu : T_vcmp <"vcmpb.gtu", 0b0111>;
+
+// Vector compare halfwords
+def A2_vcmpheq : T_vcmp <"vcmph.eq", 0b0011>;
+def A2_vcmphgt : T_vcmp <"vcmph.gt", 0b0100>;
+def A2_vcmphgtu : T_vcmp <"vcmph.gtu", 0b0101>;
+
+// Vector compare words
+def A2_vcmpweq : T_vcmp <"vcmpw.eq", 0b0000>;
+def A2_vcmpwgt : T_vcmp <"vcmpw.gt", 0b0001>;
+def A2_vcmpwgtu : T_vcmp <"vcmpw.gtu", 0b0010>;
+
+def: T_vcmp_pat<A2_vcmpbeq, seteq, v8i8>;
+def: T_vcmp_pat<A2_vcmpbgtu, setugt, v8i8>;
+def: T_vcmp_pat<A2_vcmpheq, seteq, v4i16>;
+def: T_vcmp_pat<A2_vcmphgt, setgt, v4i16>;
+def: T_vcmp_pat<A2_vcmphgtu, setugt, v4i16>;
+def: T_vcmp_pat<A2_vcmpweq, seteq, v2i32>;
+def: T_vcmp_pat<A2_vcmpwgt, setgt, v2i32>;
+def: T_vcmp_pat<A2_vcmpwgtu, setugt, v2i32>;
-def CTTZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
- "$dst = ct0($src1)",
- [(set (i32 IntRegs:$dst), (i32 (trunc (cttz (i64 DoubleRegs:$src1)))))]>;
+//===----------------------------------------------------------------------===//
+// ALU32/PERM -
+//===----------------------------------------------------------------------===//
-def TSTBIT_rr : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = tstbit($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setne (and (shl 1, (i32 IntRegs:$src2)), (i32 IntRegs:$src1)), 0))]>;
-def TSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = tstbit($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setne (and (shl 1, (u5ImmPred:$src2)), (i32 IntRegs:$src1)), 0))]>;
+//===----------------------------------------------------------------------===//
+// ALU32/PRED +
+//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// ALU32/PRED -
@@ -625,112 +997,280 @@ def TSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
// ALU64/ALU +
//===----------------------------------------------------------------------===//
// Add.
-def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = add($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (add (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))]>;
+//===----------------------------------------------------------------------===//
+// Template Class
+// Add/Subtract halfword
+// Rd=add(Rt.L,Rs.[HL])[:sat]
+// Rd=sub(Rt.L,Rs.[HL])[:sat]
+// Rd=add(Rt.[LH],Rs.[HL])[:sat][:<16]
+// Rd=sub(Rt.[LH],Rs.[HL])[:sat][:<16]
+//===----------------------------------------------------------------------===//
-// Add halfword.
+let hasNewValue = 1, opNewValue = 0 in
+class T_XTYPE_ADD_SUB <bits<2> LHbits, bit isSat, bit hasShift, bit isSub>
+ : ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs),
+ "$Rd = "#!if(isSub,"sub","add")#"($Rt."
+ #!if(hasShift, !if(LHbits{1},"h","l"),"l") #", $Rs."
+ #!if(hasShift, !if(LHbits{0},"h)","l)"), !if(LHbits{1},"h)","l)"))
+ #!if(isSat,":sat","")
+ #!if(hasShift,":<<16",""), [], "", ALU64_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rt;
+ bits<5> Rs;
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b01010;
+ let Inst{22} = hasShift;
+ let Inst{21} = isSub;
+ let Inst{7} = isSat;
+ let Inst{6-5} = LHbits;
+ let Inst{4-0} = Rd;
+ let Inst{12-8} = Rt;
+ let Inst{20-16} = Rs;
+ }
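+// As an illustrative instantiation of the template above:
+// T_XTYPE_ADD_SUB<0b10, 1, 0, 1> yields "$Rd = sub($Rt.l, $Rs.h):sat",
+// i.e. A2_subh_l16_sat_hl below.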
-// Compare.
-defm CMPEHexagon4 : CMP64_rr<"cmp.eq", seteq>;
-defm CMPGT64 : CMP64_rr<"cmp.gt", setgt>;
-defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>;
-
-// Logical operations.
-def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = and($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))]>;
-
-def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = or($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (or (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))]>;
-
-def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = xor($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))]>;
-
-// Maximum.
-def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = max($src2, $src1)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 (setlt (i32 IntRegs:$src2),
- (i32 IntRegs:$src1))),
- (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+//Rd=sub(Rt.L,Rs.[LH])
+def A2_subh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 1>;
+def A2_subh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 1>;
-def MAXUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = maxu($src2, $src1)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 (setult (i32 IntRegs:$src2),
- (i32 IntRegs:$src1))),
- (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
-
-def MAXd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = max($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>;
-
-def MAXUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = maxu($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setult (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>;
-
-// Minimum.
-def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = min($src2, $src1)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 (setgt (i32 IntRegs:$src2),
- (i32 IntRegs:$src1))),
- (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+//Rd=add(Rt.L,Rs.[LH])
+def A2_addh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 0>;
+def A2_addh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 0>;
-def MINUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = minu($src2, $src1)",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 (setugt (i32 IntRegs:$src2),
- (i32 IntRegs:$src1))),
- (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
-
-def MINd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = min($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>;
-
-def MINUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = minu($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setugt (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>;
-
-// Subtract.
-def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = sub($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (sub (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))]>;
+let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in {
+ //Rd=sub(Rt.L,Rs.[LH]):sat
+ def A2_subh_l16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 0, 1>;
+ def A2_subh_l16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 0, 1>;
+
+ //Rd=add(Rt.L,Rs.[LH]):sat
+ def A2_addh_l16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 0, 0>;
+ def A2_addh_l16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 0, 0>;
+}
+
+//Rd=sub(Rt.[LH],Rs.[LH]):<<16
+def A2_subh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 1>;
+def A2_subh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 1>;
+def A2_subh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 1>;
+def A2_subh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 1>;
+
+//Rd=add(Rt.[LH],Rs.[LH]):<<16
+def A2_addh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 0>;
+def A2_addh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 0>;
+def A2_addh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 0>;
+def A2_addh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 0>;
+
+let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in {
+ //Rd=sub(Rt.[LH],Rs.[LH]):sat:<<16
+ def A2_subh_h16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 1, 1>;
+ def A2_subh_h16_sat_lh : T_XTYPE_ADD_SUB <0b01, 1, 1, 1>;
+ def A2_subh_h16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 1, 1>;
+ def A2_subh_h16_sat_hh : T_XTYPE_ADD_SUB <0b11, 1, 1, 1>;
+
+ //Rd=add(Rt.[LH],Rs.[LH]):sat:<<16
+ def A2_addh_h16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 1, 0>;
+ def A2_addh_h16_sat_lh : T_XTYPE_ADD_SUB <0b01, 1, 1, 0>;
+ def A2_addh_h16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 1, 0>;
+ def A2_addh_h16_sat_hh : T_XTYPE_ADD_SUB <0b11, 1, 1, 0>;
+}
+
+// Add halfword.
+def: Pat<(sext_inreg (add I32:$src1, I32:$src2), i16),
+ (A2_addh_l16_ll I32:$src1, I32:$src2)>;
+
+def: Pat<(sra (add (shl I32:$src1, (i32 16)), I32:$src2), (i32 16)),
+ (A2_addh_l16_hl I32:$src1, I32:$src2)>;
+
+def: Pat<(shl (add I32:$src1, I32:$src2), (i32 16)),
+ (A2_addh_h16_ll I32:$src1, I32:$src2)>;
// Subtract halfword.
+def: Pat<(sext_inreg (sub I32:$src1, I32:$src2), i16),
+ (A2_subh_l16_ll I32:$src1, I32:$src2)>;
+
+def: Pat<(shl (sub I32:$src1, I32:$src2), (i32 16)),
+ (A2_subh_h16_ll I32:$src1, I32:$src2)>;
+
+let hasSideEffects = 0, hasNewValue = 1 in
+def S2_parityp: ALU64Inst<(outs IntRegs:$Rd),
+ (ins DoubleRegs:$Rs, DoubleRegs:$Rt),
+ "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b0000;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{4-0} = Rd;
+}
+
+let hasNewValue = 1, opNewValue = 0, hasSideEffects = 0 in
+class T_XTYPE_MIN_MAX < bit isMax, bit isUnsigned >
+ : ALU64Inst < (outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs),
+ "$Rd = "#!if(isMax,"max","min")#!if(isUnsigned,"u","")
+ #"($Rt, $Rs)", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rt;
+ bits<5> Rs;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b01011;
+ let Inst{22-21} = !if(isMax, 0b10, 0b01);
+ let Inst{7} = isUnsigned;
+ let Inst{4-0} = Rd;
+ let Inst{12-8} = !if(isMax, Rs, Rt);
+ let Inst{20-16} = !if(isMax, Rt, Rs);
+ }
+
+def A2_min : T_XTYPE_MIN_MAX < 0, 0 >;
+def A2_minu : T_XTYPE_MIN_MAX < 0, 1 >;
+def A2_max : T_XTYPE_MIN_MAX < 1, 0 >;
+def A2_maxu : T_XTYPE_MIN_MAX < 1, 1 >;
+
+// Depending on which operand of the select is chosen, we generate either a
+// min or a max instruction.
+// Ex:
+// (a>b)?a:b --> max(a,b) => the check performed is '>' and the larger of the
+// two values is selected, so the corresponding HexagonInst is passed in 'Inst'.
+// (a>b)?b:a --> min(a,b) => the check performed is '>' but the smaller value
+// is selected, so the corresponding HexagonInst is passed in 'SwapInst'.
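+// Concretely (illustrative, matching the patterns instantiated below):
+//   (select (setgt a, b), a, b) --> (A2_max a, b)
+//   (select (setgt a, b), b, a) --> (A2_min a, b)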
+
+multiclass T_MinMax_pats <PatFrag Op, RegisterClass RC, ValueType VT,
+ InstHexagon Inst, InstHexagon SwapInst> {
+ def: Pat<(select (i1 (Op (VT RC:$src1), (VT RC:$src2))),
+ (VT RC:$src1), (VT RC:$src2)),
+ (Inst RC:$src1, RC:$src2)>;
+ def: Pat<(select (i1 (Op (VT RC:$src1), (VT RC:$src2))),
+ (VT RC:$src2), (VT RC:$src1)),
+ (SwapInst RC:$src1, RC:$src2)>;
+}
+
+
+multiclass MinMax_pats <PatFrag Op, InstHexagon Inst, InstHexagon SwapInst> {
+ defm: T_MinMax_pats<Op, IntRegs, i32, Inst, SwapInst>;
+
+ def: Pat<(sext_inreg (i32 (select (i1 (Op (i32 PositiveHalfWord:$src1),
+ (i32 PositiveHalfWord:$src2))),
+ (i32 PositiveHalfWord:$src1),
+ (i32 PositiveHalfWord:$src2))), i16),
+ (Inst IntRegs:$src1, IntRegs:$src2)>;
+
+ def: Pat<(sext_inreg (i32 (select (i1 (Op (i32 PositiveHalfWord:$src1),
+ (i32 PositiveHalfWord:$src2))),
+ (i32 PositiveHalfWord:$src2),
+ (i32 PositiveHalfWord:$src1))), i16),
+ (SwapInst IntRegs:$src1, IntRegs:$src2)>;
+}
+
+let AddedComplexity = 200 in {
+ defm: MinMax_pats<setge, A2_max, A2_min>;
+ defm: MinMax_pats<setgt, A2_max, A2_min>;
+ defm: MinMax_pats<setle, A2_min, A2_max>;
+ defm: MinMax_pats<setlt, A2_min, A2_max>;
+ defm: MinMax_pats<setuge, A2_maxu, A2_minu>;
+ defm: MinMax_pats<setugt, A2_maxu, A2_minu>;
+ defm: MinMax_pats<setule, A2_minu, A2_maxu>;
+ defm: MinMax_pats<setult, A2_minu, A2_maxu>;
+}
+
+class T_cmp64_rr<string mnemonic, bits<3> MinOp, bit IsComm>
+ : ALU64_rr<(outs PredRegs:$Pd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt),
+ "$Pd = "#mnemonic#"($Rs, $Rt)", [], "", ALU64_tc_2early_SLOT23> {
+ let isCompare = 1;
+ let isCommutable = IsComm;
+ let hasSideEffects = 0;
+
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0010100;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{1-0} = Pd;
+}
+
+def C2_cmpeqp : T_cmp64_rr<"cmp.eq", 0b000, 1>;
+def C2_cmpgtp : T_cmp64_rr<"cmp.gt", 0b010, 0>;
+def C2_cmpgtup : T_cmp64_rr<"cmp.gtu", 0b100, 0>;
+
+class T_cmp64_rr_pat<InstHexagon MI, PatFrag CmpOp>
+ : Pat<(i1 (CmpOp (i64 DoubleRegs:$Rs), (i64 DoubleRegs:$Rt))),
+ (i1 (MI DoubleRegs:$Rs, DoubleRegs:$Rt))>;
+
+def: T_cmp64_rr_pat<C2_cmpeqp, seteq>;
+def: T_cmp64_rr_pat<C2_cmpgtp, setgt>;
+def: T_cmp64_rr_pat<C2_cmpgtup, setugt>;
+def: T_cmp64_rr_pat<C2_cmpgtp, RevCmp<setlt>>;
+def: T_cmp64_rr_pat<C2_cmpgtup, RevCmp<setult>>;
+
+def C2_vmux : ALU64_rr<(outs DoubleRegs:$Rd),
+ (ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
+ "$Rd = vmux($Pu, $Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> {
+ let hasSideEffects = 0;
+
+ bits<5> Rd;
+ bits<2> Pu;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b0001;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{6-5} = Pu;
+ let Inst{4-0} = Rd;
+}
+
+class T_ALU64_rr<string mnemonic, string suffix, bits<4> RegType,
+ bits<3> MajOp, bits<3> MinOp, bit OpsRev, bit IsComm,
+ string Op2Pfx>
+ : ALU64_rr<(outs DoubleRegs:$Rd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt),
+ "$Rd = " #mnemonic# "($Rs, " #Op2Pfx# "$Rt)" #suffix, [],
+ "", ALU64_tc_1_SLOT23> {
+ let hasSideEffects = 0;
+ let isCommutable = IsComm;
+
+ bits<5> Rs;
+ bits<5> Rt;
+ bits<5> Rd;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = RegType;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = !if (OpsRev,Rt,Rs);
+ let Inst{12-8} = !if (OpsRev,Rs,Rt);
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+}
+
+class T_ALU64_arith<string mnemonic, bits<3> MajOp, bits<3> MinOp, bit IsSat,
+ bit OpsRev, bit IsComm>
+ : T_ALU64_rr<mnemonic, !if(IsSat,":sat",""), 0b0011, MajOp, MinOp, OpsRev,
+ IsComm, "">;
+
+def A2_addp : T_ALU64_arith<"add", 0b000, 0b111, 0, 0, 1>;
+def A2_subp : T_ALU64_arith<"sub", 0b001, 0b111, 0, 1, 0>;
+
+def: Pat<(i64 (add I64:$Rs, I64:$Rt)), (A2_addp I64:$Rs, I64:$Rt)>;
+def: Pat<(i64 (sub I64:$Rs, I64:$Rt)), (A2_subp I64:$Rs, I64:$Rt)>;
+
+class T_ALU64_logical<string mnemonic, bits<3> MinOp, bit OpsRev, bit IsComm,
+ bit IsNeg>
+ : T_ALU64_rr<mnemonic, "", 0b0011, 0b111, MinOp, OpsRev, IsComm,
+ !if(IsNeg,"~","")>;
+
+def A2_andp : T_ALU64_logical<"and", 0b000, 0, 1, 0>;
+def A2_orp : T_ALU64_logical<"or", 0b010, 0, 1, 0>;
+def A2_xorp : T_ALU64_logical<"xor", 0b100, 0, 1, 0>;
+
+def: Pat<(i64 (and I64:$Rs, I64:$Rt)), (A2_andp I64:$Rs, I64:$Rt)>;
+def: Pat<(i64 (or I64:$Rs, I64:$Rt)), (A2_orp I64:$Rs, I64:$Rt)>;
+def: Pat<(i64 (xor I64:$Rs, I64:$Rt)), (A2_xorp I64:$Rs, I64:$Rt)>;
//===----------------------------------------------------------------------===//
// ALU64/ALU -
@@ -762,82 +1302,119 @@ def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
// Pipelined looping instructions.
// Logical operations on predicates.
-def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
- "$dst = and($src1, $src2)",
- [(set (i1 PredRegs:$dst), (and (i1 PredRegs:$src1),
- (i1 PredRegs:$src2)))]>;
-
-let neverHasSideEffects = 1 in
-def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1,
- PredRegs:$src2),
- "$dst = and($src1, !$src2)",
- []>;
-
-def ANY_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
- "$dst = any8($src1)",
- []>;
-
-def ALL_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
- "$dst = all8($src1)",
- []>;
-
-def VITPACK_pp : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1,
- PredRegs:$src2),
- "$dst = vitpack($src1, $src2)",
- []>;
+let hasSideEffects = 0 in
+class T_LOGICAL_1OP<string MnOp, bits<2> OpBits>
+ : CRInst<(outs PredRegs:$Pd), (ins PredRegs:$Ps),
+ "$Pd = " # MnOp # "($Ps)", [], "", CR_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<2> Ps;
+
+ let IClass = 0b0110;
+ let Inst{27-23} = 0b10111;
+ let Inst{22-21} = OpBits;
+ let Inst{20} = 0b0;
+ let Inst{17-16} = Ps;
+ let Inst{13} = 0b0;
+ let Inst{1-0} = Pd;
+}
-def VALIGN_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2,
- PredRegs:$src3),
- "$dst = valignb($src1, $src2, $src3)",
- []>;
+def C2_any8 : T_LOGICAL_1OP<"any8", 0b00>;
+def C2_all8 : T_LOGICAL_1OP<"all8", 0b01>;
+def C2_not : T_LOGICAL_1OP<"not", 0b10>;
+
+def: Pat<(i1 (not (i1 PredRegs:$Ps))),
+ (C2_not PredRegs:$Ps)>;
+
+let hasSideEffects = 0 in
+class T_LOGICAL_2OP<string MnOp, bits<3> OpBits, bit IsNeg, bit Rev>
+ : CRInst<(outs PredRegs:$Pd), (ins PredRegs:$Ps, PredRegs:$Pt),
+ "$Pd = " # MnOp # "($Ps, " # !if (IsNeg,"!","") # "$Pt)",
+ [], "", CR_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<2> Ps;
+ bits<2> Pt;
+
+ let IClass = 0b0110;
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = OpBits;
+ let Inst{20} = 0b0;
+ let Inst{17-16} = !if(Rev,Pt,Ps); // Rs and Rt are reversed for some
+ let Inst{13} = 0b0; // instructions.
+ let Inst{9-8} = !if(Rev,Ps,Pt);
+ let Inst{1-0} = Pd;
+}
-def VSPLICE_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2,
- PredRegs:$src3),
- "$dst = vspliceb($src1, $src2, $src3)",
- []>;
+def C2_and : T_LOGICAL_2OP<"and", 0b000, 0, 1>;
+def C2_or : T_LOGICAL_2OP<"or", 0b001, 0, 1>;
+def C2_xor : T_LOGICAL_2OP<"xor", 0b010, 0, 0>;
+def C2_andn : T_LOGICAL_2OP<"and", 0b011, 1, 1>;
+def C2_orn : T_LOGICAL_2OP<"or", 0b111, 1, 1>;
-def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1),
- "$dst = mask($src1)",
- []>;
+def: Pat<(i1 (and I1:$Ps, I1:$Pt)), (C2_and I1:$Ps, I1:$Pt)>;
+def: Pat<(i1 (or I1:$Ps, I1:$Pt)), (C2_or I1:$Ps, I1:$Pt)>;
+def: Pat<(i1 (xor I1:$Ps, I1:$Pt)), (C2_xor I1:$Ps, I1:$Pt)>;
+def: Pat<(i1 (and I1:$Ps, (not I1:$Pt))), (C2_andn I1:$Ps, I1:$Pt)>;
+def: Pat<(i1 (or I1:$Ps, (not I1:$Pt))), (C2_orn I1:$Ps, I1:$Pt)>;
-def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
- "$dst = not($src1)",
- [(set (i1 PredRegs:$dst), (not (i1 PredRegs:$src1)))]>;
-
-def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
- "$dst = or($src1, $src2)",
- [(set (i1 PredRegs:$dst), (or (i1 PredRegs:$src1),
- (i1 PredRegs:$src2)))]>;
+let hasSideEffects = 0, hasNewValue = 1 in
+def C2_vitpack : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps, PredRegs:$Pt),
+ "$Rd = vitpack($Ps, $Pt)", [], "", S_2op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<2> Ps;
+ bits<2> Pt;
+
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b1001;
+ let Inst{22-21} = 0b00;
+ let Inst{17-16} = Ps;
+ let Inst{9-8} = Pt;
+ let Inst{4-0} = Rd;
+}
-def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
- "$dst = xor($src1, $src2)",
- [(set (i1 PredRegs:$dst), (xor (i1 PredRegs:$src1),
- (i1 PredRegs:$src2)))]>;
+let hasSideEffects = 0 in
+def C2_mask : SInst<(outs DoubleRegs:$Rd), (ins PredRegs:$Pt),
+ "$Rd = mask($Pt)", [], "", S_2op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<2> Pt;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b0110;
+ let Inst{9-8} = Pt;
+ let Inst{4-0} = Rd;
+}
// User control register transfer.
//===----------------------------------------------------------------------===//
// CR -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// JR +
+//===----------------------------------------------------------------------===//
+
def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-def eh_return: SDNode<"HexagonISD::EH_RETURN", SDTNone,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def eh_return: SDNode<"HexagonISD::EH_RETURN", SDTNone, [SDNPHasChain]>;
def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>;
-let InputType = "imm", isBarrier = 1, isPredicable = 1,
-Defs = [PC], isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
-opExtentBits = 24, isCodeGenOnly = 0 in
-class T_JMP <dag InsDag, list<dag> JumpList = []>
- : JInst<(outs), InsDag,
- "jump $dst" , JumpList> {
- bits<24> dst;
+class CondStr<string CReg, bit True, bit New> {
+ string S = "if (" # !if(True,"","!") # CReg # !if(New,".new","") # ") ";
+}
+class JumpOpcStr<string Mnemonic, bit New, bit Taken> {
+ string S = Mnemonic # !if(Taken, ":t", !if(New, ":nt", ""));
+}
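+// e.g. (illustrative): CondStr<"$src", 0, 1>.S is "if (!$src.new) " and
+// JumpOpcStr<"jump", 1, 0>.S is "jump:nt".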
+let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
+ isPredicable = 1,
+ isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
+ opExtentBits = 24, opExtentAlign = 2, InputType = "imm" in
+class T_JMP<string ExtStr>
+ : JInst<(outs), (ins brtarget:$dst),
+ "jump " # ExtStr # "$dst",
+ [], "", J_tc_2early_SLOT23> {
+ bits<24> dst;
let IClass = 0b0101;
let Inst{27-25} = 0b100;
@@ -845,16 +1422,16 @@ class T_JMP <dag InsDag, list<dag> JumpList = []>
let Inst{13-1} = dst{14-2};
}
-let InputType = "imm", isExtendable = 1, opExtendable = 1, isExtentSigned = 1,
-Defs = [PC], isPredicated = 1, opExtentBits = 17 in
-class T_JMP_c <bit PredNot, bit isPredNew, bit isTak>:
- JInst<(outs ), (ins PredRegs:$src, brtarget:$dst),
- !if(PredNot, "if (!$src", "if ($src")#
- !if(isPredNew, ".new) ", ") ")#"jump"#
- !if(isPredNew, !if(isTak, ":t ", ":nt "), " ")#"$dst"> {
-
+let isBranch = 1, Defs = [PC], hasSideEffects = 0, isPredicated = 1,
+ isExtendable = 1, opExtendable = 1, isExtentSigned = 1,
+ opExtentBits = 17, opExtentAlign = 2, InputType = "imm" in
+class T_JMP_c<bit PredNot, bit isPredNew, bit isTak, string ExtStr>
+ : JInst<(outs), (ins PredRegs:$src, brtarget:$dst),
+ CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
+ JumpOpcStr<"jump", isPredNew, isTak>.S # " " #
+ ExtStr # "$dst",
+ [], "", J_tc_2early_SLOT23>, ImmRegRel {
let isTaken = isTak;
- let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), "");
let isPredicatedFalse = PredNot;
let isPredicatedNew = isPredNew;
bits<2> src;
@@ -864,7 +1441,7 @@ class T_JMP_c <bit PredNot, bit isPredNew, bit isTak>:
let Inst{27-24} = 0b1100;
let Inst{21} = PredNot;
- let Inst{12} = !if(isPredNew, isTak, zero);
+ let Inst{12} = isTak;
let Inst{11} = isPredNew;
let Inst{9-8} = src;
let Inst{23-22} = dst{16-15};
@@ -873,11 +1450,28 @@ class T_JMP_c <bit PredNot, bit isPredNew, bit isTak>:
let Inst{7-1} = dst{8-2};
}
-let isBarrier = 1, Defs = [PC], isPredicable = 1, InputType = "reg" in
-class T_JMPr<dag InsDag = (ins IntRegs:$dst)>
- : JRInst<(outs ), InsDag,
- "jumpr $dst" ,
- []> {
+multiclass JMP_Pred<bit PredNot, string ExtStr> {
+ def NAME : T_JMP_c<PredNot, 0, 0, ExtStr>; // not taken
+ // Predicate new
+ def NAME#newpt : T_JMP_c<PredNot, 1, 1, ExtStr>; // taken
+ def NAME#new : T_JMP_c<PredNot, 1, 0, ExtStr>; // not taken
+}
+
+multiclass JMP_base<string BaseOp, string ExtStr> {
+ let BaseOpcode = BaseOp in {
+ def NAME : T_JMP<ExtStr>;
+ defm t : JMP_Pred<0, ExtStr>;
+ defm f : JMP_Pred<1, ExtStr>;
+ }
+}
+
+// Jump to an address stored in a register (JUMPR_MISC):
+// if ([[!]P[.new]]) jumpr[:t/:nt] Rs
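+// e.g. (illustrative): "jumpr r1", "if (p0) jumpr r1",
+// "if (!p0.new) jumpr:nt r1"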
+let isBranch = 1, isIndirectBranch = 1, isBarrier = 1, Defs = [PC],
+ isPredicable = 1, hasSideEffects = 0, InputType = "reg" in
+class T_JMPr
+ : JRInst<(outs), (ins IntRegs:$dst),
+ "jumpr $dst", [], "", J_tc_2early_SLOT2> {
bits<5> dst;
let IClass = 0b0101;
@@ -885,15 +1479,15 @@ class T_JMPr<dag InsDag = (ins IntRegs:$dst)>
let Inst{20-16} = dst;
}
-let Defs = [PC], isPredicated = 1, InputType = "reg" in
-class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>:
- JRInst <(outs ), (ins PredRegs:$src, IntRegs:$dst),
- !if(PredNot, "if (!$src", "if ($src")#
- !if(isPredNew, ".new) ", ") ")#"jumpr"#
- !if(isPredNew, !if(isTak, ":t ", ":nt "), " ")#"$dst"> {
+let isBranch = 1, isIndirectBranch = 1, Defs = [PC], isPredicated = 1,
+ hasSideEffects = 0, InputType = "reg" in
+class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
+ : JRInst <(outs), (ins PredRegs:$src, IntRegs:$dst),
+ CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
+ JumpOpcStr<"jumpr", isPredNew, isTak>.S # " $dst", [],
+ "", J_tc_2early_SLOT2> {
let isTaken = isTak;
- let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), "");
let isPredicatedFalse = PredNot;
let isPredicatedNew = isPredNew;
bits<2> src;
@@ -904,73 +1498,88 @@ class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>:
let Inst{27-22} = 0b001101;
let Inst{21} = PredNot;
let Inst{20-16} = dst;
- let Inst{12} = !if(isPredNew, isTak, zero);
+ let Inst{12} = isTak;
let Inst{11} = isPredNew;
let Inst{9-8} = src;
- let Predicates = !if(isPredNew, [HasV3T], [HasV2T]);
- let validSubTargets = !if(isPredNew, HasV3SubT, HasV2SubT);
-}
-
-multiclass JMP_Pred<bit PredNot> {
- def _#NAME : T_JMP_c<PredNot, 0, 0>;
- // Predicate new
- def _#NAME#new_t : T_JMP_c<PredNot, 1, 1>; // taken
- def _#NAME#new_nt : T_JMP_c<PredNot, 1, 0>; // not taken
-}
-
-multiclass JMP_base<string BaseOp> {
- let BaseOpcode = BaseOp in {
- def NAME : T_JMP<(ins brtarget:$dst), [(br bb:$dst)]>;
- defm t : JMP_Pred<0>;
- defm f : JMP_Pred<1>;
- }
}
multiclass JMPR_Pred<bit PredNot> {
- def NAME: T_JMPr_c<PredNot, 0, 0>;
+ def NAME : T_JMPr_c<PredNot, 0, 0>; // not taken
// Predicate new
- def NAME#new_tV3 : T_JMPr_c<PredNot, 1, 1>; // taken
- def NAME#new_ntV3 : T_JMPr_c<PredNot, 1, 0>; // not taken
+ def NAME#newpt : T_JMPr_c<PredNot, 1, 1>; // taken
+ def NAME#new : T_JMPr_c<PredNot, 1, 0>; // not taken
}
multiclass JMPR_base<string BaseOp> {
let BaseOpcode = BaseOp in {
def NAME : T_JMPr;
- defm _t : JMPR_Pred<0>;
- defm _f : JMPR_Pred<1>;
+ defm t : JMPR_Pred<0>;
+ defm f : JMPR_Pred<1>;
}
}
-let isTerminator = 1, neverHasSideEffects = 1 in {
-let isBranch = 1 in
-defm JMP : JMP_base<"JMP">, PredNewRel;
+let isCall = 1, hasSideEffects = 1 in
+class JUMPR_MISC_CALLR<bit isPred, bit isPredNot,
+ dag InputDag = (ins IntRegs:$Rs)>
+ : JRInst<(outs), InputDag,
+ !if(isPred, !if(isPredNot, "if (!$Pu) callr $Rs",
+ "if ($Pu) callr $Rs"),
+ "callr $Rs"),
+ [], "", J_tc_2early_SLOT2> {
+ bits<5> Rs;
+ bits<2> Pu;
+ let isPredicated = isPred;
+ let isPredicatedFalse = isPredNot;
-let isBranch = 1, isIndirectBranch = 1 in
-defm JMPR : JMPR_base<"JMPr">, PredNewRel;
+ let IClass = 0b0101;
+ let Inst{27-25} = 0b000;
+ let Inst{24-23} = !if (isPred, 0b10, 0b01);
+ let Inst{22} = 0;
+ let Inst{21} = isPredNot;
+ let Inst{9-8} = !if (isPred, Pu, 0b00);
+ let Inst{20-16} = Rs;
-let isReturn = 1, isCodeGenOnly = 1 in
-defm JMPret : JMPR_base<"JMPret">, PredNewRel;
+ }
+
+let Defs = VolatileV3.Regs in {
+ def J2_callrt : JUMPR_MISC_CALLR<1, 0, (ins PredRegs:$Pu, IntRegs:$Rs)>;
+ def J2_callrf : JUMPR_MISC_CALLR<1, 1, (ins PredRegs:$Pu, IntRegs:$Rs)>;
}
-def : Pat<(retflag),
- (JMPret (i32 R31))>;
+let isTerminator = 1, hasSideEffects = 0 in {
+ defm J2_jump : JMP_base<"JMP", "">, PredNewRel;
-def : Pat <(brcond (i1 PredRegs:$src1), bb:$offset),
- (JMP_t (i1 PredRegs:$src1), bb:$offset)>;
+  // Deal with explicit assembly:
+  // - never extend a jump #, always extend a jump ##
+ let isAsmParserOnly = 1 in {
+ defm J2_jump_ext : JMP_base<"JMP", "##">;
+ defm J2_jump_noext : JMP_base<"JMP", "#">;
+ }
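+  // e.g. (illustrative): "jump #foo" assembles via J2_jump_noext, while
+  // "jump ##foo" forces the constant extender and assembles via J2_jump_ext.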
-// A return through builtin_eh_return.
-let isReturn = 1, isTerminator = 1, isBarrier = 1, neverHasSideEffects = 1,
-isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in
-def EH_RETURN_JMPR : T_JMPr;
+ defm J2_jumpr : JMPR_base<"JMPr">, PredNewRel;
-def : Pat<(eh_return),
- (EH_RETURN_JMPR (i32 R31))>;
+ let isReturn = 1, isCodeGenOnly = 1 in
+ defm JMPret : JMPR_base<"JMPret">, PredNewRel;
+}
-def : Pat<(HexagonBR_JT (i32 IntRegs:$dst)),
- (JMPR (i32 IntRegs:$dst))>;
+def: Pat<(br bb:$dst),
+ (J2_jump brtarget:$dst)>;
+def: Pat<(retflag),
+ (JMPret (i32 R31))>;
+def: Pat<(brcond (i1 PredRegs:$src1), bb:$offset),
+ (J2_jumpt PredRegs:$src1, bb:$offset)>;
-def : Pat<(brind (i32 IntRegs:$dst)),
- (JMPR (i32 IntRegs:$dst))>;
+// A return through builtin_eh_return.
+let isReturn = 1, isTerminator = 1, isBarrier = 1, hasSideEffects = 0,
+ isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in
+def EH_RETURN_JMPR : T_JMPr;
+
+def: Pat<(eh_return),
+ (EH_RETURN_JMPR (i32 R31))>;
+def: Pat<(HexagonBR_JT (i32 IntRegs:$dst)),
+ (J2_jumpr IntRegs:$dst)>;
+def: Pat<(brind (i32 IntRegs:$dst)),
+ (J2_jumpr IntRegs:$dst)>;
//===----------------------------------------------------------------------===//
// JR -
@@ -979,265 +1588,688 @@ def : Pat<(brind (i32 IntRegs:$dst)),
//===----------------------------------------------------------------------===//
// LD +
//===----------------------------------------------------------------------===//
-///
-// Load -- MEMri operand
-multiclass LD_MEMri_Pbase<string mnemonic, RegisterClass RC,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : LDInst2<(outs RC:$dst),
- (ins PredRegs:$src1, MEMri:$addr),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#"$dst = "#mnemonic#"($addr)",
- []>;
-}
-
-multiclass LD_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 1>;
+
+// Load - Base with Immediate offset addressing mode
+let isExtendable = 1, opExtendable = 2, isExtentSigned = 1,
+    AddedComplexity = 20 in
+class T_load_io <string mnemonic, RegisterClass RC, bits<4> MajOp,
+ Operand ImmOp>
+ : LDInst<(outs RC:$dst), (ins IntRegs:$src1, ImmOp:$offset),
+ "$dst = "#mnemonic#"($src1 + #$offset)", []>, AddrModeRel {
+ bits<4> name;
+ bits<5> dst;
+ bits<5> src1;
+ bits<14> offset;
+ bits<11> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s11_3Ext"), offset{13-3},
+ !if (!eq(ImmOpStr, "s11_2Ext"), offset{12-2},
+ !if (!eq(ImmOpStr, "s11_1Ext"), offset{11-1},
+ /* s11_0Ext */ offset{10-0})));
+ let opExtentBits = !if (!eq(ImmOpStr, "s11_3Ext"), 14,
+ !if (!eq(ImmOpStr, "s11_2Ext"), 13,
+ !if (!eq(ImmOpStr, "s11_1Ext"), 12,
+ /* s11_0Ext */ 11)));
+ let hasNewValue = !if (!eq(!cast<string>(RC), "DoubleRegs"), 0, 1);
+
+ let IClass = 0b1001;
+
+ let Inst{27} = 0b0;
+ let Inst{26-25} = offsetBits{10-9};
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13-5} = offsetBits{8-0};
+ let Inst{4-0} = dst;
}
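+// As an illustration of this class (using the defs below): the memw variant
+// prints "$dst = memw($src1 + #$offset)", matching assembly such as
+// "r0 = memw(r1 + #8)".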
-}
-let isExtendable = 1, neverHasSideEffects = 1 in
-multiclass LD_MEMri<string mnemonic, string CextOp, RegisterClass RC,
- bits<5> ImmBits, bits<5> PredImmBits> {
+let opExtendable = 3, isExtentSigned = 0, isPredicated = 1 in
+class T_pload_io <string mnemonic, RegisterClass RC, bits<4>MajOp,
+ Operand ImmOp, bit isNot, bit isPredNew>
+ : LDInst<(outs RC:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset),
+ "if ("#!if(isNot, "!$src1", "$src1")
+ #!if(isPredNew, ".new", "")
+ #") $dst = "#mnemonic#"($src2 + #$offset)",
+ [],"", V2LDST_tc_ld_SLOT01> , AddrModeRel {
+ bits<5> dst;
+ bits<2> src1;
+ bits<5> src2;
+ bits<9> offset;
+ bits<6> offsetBits;
+ string ImmOpStr = !cast<string>(ImmOp);
+
+ let offsetBits = !if (!eq(ImmOpStr, "u6_3Ext"), offset{8-3},
+ !if (!eq(ImmOpStr, "u6_2Ext"), offset{7-2},
+ !if (!eq(ImmOpStr, "u6_1Ext"), offset{6-1},
+ /* u6_0Ext */ offset{5-0})));
+ let opExtentBits = !if (!eq(ImmOpStr, "u6_3Ext"), 9,
+ !if (!eq(ImmOpStr, "u6_2Ext"), 8,
+ !if (!eq(ImmOpStr, "u6_1Ext"), 7,
+ /* u6_0Ext */ 6)));
+ let hasNewValue = !if (!eq(ImmOpStr, "u6_3Ext"), 0, 1);
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isNot;
+
+ let IClass = 0b0100;
+
+ let Inst{27} = 0b0;
+ let Inst{26} = isNot;
+ let Inst{25} = isPredNew;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = 0b0;
+ let Inst{12-11} = src1;
+ let Inst{10-5} = offsetBits;
+ let Inst{4-0} = dst;
+ }
- let CextOpcode = CextOp, BaseOpcode = CextOp in {
- let opExtendable = 2, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1 in
- def NAME : LDInst2<(outs RC:$dst), (ins MEMri:$addr),
- "$dst = "#mnemonic#"($addr)",
- []>;
-
- let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits,
- isPredicated = 1 in {
- defm Pt : LD_MEMri_Pred<mnemonic, RC, 0 >;
- defm NotPt : LD_MEMri_Pred<mnemonic, RC, 1 >;
- }
+let isExtendable = 1, hasSideEffects = 0, addrMode = BaseImmOffset in
+multiclass LD_Idxd<string mnemonic, string CextOp, RegisterClass RC,
+ Operand ImmOp, Operand predImmOp, bits<4>MajOp> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
+ let isPredicable = 1 in
+ def L2_#NAME#_io : T_load_io <mnemonic, RC, MajOp, ImmOp>;
+
+ // Predicated
+ def L2_p#NAME#t_io : T_pload_io <mnemonic, RC, MajOp, predImmOp, 0, 0>;
+ def L2_p#NAME#f_io : T_pload_io <mnemonic, RC, MajOp, predImmOp, 1, 0>;
+
+ // Predicated new
+ def L2_p#NAME#tnew_io : T_pload_io <mnemonic, RC, MajOp, predImmOp, 0, 1>;
+ def L2_p#NAME#fnew_io : T_pload_io <mnemonic, RC, MajOp, predImmOp, 1, 1>;
}
}
-let addrMode = BaseImmOffset, isMEMri = "true" in {
- let accessSize = ByteAccess in {
- defm LDrib: LD_MEMri < "memb", "LDrib", IntRegs, 11, 6>, AddrModeRel;
- defm LDriub: LD_MEMri < "memub" , "LDriub", IntRegs, 11, 6>, AddrModeRel;
- }
+let accessSize = ByteAccess in {
+ defm loadrb: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext, 0b1000>;
+ defm loadrub: LD_Idxd <"memub", "LDriub", IntRegs, s11_0Ext, u6_0Ext, 0b1001>;
+}
- let accessSize = HalfWordAccess in {
- defm LDrih: LD_MEMri < "memh", "LDrih", IntRegs, 12, 7>, AddrModeRel;
- defm LDriuh: LD_MEMri < "memuh", "LDriuh", IntRegs, 12, 7>, AddrModeRel;
- }
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
+ defm loadrh: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext, 0b1010>;
+ defm loadruh: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext, 0b1011>;
+}
- let accessSize = WordAccess in
- defm LDriw: LD_MEMri < "memw", "LDriw", IntRegs, 13, 8>, AddrModeRel;
+let accessSize = WordAccess, opExtentAlign = 2 in
+defm loadri: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext, 0b1100>;
- let accessSize = DoubleWordAccess in
- defm LDrid: LD_MEMri < "memd", "LDrid", DoubleRegs, 14, 9>, AddrModeRel;
+let accessSize = DoubleWordAccess, opExtentAlign = 3 in
+defm loadrd: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext, 0b1110>;
+
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
+ def L2_loadbsw2_io: T_load_io<"membh", IntRegs, 0b0001, s11_1Ext>;
+ def L2_loadbzw2_io: T_load_io<"memubh", IntRegs, 0b0011, s11_1Ext>;
}
-def : Pat < (i32 (sextloadi8 ADDRriS11_0:$addr)),
- (LDrib ADDRriS11_0:$addr) >;
+let accessSize = WordAccess, opExtentAlign = 2 in {
+ def L2_loadbzw4_io: T_load_io<"memubh", DoubleRegs, 0b0101, s11_2Ext>;
+ def L2_loadbsw4_io: T_load_io<"membh", DoubleRegs, 0b0111, s11_2Ext>;
+}
-def : Pat < (i32 (zextloadi8 ADDRriS11_0:$addr)),
- (LDriub ADDRriS11_0:$addr) >;
+let addrMode = BaseImmOffset, isExtendable = 1, hasSideEffects = 0,
+ opExtendable = 3, isExtentSigned = 1 in
+class T_loadalign_io <string str, bits<4> MajOp, Operand ImmOp>
+ : LDInst<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset),
+ "$dst = "#str#"($src2 + #$offset)", [],
+ "$src1 = $dst">, AddrModeRel {
+ bits<4> name;
+ bits<5> dst;
+ bits<5> src2;
+ bits<12> offset;
+ bits<11> offsetBits;
+
+ let offsetBits = !if (!eq(!cast<string>(ImmOp), "s11_1Ext"), offset{11-1},
+ /* s11_0Ext */ offset{10-0});
+ let IClass = 0b1001;
+
+ let Inst{27} = 0b0;
+ let Inst{26-25} = offsetBits{10-9};
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13-5} = offsetBits{8-0};
+ let Inst{4-0} = dst;
+ }
-def : Pat < (i32 (sextloadi16 ADDRriS11_1:$addr)),
- (LDrih ADDRriS11_1:$addr) >;
+let accessSize = HalfWordAccess, opExtentBits = 12, opExtentAlign = 1 in
+def L2_loadalignh_io: T_loadalign_io <"memh_fifo", 0b0010, s11_1Ext>;
-def : Pat < (i32 (zextloadi16 ADDRriS11_1:$addr)),
- (LDriuh ADDRriS11_1:$addr) >;
+let accessSize = ByteAccess, opExtentBits = 11 in
+def L2_loadalignb_io: T_loadalign_io <"memb_fifo", 0b0100, s11_0Ext>;
-def : Pat < (i32 (load ADDRriS11_2:$addr)),
- (LDriw ADDRriS11_2:$addr) >;
+// Patterns to select load-indexed (i.e. load from base+offset).
+multiclass Loadx_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+ InstHexagon MI> {
+ def: Pat<(VT (Load AddrFI:$fi)), (VT (MI AddrFI:$fi, 0))>;
+ def: Pat<(VT (Load (add (i32 IntRegs:$Rs), ImmPred:$Off))),
+ (VT (MI IntRegs:$Rs, imm:$Off))>;
+ def: Pat<(VT (Load (i32 IntRegs:$Rs))), (VT (MI IntRegs:$Rs, 0))>;
+}
-def : Pat < (i64 (load ADDRriS11_3:$addr)),
- (LDrid ADDRriS11_3:$addr) >;
+let AddedComplexity = 20 in {
+ defm: Loadx_pat<load, i32, s11_2ExtPred, L2_loadri_io>;
+ defm: Loadx_pat<load, i64, s11_3ExtPred, L2_loadrd_io>;
+ defm: Loadx_pat<atomic_load_8 , i32, s11_0ExtPred, L2_loadrub_io>;
+ defm: Loadx_pat<atomic_load_16, i32, s11_1ExtPred, L2_loadruh_io>;
+ defm: Loadx_pat<atomic_load_32, i32, s11_2ExtPred, L2_loadri_io>;
+ defm: Loadx_pat<atomic_load_64, i64, s11_3ExtPred, L2_loadrd_io>;
+
+ defm: Loadx_pat<extloadi1, i32, s11_0ExtPred, L2_loadrub_io>;
+ defm: Loadx_pat<extloadi8, i32, s11_0ExtPred, L2_loadrub_io>;
+ defm: Loadx_pat<extloadi16, i32, s11_1ExtPred, L2_loadruh_io>;
+ defm: Loadx_pat<sextloadi8, i32, s11_0ExtPred, L2_loadrb_io>;
+ defm: Loadx_pat<sextloadi16, i32, s11_1ExtPred, L2_loadrh_io>;
+ defm: Loadx_pat<zextloadi1, i32, s11_0ExtPred, L2_loadrub_io>;
+ defm: Loadx_pat<zextloadi8, i32, s11_0ExtPred, L2_loadrub_io>;
+ defm: Loadx_pat<zextloadi16, i32, s11_1ExtPred, L2_loadruh_io>;
+ // No sextloadi1.
+}
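+// e.g. (illustrative): with the patterns above, (i32 (load (add R, 8)))
+// selects to (L2_loadri_io R, 8).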
+// Sign-extending loads of i1 need to replicate the lowest bit throughout
+// the 32-bit value. Since the loaded value can only be 0 or 1, 0-v should
+// do the trick.
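+// (Illustration: 0 - 0 = 0x00000000 and 0 - 1 = 0xffffffff, so the loaded
+// bit is replicated through all 32 bits of the result.)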
+let AddedComplexity = 20 in
+def: Pat<(i32 (sextloadi1 (i32 IntRegs:$Rs))),
+ (A2_subri 0, (L2_loadrub_io IntRegs:$Rs, 0))>;
-// Load - Base with Immediate offset addressing mode
-multiclass LD_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : LDInst2<(outs RC:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#"$dst = "#mnemonic#"($src2+#$src3)",
- []>;
-}
-
-multiclass LD_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp,
- bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 1>;
+//===----------------------------------------------------------------------===//
+// Post increment load
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated post increment loads with immediate offset.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, addrMode = PostInc in
+class T_load_pi <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<4> MajOp >
+ : LDInstPI <(outs RC:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, ImmOp:$offset),
+ "$dst = "#mnemonic#"($src1++#$offset)" ,
+ [],
+ "$src1 = $dst2" > ,
+ PredNewRel {
+ bits<5> dst;
+ bits<5> src1;
+ bits<7> offset;
+ bits<4> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3},
+ !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0})));
+ let hasNewValue = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1);
+
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13-12} = 0b00;
+ let Inst{8-5} = offsetBits;
+ let Inst{4-0} = dst;
}
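+// e.g. (illustrative): the memw variant prints "$dst = memw($src1++#$offset)",
+// matching assembly such as "r0 = memw(r1++#4)", where r1 is post-incremented
+// by 4 after the load.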
-}
-let isExtendable = 1, neverHasSideEffects = 1 in
-multiclass LD_Idxd<string mnemonic, string CextOp, RegisterClass RC,
- Operand ImmOp, Operand predImmOp, bits<5> ImmBits,
- bits<5> PredImmBits> {
+//===----------------------------------------------------------------------===//
+// Template class for predicated post increment loads with immediate offset.
+//===----------------------------------------------------------------------===//
+let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc in
+class T_pload_pi <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<4> MajOp, bit isPredNot, bit isPredNew >
+ : LDInst <(outs RC:$dst, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset),
+ !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#"$dst = "#mnemonic#"($src2++#$offset)",
+ [] ,
+ "$src2 = $dst2" > ,
+ PredNewRel {
+ bits<5> dst;
+ bits<2> src1;
+ bits<5> src2;
+ bits<7> offset;
+ bits<4> offsetBits;
- let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
- let opExtendable = 2, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1, AddedComplexity = 20 in
- def NAME : LDInst2<(outs RC:$dst), (ins IntRegs:$src1, ImmOp:$offset),
- "$dst = "#mnemonic#"($src1+#$offset)",
- []>;
-
- let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits,
- isPredicated = 1 in {
- defm Pt : LD_Idxd_Pred<mnemonic, RC, predImmOp, 0 >;
- defm NotPt : LD_Idxd_Pred<mnemonic, RC, predImmOp, 1 >;
- }
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isPredNot;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3},
+ !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0})));
+ let hasNewValue = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1);
+
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = 0b1;
+ let Inst{12} = isPredNew;
+ let Inst{11} = isPredNot;
+ let Inst{10-9} = src1;
+ let Inst{8-5} = offsetBits;
+ let Inst{4-0} = dst;
}
-}
-let addrMode = BaseImmOffset in {
- let accessSize = ByteAccess in {
- defm LDrib_indexed: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext,
- 11, 6>, AddrModeRel;
- defm LDriub_indexed: LD_Idxd <"memub" , "LDriub", IntRegs, s11_0Ext, u6_0Ext,
- 11, 6>, AddrModeRel;
- }
- let accessSize = HalfWordAccess in {
- defm LDrih_indexed: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext,
- 12, 7>, AddrModeRel;
- defm LDriuh_indexed: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext,
- 12, 7>, AddrModeRel;
+//===----------------------------------------------------------------------===//
+// Multiclass for post increment loads with immediate offset.
+//===----------------------------------------------------------------------===//
+
+multiclass LD_PostInc <string mnemonic, string BaseOp, RegisterClass RC,
+ Operand ImmOp, bits<4> MajOp> {
+ let BaseOpcode = "POST_"#BaseOp in {
+ let isPredicable = 1 in
+ def L2_#NAME#_pi : T_load_pi < mnemonic, RC, ImmOp, MajOp>;
+
+ // Predicated
+ def L2_p#NAME#t_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 0, 0>;
+ def L2_p#NAME#f_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 1, 0>;
+
+ // Predicated new
+ def L2_p#NAME#tnew_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 0, 1>;
+ def L2_p#NAME#fnew_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 1, 1>;
}
- let accessSize = WordAccess in
- defm LDriw_indexed: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext,
- 13, 8>, AddrModeRel;
+}
- let accessSize = DoubleWordAccess in
- defm LDrid_indexed: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext,
- 14, 9>, AddrModeRel;
+// post increment byte loads with immediate offset
+let accessSize = ByteAccess in {
+ defm loadrb : LD_PostInc <"memb", "LDrib", IntRegs, s4_0Imm, 0b1000>;
+ defm loadrub : LD_PostInc <"memub", "LDriub", IntRegs, s4_0Imm, 0b1001>;
}
-let AddedComplexity = 20 in {
-def : Pat < (i32 (sextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))),
- (LDrib_indexed IntRegs:$src1, s11_0ExtPred:$offset) >;
+// post increment halfword loads with immediate offset
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
+ defm loadrh : LD_PostInc <"memh", "LDrih", IntRegs, s4_1Imm, 0b1010>;
+ defm loadruh : LD_PostInc <"memuh", "LDriuh", IntRegs, s4_1Imm, 0b1011>;
+}
-def : Pat < (i32 (zextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))),
- (LDriub_indexed IntRegs:$src1, s11_0ExtPred:$offset) >;
+// post increment word loads with immediate offset
+let accessSize = WordAccess, opExtentAlign = 2 in
+defm loadri : LD_PostInc <"memw", "LDriw", IntRegs, s4_2Imm, 0b1100>;
-def : Pat < (i32 (sextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))),
- (LDrih_indexed IntRegs:$src1, s11_1ExtPred:$offset) >;
+// post increment doubleword loads with immediate offset
+let accessSize = DoubleWordAccess, opExtentAlign = 3 in
+defm loadrd : LD_PostInc <"memd", "LDrid", DoubleRegs, s4_3Imm, 0b1110>;
+
+// Rd=memb[u]h(Rx++#s4:1)
+// Rdd=memb[u]h(Rx++#s4:2)
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
+ def L2_loadbsw2_pi : T_load_pi <"membh", IntRegs, s4_1Imm, 0b0001>;
+ def L2_loadbzw2_pi : T_load_pi <"memubh", IntRegs, s4_1Imm, 0b0011>;
+}
+let accessSize = WordAccess, opExtentAlign = 2, hasNewValue = 0 in {
+ def L2_loadbsw4_pi : T_load_pi <"membh", DoubleRegs, s4_2Imm, 0b0111>;
+ def L2_loadbzw4_pi : T_load_pi <"memubh", DoubleRegs, s4_2Imm, 0b0101>;
+}
-def : Pat < (i32 (zextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))),
- (LDriuh_indexed IntRegs:$src1, s11_1ExtPred:$offset) >;
+//===----------------------------------------------------------------------===//
+// Template class for post increment fifo loads with immediate offset.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, addrMode = PostInc in
+class T_loadalign_pi <string mnemonic, Operand ImmOp, bits<4> MajOp >
+ : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$dst2),
+ (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset),
+ "$dst = "#mnemonic#"($src2++#$offset)" ,
+ [], "$src2 = $dst2, $src1 = $dst" > ,
+ PredNewRel {
+ bits<5> dst;
+ bits<5> src2;
+ bits<5> offset;
+ bits<4> offsetBits;
+
+ let offsetBits = !if (!eq(!cast<string>(ImmOp), "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0});
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13-12} = 0b00;
+ let Inst{8-5} = offsetBits;
+ let Inst{4-0} = dst;
+ }
-def : Pat < (i32 (load (add IntRegs:$src1, s11_2ExtPred:$offset))),
- (LDriw_indexed IntRegs:$src1, s11_2ExtPred:$offset) >;
+// Ryy=memh_fifo(Rx++#s4:1)
+// Ryy=memb_fifo(Rx++#s4:0)
+let accessSize = ByteAccess in
+def L2_loadalignb_pi : T_loadalign_pi <"memb_fifo", s4_0Imm, 0b0100>;
-def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))),
- (LDrid_indexed IntRegs:$src1, s11_3ExtPred:$offset) >;
-}
+let accessSize = HalfWordAccess, opExtentAlign = 1 in
+def L2_loadalignh_pi : T_loadalign_pi <"memh_fifo", s4_1Imm, 0b0010>;
//===----------------------------------------------------------------------===//
-// Post increment load
+// Template class for post increment loads with register offset.
//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, addrMode = PostInc in
+class T_load_pr <string mnemonic, RegisterClass RC, bits<4> MajOp,
+ MemAccessSize AccessSz>
+ : LDInstPI <(outs RC:$dst, IntRegs:$_dst_),
+ (ins IntRegs:$src1, ModRegs:$src2),
+ "$dst = "#mnemonic#"($src1++$src2)" ,
+ [], "$src1 = $_dst_" > {
+ bits<5> dst;
+ bits<5> src1;
+ bits<1> src2;
+
+ let accessSize = AccessSz;
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b110;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2;
+ let Inst{12} = 0b0;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = dst;
+ }
+
+let hasNewValue = 1 in {
+ def L2_loadrb_pr : T_load_pr <"memb", IntRegs, 0b1000, ByteAccess>;
+ def L2_loadrub_pr : T_load_pr <"memub", IntRegs, 0b1001, ByteAccess>;
+ def L2_loadrh_pr : T_load_pr <"memh", IntRegs, 0b1010, HalfWordAccess>;
+ def L2_loadruh_pr : T_load_pr <"memuh", IntRegs, 0b1011, HalfWordAccess>;
+ def L2_loadri_pr : T_load_pr <"memw", IntRegs, 0b1100, WordAccess>;
-multiclass LD_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2),
- (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#"$dst = "#mnemonic#"($src2++#$offset)",
- [],
- "$src2 = $dst2">;
+ def L2_loadbzw2_pr : T_load_pr <"memubh", IntRegs, 0b0011, HalfWordAccess>;
}
-multiclass LD_PostInc_Pred<string mnemonic, RegisterClass RC,
- Operand ImmOp, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : LD_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
- // Predicate new
- let Predicates = [HasV4T], validSubTargets = HasV4SubT in
- defm _cdn#NAME#_V4 : LD_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 1>;
- }
+def L2_loadrd_pr : T_load_pr <"memd", DoubleRegs, 0b1110, DoubleWordAccess>;
+def L2_loadbzw4_pr : T_load_pr <"memubh", DoubleRegs, 0b0101, WordAccess>;
+
+// Load predicate.
+let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
+ isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+def LDriw_pred : LDInst<(outs PredRegs:$dst),
+ (ins IntRegs:$addr, s11_2Ext:$off),
+ ".error \"should not emit\"", []>;
+
+let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0 in
+ def L2_deallocframe : LDInst<(outs), (ins),
+ "deallocframe",
+ []> {
+ let IClass = 0b1001;
+
+ let Inst{27-16} = 0b000000011110;
+ let Inst{13} = 0b0;
+ let Inst{4-0} = 0b11110;
}
-multiclass LD_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
- Operand ImmOp> {
+// Load / Post increment circular addressing mode.
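+// e.g. (illustrative): "r0 = memw(r1 ++ I:circ(m0))", where the increment
+// comes from the modifier register m0 rather than an immediate.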
+let Uses = [CS], hasSideEffects = 0 in
+class T_load_pcr<string mnemonic, RegisterClass RC, bits<4> MajOp>
+ : LDInst <(outs RC:$dst, IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu),
+ "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [],
+ "$Rz = $_dst_" > {
+ bits<5> dst;
+ bits<5> Rz;
+ bit Mu;
+
+ let hasNewValue = !if (!eq(!cast<string>(RC), "DoubleRegs"), 0, 1);
+ let IClass = 0b1001;
- let BaseOpcode = "POST_"#BaseOp in {
- let isPredicable = 1 in
- def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2),
- (ins IntRegs:$src1, ImmOp:$offset),
- "$dst = "#mnemonic#"($src1++#$offset)",
- [],
- "$src1 = $dst2">;
-
- let isPredicated = 1 in {
- defm Pt : LD_PostInc_Pred<mnemonic, RC, ImmOp, 0 >;
- defm NotPt : LD_PostInc_Pred<mnemonic, RC, ImmOp, 1 >;
- }
+ let Inst{27-25} = 0b100;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12} = 0b0;
+ let Inst{9} = 0b1;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = dst;
+ }
+
+let accessSize = ByteAccess in {
+ def L2_loadrb_pcr : T_load_pcr <"memb", IntRegs, 0b1000>;
+ def L2_loadrub_pcr : T_load_pcr <"memub", IntRegs, 0b1001>;
+}
+
+let accessSize = HalfWordAccess in {
+ def L2_loadrh_pcr : T_load_pcr <"memh", IntRegs, 0b1010>;
+ def L2_loadruh_pcr : T_load_pcr <"memuh", IntRegs, 0b1011>;
+ def L2_loadbsw2_pcr : T_load_pcr <"membh", IntRegs, 0b0001>;
+ def L2_loadbzw2_pcr : T_load_pcr <"memubh", IntRegs, 0b0011>;
+}
+
+let accessSize = WordAccess in {
+ def L2_loadri_pcr : T_load_pcr <"memw", IntRegs, 0b1100>;
+ let hasNewValue = 0 in {
+ def L2_loadbzw4_pcr : T_load_pcr <"memubh", DoubleRegs, 0b0101>;
+ def L2_loadbsw4_pcr : T_load_pcr <"membh", DoubleRegs, 0b0111>;
}
}
-let hasCtrlDep = 1, neverHasSideEffects = 1, addrMode = PostInc in {
- defm POST_LDrib : LD_PostInc<"memb", "LDrib", IntRegs, s4_0Imm>,
- PredNewRel;
- defm POST_LDriub : LD_PostInc<"memub", "LDriub", IntRegs, s4_0Imm>,
- PredNewRel;
- defm POST_LDrih : LD_PostInc<"memh", "LDrih", IntRegs, s4_1Imm>,
- PredNewRel;
- defm POST_LDriuh : LD_PostInc<"memuh", "LDriuh", IntRegs, s4_1Imm>,
- PredNewRel;
- defm POST_LDriw : LD_PostInc<"memw", "LDriw", IntRegs, s4_2Imm>,
- PredNewRel;
- defm POST_LDrid : LD_PostInc<"memd", "LDrid", DoubleRegs, s4_3Imm>,
- PredNewRel;
+let accessSize = DoubleWordAccess in
+def L2_loadrd_pcr : T_load_pcr <"memd", DoubleRegs, 0b1110>;
+
+// Load / Post increment circular addressing mode.
+let Uses = [CS], hasSideEffects = 0 in
+class T_loadalign_pcr<string mnemonic, bits<4> MajOp, MemAccessSize AccessSz >
+ : LDInst <(outs DoubleRegs:$dst, IntRegs:$_dst_),
+ (ins DoubleRegs:$_src_, IntRegs:$Rz, ModRegs:$Mu),
+ "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [],
+ "$Rz = $_dst_, $dst = $_src_" > {
+ bits<5> dst;
+ bits<5> Rz;
+ bit Mu;
+
+ let accessSize = AccessSz;
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b100;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12} = 0b0;
+ let Inst{9} = 0b1;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = dst;
+ }
+
+def L2_loadalignb_pcr : T_loadalign_pcr <"memb_fifo", 0b0100, ByteAccess>;
+def L2_loadalignh_pcr : T_loadalign_pcr <"memh_fifo", 0b0010, HalfWordAccess>;
+
+//===----------------------------------------------------------------------===//
+// Circular loads with immediate offset.
+//===----------------------------------------------------------------------===//
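+// e.g. (illustrative): "r0 = memw(r1 ++ #8:circ(m0))".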
+let Uses = [CS], mayLoad = 1, hasSideEffects = 0 in
+class T_load_pci <string mnemonic, RegisterClass RC,
+ Operand ImmOp, bits<4> MajOp>
+ : LDInstPI<(outs RC:$dst, IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ImmOp:$offset, ModRegs:$Mu),
+ "$dst = "#mnemonic#"($Rz ++ #$offset:circ($Mu))", [],
+ "$Rz = $_dst_"> {
+ bits<5> dst;
+ bits<5> Rz;
+ bits<1> Mu;
+ bits<7> offset;
+ bits<4> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let hasNewValue = !if (!eq(!cast<string>(RC), "DoubleRegs"), 0, 1);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3},
+ !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0})));
+ let IClass = 0b1001;
+ let Inst{27-25} = 0b100;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12} = 0b0;
+ let Inst{9} = 0b0;
+ let Inst{8-5} = offsetBits;
+ let Inst{4-0} = dst;
+ }
+
+// Byte variants of circ load
+let accessSize = ByteAccess in {
+ def L2_loadrb_pci : T_load_pci <"memb", IntRegs, s4_0Imm, 0b1000>;
+ def L2_loadrub_pci : T_load_pci <"memub", IntRegs, s4_0Imm, 0b1001>;
}
-def : Pat< (i32 (extloadi1 ADDRriS11_0:$addr)),
- (i32 (LDrib ADDRriS11_0:$addr)) >;
+// Half word variants of circ load
+let accessSize = HalfWordAccess in {
+ def L2_loadrh_pci : T_load_pci <"memh", IntRegs, s4_1Imm, 0b1010>;
+ def L2_loadruh_pci : T_load_pci <"memuh", IntRegs, s4_1Imm, 0b1011>;
+ def L2_loadbzw2_pci : T_load_pci <"memubh", IntRegs, s4_1Imm, 0b0011>;
+ def L2_loadbsw2_pci : T_load_pci <"membh", IntRegs, s4_1Imm, 0b0001>;
+}
-// Load byte any-extend.
-def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)),
- (i32 (LDrib ADDRriS11_0:$addr)) >;
+// Word variants of circ load
+let accessSize = WordAccess in
+def L2_loadri_pci : T_load_pci <"memw", IntRegs, s4_2Imm, 0b1100>;
-// Indexed load byte any-extend.
-let AddedComplexity = 20 in
-def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))),
- (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >;
+let accessSize = WordAccess, hasNewValue = 0 in {
+ def L2_loadbzw4_pci : T_load_pci <"memubh", DoubleRegs, s4_2Imm, 0b0101>;
+ def L2_loadbsw4_pci : T_load_pci <"membh", DoubleRegs, s4_2Imm, 0b0111>;
+}
-def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)),
- (i32 (LDrih ADDRriS11_1:$addr))>;
+let accessSize = DoubleWordAccess, hasNewValue = 0 in
+def L2_loadrd_pci : T_load_pci <"memd", DoubleRegs, s4_3Imm, 0b1110>;
-let AddedComplexity = 20 in
-def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))),
- (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >;
+//===----------------------------------------------------------------------===//
+// Circular loads - Pseudo
+//
+// Please note that the input operand order in the pseudo instructions
+// doesn't match that of the real instructions. The pseudo instructions'
+// operand order should mimic the ordering in the intrinsics. Also, 'src2'
+// doesn't appear in the AsmString because it's the same as 'dst'.
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in
+class T_load_pci_pseudo <string opc, RegisterClass RC>
+ : LDInstPI<(outs IntRegs:$_dst_, RC:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4Imm:$src4),
+ ".error \"$dst = "#opc#"($src1++#$src4:circ($src3))\"",
+ [], "$src1 = $_dst_">;
+
+def L2_loadrb_pci_pseudo : T_load_pci_pseudo <"memb", IntRegs>;
+def L2_loadrub_pci_pseudo : T_load_pci_pseudo <"memub", IntRegs>;
+def L2_loadrh_pci_pseudo : T_load_pci_pseudo <"memh", IntRegs>;
+def L2_loadruh_pci_pseudo : T_load_pci_pseudo <"memuh", IntRegs>;
+def L2_loadri_pci_pseudo : T_load_pci_pseudo <"memw", IntRegs>;
+def L2_loadrd_pci_pseudo : T_load_pci_pseudo <"memd", DoubleRegs>;
+
+
+// TODO: memb_fifo and memh_fifo must take the destination register as input.
+// One-off circ loads - not enough in common to break into a class.
+let accessSize = ByteAccess in
+def L2_loadalignb_pci : T_load_pci <"memb_fifo", DoubleRegs, s4_0Imm, 0b0100>;
+
+let accessSize = HalfWordAccess, opExtentAlign = 1 in
+def L2_loadalignh_pci : T_load_pci <"memh_fifo", DoubleRegs, s4_1Imm, 0b0010>;
+
+// L[24]_load[wd]_locked: Load word/double with lock.
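+// e.g. (illustrative): "r0 = memw_locked(r1)".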
+let isSoloAX = 1 in
+class T_load_locked <string mnemonic, RegisterClass RC>
+ : LD0Inst <(outs RC:$dst),
+ (ins IntRegs:$src),
+ "$dst = "#mnemonic#"($src)"> {
+ bits<5> dst;
+ bits<5> src;
+ let IClass = 0b1001;
+ let Inst{27-21} = 0b0010000;
+ let Inst{20-16} = src;
+ let Inst{13-12} = !if (!eq(mnemonic, "memd_locked"), 0b01, 0b00);
+ let Inst{5} = 0;
+ let Inst{4-0} = dst;
+}
+let hasNewValue = 1, accessSize = WordAccess, opNewValue = 0 in
+ def L2_loadw_locked : T_load_locked <"memw_locked", IntRegs>;
+let accessSize = DoubleWordAccess in
+ def L4_loadd_locked : T_load_locked <"memd_locked", DoubleRegs>;
+
+// S[24]_store[wd]_locked: Store word/double conditionally.
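+// e.g. (illustrative): "memw_locked(r0, p0) = r1", where p0 is set late to
+// indicate whether the conditional store succeeded.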
+let isSoloAX = 1, isPredicateLate = 1 in
+class T_store_locked <string mnemonic, RegisterClass RC>
+ : ST0Inst <(outs PredRegs:$Pd), (ins IntRegs:$Rs, RC:$Rt),
+ mnemonic#"($Rs, $Pd) = $Rt"> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1010;
+ let Inst{27-23} = 0b00001;
+ let Inst{22} = !if (!eq(mnemonic, "memw_locked"), 0b0, 0b1);
+ let Inst{21} = 0b1;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{1-0} = Pd;
+}
-let AddedComplexity = 10 in
-def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)),
- (i32 (LDriub ADDRriS11_0:$addr))>;
+let accessSize = WordAccess in
+def S2_storew_locked : T_store_locked <"memw_locked", IntRegs>;
-let AddedComplexity = 20 in
-def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))),
- (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>;
+let accessSize = DoubleWordAccess in
+def S4_stored_locked : T_store_locked <"memd_locked", DoubleRegs>;
-// Load predicate.
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
-isPseudo = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in
-def LDriw_pred : LDInst2<(outs PredRegs:$dst),
- (ins MEMri:$addr),
- "Error; should not emit",
- []>;
+//===----------------------------------------------------------------------===//
+// Bit-reversed loads with auto-increment register
+//===----------------------------------------------------------------------===//
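+// e.g. (illustrative): "r0 = memw(r1 ++ m0:brev)".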
+let hasSideEffects = 0 in
+class T_load_pbr<string mnemonic, RegisterClass RC,
+ MemAccessSize addrSize, bits<4> majOp>
+ : LDInst
+ <(outs RC:$dst, IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu),
+ "$dst = "#mnemonic#"($Rz ++ $Mu:brev)" ,
+ [] , "$Rz = $_dst_" > {
+
+ let accessSize = addrSize;
+
+ bits<5> dst;
+ bits<5> Rz;
+ bits<1> Mu;
+
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b111;
+ let Inst{24-21} = majOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12} = 0b0;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = dst;
+ }
-// Deallocate stack frame.
-let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
- def DEALLOCFRAME : LDInst2<(outs), (ins),
- "deallocframe",
- []>;
+let hasNewValue =1, opNewValue = 0 in {
+ def L2_loadrb_pbr : T_load_pbr <"memb", IntRegs, ByteAccess, 0b1000>;
+ def L2_loadrub_pbr : T_load_pbr <"memub", IntRegs, ByteAccess, 0b1001>;
+ def L2_loadrh_pbr : T_load_pbr <"memh", IntRegs, HalfWordAccess, 0b1010>;
+ def L2_loadruh_pbr : T_load_pbr <"memuh", IntRegs, HalfWordAccess, 0b1011>;
+ def L2_loadbsw2_pbr : T_load_pbr <"membh", IntRegs, HalfWordAccess, 0b0001>;
+ def L2_loadbzw2_pbr : T_load_pbr <"memubh", IntRegs, HalfWordAccess, 0b0011>;
+ def L2_loadri_pbr : T_load_pbr <"memw", IntRegs, WordAccess, 0b1100>;
}
-// Load and unpack bytes to halfwords.
+def L2_loadbzw4_pbr : T_load_pbr <"memubh", DoubleRegs, WordAccess, 0b0101>;
+def L2_loadbsw4_pbr : T_load_pbr <"membh", DoubleRegs, WordAccess, 0b0111>;
+def L2_loadrd_pbr : T_load_pbr <"memd", DoubleRegs, DoubleWordAccess, 0b1110>;
+
+def L2_loadalignb_pbr :T_load_pbr <"memb_fifo", DoubleRegs, ByteAccess, 0b0100>;
+def L2_loadalignh_pbr :T_load_pbr <"memh_fifo", DoubleRegs,
+ HalfWordAccess, 0b0010>;
+
+//===----------------------------------------------------------------------===//
+// Bit-reversed loads - Pseudo
+//
+// Please note that 'src2' doesn't appear in the AsmString because
+// it's the same as 'dst'.
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in
+class T_load_pbr_pseudo <string opc, RegisterClass RC>
+ : LDInstPI<(outs IntRegs:$_dst_, RC:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ ".error \"$dst = "#opc#"($src1++$src3:brev)\"",
+ [], "$src1 = $_dst_">;
+
+def L2_loadrb_pbr_pseudo : T_load_pbr_pseudo <"memb", IntRegs>;
+def L2_loadrub_pbr_pseudo : T_load_pbr_pseudo <"memub", IntRegs>;
+def L2_loadrh_pbr_pseudo : T_load_pbr_pseudo <"memh", IntRegs>;
+def L2_loadruh_pbr_pseudo : T_load_pbr_pseudo <"memuh", IntRegs>;
+def L2_loadri_pbr_pseudo : T_load_pbr_pseudo <"memw", IntRegs>;
+def L2_loadrd_pbr_pseudo : T_load_pbr_pseudo <"memd", DoubleRegs>;
+
//===----------------------------------------------------------------------===//
// LD -
//===----------------------------------------------------------------------===//
@@ -1259,180 +2291,934 @@ let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
//===----------------------------------------------------------------------===//
// MTYPE/MPYH +
//===----------------------------------------------------------------------===//
-// Multiply and use lower result.
-// Rd=+mpyi(Rs,#u8)
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 8 in
-def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Ext:$src2),
- "$dst =+ mpyi($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
- u8ExtPred:$src2))]>;
-// Rd=-mpyi(Rs,#u8)
-def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
- "$dst =- mpyi($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (ineg (mul (i32 IntRegs:$src1),
- u8ImmPred:$src2)))]>;
+//===----------------------------------------------------------------------===//
+// Template Class
+// MPYS / Multiply signed/unsigned halfwords
+//Rd=mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
+//===----------------------------------------------------------------------===//
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_M2_mpy < bits<2> LHbits, bit isSat, bit isRnd,
+ bit hasShift, bit isUnsigned>
+ : MInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = "#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l")
+ #", $Rt."#!if(LHbits{0},"h)","l)")
+ #!if(hasShift,":<<1","")
+ #!if(isRnd,":rnd","")
+ #!if(isSat,":sat",""),
+ [], "", M_tc_3x_SLOT23 > {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1100;
+ let Inst{23} = hasShift;
+ let Inst{22} = isUnsigned;
+ let Inst{21} = isRnd;
+ let Inst{7} = isSat;
+ let Inst{6-5} = LHbits;
+ let Inst{4-0} = Rd;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ }
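+
+// For example, M2_mpy_lh_s1 below prints as "$Rd = mpy($Rs.l, $Rt.h):<<1":
+// LHbits{1} selects the half of $Rs and LHbits{0} the half of $Rt.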
+
+//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpy_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 0>;
+def M2_mpy_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 0>;
+def M2_mpy_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 0>;
+def M2_mpy_lh_s0: T_M2_mpy<0b01, 0, 0, 0, 0>;
+def M2_mpy_hl_s1: T_M2_mpy<0b10, 0, 0, 1, 0>;
+def M2_mpy_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 0>;
+def M2_mpy_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 0>;
+def M2_mpy_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 0>;
+
+//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpyu_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 1>;
+def M2_mpyu_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 1>;
+def M2_mpyu_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 1>;
+def M2_mpyu_lh_s0: T_M2_mpy<0b01, 0, 0, 0, 1>;
+def M2_mpyu_hl_s1: T_M2_mpy<0b10, 0, 0, 1, 1>;
+def M2_mpyu_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 1>;
+def M2_mpyu_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 1>;
+def M2_mpyu_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 1>;
+
+//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1]:rnd
+def M2_mpy_rnd_ll_s1: T_M2_mpy <0b00, 0, 1, 1, 0>;
+def M2_mpy_rnd_ll_s0: T_M2_mpy <0b00, 0, 1, 0, 0>;
+def M2_mpy_rnd_lh_s1: T_M2_mpy <0b01, 0, 1, 1, 0>;
+def M2_mpy_rnd_lh_s0: T_M2_mpy <0b01, 0, 1, 0, 0>;
+def M2_mpy_rnd_hl_s1: T_M2_mpy <0b10, 0, 1, 1, 0>;
+def M2_mpy_rnd_hl_s0: T_M2_mpy <0b10, 0, 1, 0, 0>;
+def M2_mpy_rnd_hh_s1: T_M2_mpy <0b11, 0, 1, 1, 0>;
+def M2_mpy_rnd_hh_s0: T_M2_mpy <0b11, 0, 1, 0, 0>;
+
+//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:sat]
+//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
+let Defs = [USR_OVF] in {
+ def M2_mpy_sat_ll_s1: T_M2_mpy <0b00, 1, 0, 1, 0>;
+ def M2_mpy_sat_ll_s0: T_M2_mpy <0b00, 1, 0, 0, 0>;
+ def M2_mpy_sat_lh_s1: T_M2_mpy <0b01, 1, 0, 1, 0>;
+ def M2_mpy_sat_lh_s0: T_M2_mpy <0b01, 1, 0, 0, 0>;
+ def M2_mpy_sat_hl_s1: T_M2_mpy <0b10, 1, 0, 1, 0>;
+ def M2_mpy_sat_hl_s0: T_M2_mpy <0b10, 1, 0, 0, 0>;
+ def M2_mpy_sat_hh_s1: T_M2_mpy <0b11, 1, 0, 1, 0>;
+ def M2_mpy_sat_hh_s0: T_M2_mpy <0b11, 1, 0, 0, 0>;
+
+ def M2_mpy_sat_rnd_ll_s1: T_M2_mpy <0b00, 1, 1, 1, 0>;
+ def M2_mpy_sat_rnd_ll_s0: T_M2_mpy <0b00, 1, 1, 0, 0>;
+ def M2_mpy_sat_rnd_lh_s1: T_M2_mpy <0b01, 1, 1, 1, 0>;
+ def M2_mpy_sat_rnd_lh_s0: T_M2_mpy <0b01, 1, 1, 0, 0>;
+ def M2_mpy_sat_rnd_hl_s1: T_M2_mpy <0b10, 1, 1, 1, 0>;
+ def M2_mpy_sat_rnd_hl_s0: T_M2_mpy <0b10, 1, 1, 0, 0>;
+ def M2_mpy_sat_rnd_hh_s1: T_M2_mpy <0b11, 1, 1, 1, 0>;
+ def M2_mpy_sat_rnd_hh_s0: T_M2_mpy <0b11, 1, 1, 0, 0>;
+}
+
+//===----------------------------------------------------------------------===//
+// Template Class
+// MPYS / Multiply signed/unsigned halfwords and add the result to, or
+// subtract it from, the accumulator.
+//Rx [-+]= mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:sat]
+//===----------------------------------------------------------------------===//
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_M2_mpy_acc < bits<2> LHbits, bit isSat, bit isNac,
+ bit hasShift, bit isUnsigned >
+ : MInst_acc<(outs IntRegs:$Rx), (ins IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rx "#!if(isNac,"-= ","+= ")#!if(isUnsigned,"mpyu","mpy")
+ #"($Rs."#!if(LHbits{1},"h","l")
+ #", $Rt."#!if(LHbits{0},"h)","l)")
+ #!if(hasShift,":<<1","")
+ #!if(isSat,":sat",""),
+ [], "$dst2 = $Rx", M_tc_3x_SLOT23 > {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+ let Inst{27-24} = 0b1110;
+ let Inst{23} = hasShift;
+ let Inst{22} = isUnsigned;
+ let Inst{21} = isNac;
+ let Inst{7} = isSat;
+ let Inst{6-5} = LHbits;
+ let Inst{4-0} = Rx;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ }
+
+//Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpy_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 0>;
+def M2_mpy_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 0>;
+def M2_mpy_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 0>;
+def M2_mpy_acc_lh_s0: T_M2_mpy_acc <0b01, 0, 0, 0, 0>;
+def M2_mpy_acc_hl_s1: T_M2_mpy_acc <0b10, 0, 0, 1, 0>;
+def M2_mpy_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 0>;
+def M2_mpy_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 0>;
+def M2_mpy_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 0>;
+
+//Rx += mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpyu_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 1>;
+def M2_mpyu_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 1>;
+def M2_mpyu_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 1>;
+def M2_mpyu_acc_lh_s0: T_M2_mpy_acc <0b01, 0, 0, 0, 1>;
+def M2_mpyu_acc_hl_s1: T_M2_mpy_acc <0b10, 0, 0, 1, 1>;
+def M2_mpyu_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 1>;
+def M2_mpyu_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 1>;
+def M2_mpyu_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 1>;
+
+//Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpy_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 0>;
+def M2_mpy_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 0>;
+def M2_mpy_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 0>;
+def M2_mpy_nac_lh_s0: T_M2_mpy_acc <0b01, 0, 1, 0, 0>;
+def M2_mpy_nac_hl_s1: T_M2_mpy_acc <0b10, 0, 1, 1, 0>;
+def M2_mpy_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 0>;
+def M2_mpy_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 0>;
+def M2_mpy_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 0>;
+
+//Rx -= mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
+def M2_mpyu_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 1>;
+def M2_mpyu_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 1>;
+def M2_mpyu_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 1>;
+def M2_mpyu_nac_lh_s0: T_M2_mpy_acc <0b01, 0, 1, 0, 1>;
+def M2_mpyu_nac_hl_s1: T_M2_mpy_acc <0b10, 0, 1, 1, 1>;
+def M2_mpyu_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 1>;
+def M2_mpyu_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 1>;
+def M2_mpyu_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 1>;
+
+//Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat
+def M2_mpy_acc_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 0, 1, 0>;
+def M2_mpy_acc_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 0, 0, 0>;
+def M2_mpy_acc_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 0, 1, 0>;
+def M2_mpy_acc_sat_lh_s0: T_M2_mpy_acc <0b01, 1, 0, 0, 0>;
+def M2_mpy_acc_sat_hl_s1: T_M2_mpy_acc <0b10, 1, 0, 1, 0>;
+def M2_mpy_acc_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 0, 0, 0>;
+def M2_mpy_acc_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 0, 1, 0>;
+def M2_mpy_acc_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 0, 0, 0>;
+
+//Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat
+def M2_mpy_nac_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 1, 1, 0>;
+def M2_mpy_nac_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 1, 0, 0>;
+def M2_mpy_nac_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 1, 1, 0>;
+def M2_mpy_nac_sat_lh_s0: T_M2_mpy_acc <0b01, 1, 1, 0, 0>;
+def M2_mpy_nac_sat_hl_s1: T_M2_mpy_acc <0b10, 1, 1, 1, 0>;
+def M2_mpy_nac_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 1, 0, 0>;
+def M2_mpy_nac_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 1, 1, 0>;
+def M2_mpy_nac_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 1, 0, 0>;
+
+//===----------------------------------------------------------------------===//
+// Template Class
+// MPYS / Multiply signed/unsigned halfwords and add the result to, or
+// subtract it from, the 64-bit destination register.
+//Rxx [-+]= mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:sat]
+//===----------------------------------------------------------------------===//
+
+class T_M2_mpyd_acc < bits<2> LHbits, bit isNac, bit hasShift, bit isUnsigned>
+ : MInst_acc<(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rxx "#!if(isNac,"-= ","+= ")#!if(isUnsigned,"mpyu","mpy")
+ #"($Rs."#!if(LHbits{1},"h","l")
+ #", $Rt."#!if(LHbits{0},"h)","l)")
+ #!if(hasShift,":<<1",""),
+ [], "$dst2 = $Rxx", M_tc_3x_SLOT23 > {
+ bits<5> Rxx;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b0110;
+ let Inst{23} = hasShift;
+ let Inst{22} = isUnsigned;
+ let Inst{21} = isNac;
+ let Inst{7} = 0;
+ let Inst{6-5} = LHbits;
+ let Inst{4-0} = Rxx;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ }
+
+def M2_mpyd_acc_hh_s0: T_M2_mpyd_acc <0b11, 0, 0, 0>;
+def M2_mpyd_acc_hl_s0: T_M2_mpyd_acc <0b10, 0, 0, 0>;
+def M2_mpyd_acc_lh_s0: T_M2_mpyd_acc <0b01, 0, 0, 0>;
+def M2_mpyd_acc_ll_s0: T_M2_mpyd_acc <0b00, 0, 0, 0>;
+
+def M2_mpyd_acc_hh_s1: T_M2_mpyd_acc <0b11, 0, 1, 0>;
+def M2_mpyd_acc_hl_s1: T_M2_mpyd_acc <0b10, 0, 1, 0>;
+def M2_mpyd_acc_lh_s1: T_M2_mpyd_acc <0b01, 0, 1, 0>;
+def M2_mpyd_acc_ll_s1: T_M2_mpyd_acc <0b00, 0, 1, 0>;
+
+def M2_mpyd_nac_hh_s0: T_M2_mpyd_acc <0b11, 1, 0, 0>;
+def M2_mpyd_nac_hl_s0: T_M2_mpyd_acc <0b10, 1, 0, 0>;
+def M2_mpyd_nac_lh_s0: T_M2_mpyd_acc <0b01, 1, 0, 0>;
+def M2_mpyd_nac_ll_s0: T_M2_mpyd_acc <0b00, 1, 0, 0>;
+
+def M2_mpyd_nac_hh_s1: T_M2_mpyd_acc <0b11, 1, 1, 0>;
+def M2_mpyd_nac_hl_s1: T_M2_mpyd_acc <0b10, 1, 1, 0>;
+def M2_mpyd_nac_lh_s1: T_M2_mpyd_acc <0b01, 1, 1, 0>;
+def M2_mpyd_nac_ll_s1: T_M2_mpyd_acc <0b00, 1, 1, 0>;
+
+def M2_mpyud_acc_hh_s0: T_M2_mpyd_acc <0b11, 0, 0, 1>;
+def M2_mpyud_acc_hl_s0: T_M2_mpyd_acc <0b10, 0, 0, 1>;
+def M2_mpyud_acc_lh_s0: T_M2_mpyd_acc <0b01, 0, 0, 1>;
+def M2_mpyud_acc_ll_s0: T_M2_mpyd_acc <0b00, 0, 0, 1>;
+
+def M2_mpyud_acc_hh_s1: T_M2_mpyd_acc <0b11, 0, 1, 1>;
+def M2_mpyud_acc_hl_s1: T_M2_mpyd_acc <0b10, 0, 1, 1>;
+def M2_mpyud_acc_lh_s1: T_M2_mpyd_acc <0b01, 0, 1, 1>;
+def M2_mpyud_acc_ll_s1: T_M2_mpyd_acc <0b00, 0, 1, 1>;
+
+def M2_mpyud_nac_hh_s0: T_M2_mpyd_acc <0b11, 1, 0, 1>;
+def M2_mpyud_nac_hl_s0: T_M2_mpyd_acc <0b10, 1, 0, 1>;
+def M2_mpyud_nac_lh_s0: T_M2_mpyd_acc <0b01, 1, 0, 1>;
+def M2_mpyud_nac_ll_s0: T_M2_mpyd_acc <0b00, 1, 0, 1>;
+
+def M2_mpyud_nac_hh_s1: T_M2_mpyd_acc <0b11, 1, 1, 1>;
+def M2_mpyud_nac_hl_s1: T_M2_mpyd_acc <0b10, 1, 1, 1>;
+def M2_mpyud_nac_lh_s1: T_M2_mpyd_acc <0b01, 1, 1, 1>;
+def M2_mpyud_nac_ll_s1: T_M2_mpyd_acc <0b00, 1, 1, 1>;
+
+//===----------------------------------------------------------------------===//
+// Template Class -- Vector Multiply
+// Used for complex multiply (real or imaginary), dual multiply, and
+// multiply even halfwords.
+//===----------------------------------------------------------------------===//
+class T_M2_vmpy < string opc, bits<3> MajOp, bits<3> MinOp, bit hasShift,
+ bit isRnd, bit isSat >
+ : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rdd = "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","")
+ #!if(isRnd,":rnd","")
+ #!if(isSat,":sat",""),
+ [] > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+// Vector complex multiply imaginary: Rdd=vcmpyi(Rss,Rtt)[:<<1]:sat
+let Defs = [USR_OVF] in {
+def M2_vcmpy_s1_sat_i: T_M2_vmpy <"vcmpyi", 0b110, 0b110, 1, 0, 1>;
+def M2_vcmpy_s0_sat_i: T_M2_vmpy <"vcmpyi", 0b010, 0b110, 0, 0, 1>;
+
+// Vector complex multiply real: Rdd=vcmpyr(Rss,Rtt)[:<<1]:sat
+def M2_vcmpy_s1_sat_r: T_M2_vmpy <"vcmpyr", 0b101, 0b110, 1, 0, 1>;
+def M2_vcmpy_s0_sat_r: T_M2_vmpy <"vcmpyr", 0b001, 0b110, 0, 0, 1>;
+
+// Vector dual multiply: Rdd=vdmpy(Rss,Rtt)[:<<1]:sat
+def M2_vdmpys_s1: T_M2_vmpy <"vdmpy", 0b100, 0b100, 1, 0, 1>;
+def M2_vdmpys_s0: T_M2_vmpy <"vdmpy", 0b000, 0b100, 0, 0, 1>;
+
+// Vector multiply even halfwords: Rdd=vmpyeh(Rss,Rtt)[:<<1]:sat
+def M2_vmpy2es_s1: T_M2_vmpy <"vmpyeh", 0b100, 0b110, 1, 0, 1>;
+def M2_vmpy2es_s0: T_M2_vmpy <"vmpyeh", 0b000, 0b110, 0, 0, 1>;
+
+//Rdd=vmpywoh(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmpyh_s0: T_M2_vmpy <"vmpywoh", 0b000, 0b111, 0, 0, 1>;
+def M2_mmpyh_s1: T_M2_vmpy <"vmpywoh", 0b100, 0b111, 1, 0, 1>;
+def M2_mmpyh_rs0: T_M2_vmpy <"vmpywoh", 0b001, 0b111, 0, 1, 1>;
+def M2_mmpyh_rs1: T_M2_vmpy <"vmpywoh", 0b101, 0b111, 1, 1, 1>;
+
+//Rdd=vmpyweh(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmpyl_s0: T_M2_vmpy <"vmpyweh", 0b000, 0b101, 0, 0, 1>;
+def M2_mmpyl_s1: T_M2_vmpy <"vmpyweh", 0b100, 0b101, 1, 0, 1>;
+def M2_mmpyl_rs0: T_M2_vmpy <"vmpyweh", 0b001, 0b101, 0, 1, 1>;
+def M2_mmpyl_rs1: T_M2_vmpy <"vmpyweh", 0b101, 0b101, 1, 1, 1>;
+
+//Rdd=vmpywouh(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmpyuh_s0: T_M2_vmpy <"vmpywouh", 0b010, 0b111, 0, 0, 1>;
+def M2_mmpyuh_s1: T_M2_vmpy <"vmpywouh", 0b110, 0b111, 1, 0, 1>;
+def M2_mmpyuh_rs0: T_M2_vmpy <"vmpywouh", 0b011, 0b111, 0, 1, 1>;
+def M2_mmpyuh_rs1: T_M2_vmpy <"vmpywouh", 0b111, 0b111, 1, 1, 1>;
+
+//Rdd=vmpyweuh(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmpyul_s0: T_M2_vmpy <"vmpyweuh", 0b010, 0b101, 0, 0, 1>;
+def M2_mmpyul_s1: T_M2_vmpy <"vmpyweuh", 0b110, 0b101, 1, 0, 1>;
+def M2_mmpyul_rs0: T_M2_vmpy <"vmpyweuh", 0b011, 0b101, 0, 1, 1>;
+def M2_mmpyul_rs1: T_M2_vmpy <"vmpyweuh", 0b111, 0b101, 1, 1, 1>;
+}
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_MType_mpy <string mnemonic, bits<4> RegTyBits, RegisterClass RC,
+ bits<3> MajOp, bits<3> MinOp, bit isSat = 0, bit isRnd = 0,
+ string op2Suffix = "", bit isRaw = 0, bit isHi = 0 >
+ : MInst <(outs IntRegs:$dst), (ins RC:$src1, RC:$src2),
+ "$dst = "#mnemonic
+ #"($src1, $src2"#op2Suffix#")"
+ #!if(MajOp{2}, ":<<1", "")
+ #!if(isRnd, ":rnd", "")
+ #!if(isSat, ":sat", "")
+ #!if(isRaw, !if(isHi, ":raw:hi", ":raw:lo"), ""), [] > {
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = RegTyBits;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = src2;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
+ }
+
+class T_MType_vrcmpy <string mnemonic, bits<3> MajOp, bits<3> MinOp, bit isHi>
+ : T_MType_mpy <mnemonic, 0b1001, DoubleRegs, MajOp, MinOp, 1, 1, "", 1, isHi>;
+
+class T_MType_dd <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSat = 0, bit isRnd = 0 >
+ : T_MType_mpy <mnemonic, 0b1001, DoubleRegs, MajOp, MinOp, isSat, isRnd>;
+
+class T_MType_rr1 <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSat = 0, bit isRnd = 0 >
+ : T_MType_mpy<mnemonic, 0b1101, IntRegs, MajOp, MinOp, isSat, isRnd>;
+
+class T_MType_rr2 <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSat = 0, bit isRnd = 0, string op2str = "" >
+ : T_MType_mpy<mnemonic, 0b1101, IntRegs, MajOp, MinOp, isSat, isRnd, op2str>;
+
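+// For example, M2_cmpyrsc_s0 below prints as
+// "$dst = cmpy($src1, $src2*):rnd:sat"; the "*" supplied through op2Suffix
+// marks the conjugated operand.
+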
+def M2_vradduh : T_MType_dd <"vradduh", 0b000, 0b001, 0, 0>;
+def M2_vdmpyrs_s0 : T_MType_dd <"vdmpy", 0b000, 0b000, 1, 1>;
+def M2_vdmpyrs_s1 : T_MType_dd <"vdmpy", 0b100, 0b000, 1, 1>;
+
+let CextOpcode = "mpyi", InputType = "reg" in
+def M2_mpyi : T_MType_rr1 <"mpyi", 0b000, 0b000>, ImmRegRel;
+
+def M2_mpy_up : T_MType_rr1 <"mpy", 0b000, 0b001>;
+def M2_mpyu_up : T_MType_rr1 <"mpyu", 0b010, 0b001>;
+
+def M2_dpmpyss_rnd_s0 : T_MType_rr1 <"mpy", 0b001, 0b001, 0, 1>;
+
+def M2_vmpy2s_s0pack : T_MType_rr1 <"vmpyh", 0b001, 0b111, 1, 1>;
+def M2_vmpy2s_s1pack : T_MType_rr1 <"vmpyh", 0b101, 0b111, 1, 1>;
+
+def M2_hmmpyh_rs1 : T_MType_rr2 <"mpy", 0b101, 0b100, 1, 1, ".h">;
+def M2_hmmpyl_rs1 : T_MType_rr2 <"mpy", 0b111, 0b100, 1, 1, ".l">;
+
+def M2_cmpyrs_s0 : T_MType_rr2 <"cmpy", 0b001, 0b110, 1, 1>;
+def M2_cmpyrs_s1 : T_MType_rr2 <"cmpy", 0b101, 0b110, 1, 1>;
+def M2_cmpyrsc_s0 : T_MType_rr2 <"cmpy", 0b011, 0b110, 1, 1, "*">;
+def M2_cmpyrsc_s1 : T_MType_rr2 <"cmpy", 0b111, 0b110, 1, 1, "*">;
+
+// V4 Instructions
+def M2_vraddh : T_MType_dd <"vraddh", 0b001, 0b111, 0>;
+def M2_mpysu_up : T_MType_rr1 <"mpysu", 0b011, 0b001, 0>;
+def M2_mpy_up_s1 : T_MType_rr1 <"mpy", 0b101, 0b010, 0>;
+def M2_mpy_up_s1_sat : T_MType_rr1 <"mpy", 0b111, 0b000, 1>;
+
+def M2_hmmpyh_s1 : T_MType_rr2 <"mpy", 0b101, 0b000, 1, 0, ".h">;
+def M2_hmmpyl_s1 : T_MType_rr2 <"mpy", 0b101, 0b001, 1, 0, ".l">;
+
+def: Pat<(i32 (mul I32:$src1, I32:$src2)), (M2_mpyi I32:$src1, I32:$src2)>;
+def: Pat<(i32 (mulhs I32:$src1, I32:$src2)), (M2_mpy_up I32:$src1, I32:$src2)>;
+def: Pat<(i32 (mulhu I32:$src1, I32:$src2)), (M2_mpyu_up I32:$src1, I32:$src2)>;
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_MType_mpy_ri <bit isNeg, Operand ImmOp, list<dag> pattern>
+ : MInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, ImmOp:$u8),
+ "$Rd ="#!if(isNeg, "- ", "+ ")#"mpyi($Rs, #$u8)" ,
+ pattern, "", M_tc_3x_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<8> u8;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b0000;
+ let Inst{23} = isNeg;
+ let Inst{13} = 0b0;
+ let Inst{4-0} = Rd;
+ let Inst{20-16} = Rs;
+ let Inst{12-5} = u8;
+ }
+
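+// M2_mpysip below prints as "$Rd =+ mpyi($Rs, #$u8)" and M2_mpysin as
+// "$Rd =- mpyi($Rs, #$u8)", matching the two assembler forms of signed mpyi.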
+let isExtendable = 1, opExtentBits = 8, opExtendable = 2 in
+def M2_mpysip : T_MType_mpy_ri <0, u8Ext,
+ [(set (i32 IntRegs:$Rd), (mul IntRegs:$Rs, u8ExtPred:$u8))]>;
+
+def M2_mpysin : T_MType_mpy_ri <1, u8Imm,
+ [(set (i32 IntRegs:$Rd), (ineg (mul IntRegs:$Rs,
+ u8ImmPred:$u8)))]>;
+
+// Assembler mapped to M2_mpyi
+let isAsmParserOnly = 1 in
+def M2_mpyui : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpyui($src1, $src2)">;
// Rd=mpyi(Rs,#m9)
// s9 is NOT the same as m9 - but it works... so far.
-// Assembler maps to either Rd=+mpyi(Rs,#u8 or Rd=-mpyi(Rs,#u8)
+// Assembler maps to either Rd=+mpyi(Rs,#u8) or Rd=-mpyi(Rs,#u8)
// depending on the value of m9. See Arch Spec.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 9,
-CextOpcode = "MPYI", InputType = "imm" in
-def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Ext:$src2),
- "$dst = mpyi($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
- s9ExtPred:$src2))]>, ImmRegRel;
-
-// Rd=mpyi(Rs,Rt)
-let CextOpcode = "MPYI", InputType = "reg" in
-def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpyi($src1, $src2)",
- [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>, ImmRegRel;
-
-// Rx+=mpyi(Rs,#u8)
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 0, opExtentBits = 8,
-CextOpcode = "MPYI_acc", InputType = "imm" in
-def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
- "$dst += mpyi($src2, #$src3)",
- [(set (i32 IntRegs:$dst),
- (add (mul (i32 IntRegs:$src2), u8ExtPred:$src3),
- (i32 IntRegs:$src1)))],
- "$src1 = $dst">, ImmRegRel;
+ CextOpcode = "mpyi", InputType = "imm", hasNewValue = 1,
+ isAsmParserOnly = 1 in
+def M2_mpysmi : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Ext:$src2),
+ "$dst = mpyi($src1, #$src2)",
+ [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
+ s9ExtPred:$src2))]>, ImmRegRel;
+
+let hasNewValue = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 3,
+ InputType = "imm" in
+class T_MType_acc_ri <string mnemonic, bits<3> MajOp, Operand ImmOp,
+ list<dag> pattern = []>
+ : MInst < (outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, ImmOp:$src3),
+ "$dst "#mnemonic#"($src2, #$src3)",
+ pattern, "$src1 = $dst", M_tc_2_SLOT23> {
+ bits<5> dst;
+ bits<5> src2;
+ bits<8> src3;
+
+ let IClass = 0b1110;
+
+ let Inst{27-26} = 0b00;
+ let Inst{25-23} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = 0b0;
+ let Inst{12-5} = src3;
+ let Inst{4-0} = dst;
+ }
-// Rx+=mpyi(Rs,Rt)
-let CextOpcode = "MPYI_acc", InputType = "reg" in
-def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst),
+let InputType = "reg", hasNewValue = 1 in
+class T_MType_acc_rr <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSwap = 0, list<dag> pattern = [], bit hasNot = 0,
+ bit isSat = 0, bit isShift = 0>
+ : MInst < (outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst += mpyi($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
- (i32 IntRegs:$src1)))],
- "$src1 = $dst">, ImmRegRel;
-
-// Rx-=mpyi(Rs,#u8)
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 0, opExtentBits = 8 in
-def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
- "$dst -= mpyi($src2, #$src3)",
- [(set (i32 IntRegs:$dst),
- (sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
- u8ExtPred:$src3)))],
- "$src1 = $dst">;
-
-// Multiply and use upper result.
-// Rd=mpy(Rs,Rt.H):<<1:rnd:sat
-// Rd=mpy(Rs,Rt.L):<<1:rnd:sat
-// Rd=mpy(Rs,Rt)
-def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpy($src1, $src2)",
- [(set (i32 IntRegs:$dst), (mulhs (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-// Rd=mpy(Rs,Rt):rnd
-// Rd=mpyu(Rs,Rt)
-def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpyu($src1, $src2)",
- [(set (i32 IntRegs:$dst), (mulhu (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-// Multiply and use full result.
-// Rdd=mpyu(Rs,Rt)
-def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpyu($src1, $src2)",
- [(set (i64 DoubleRegs:$dst),
- (mul (i64 (anyext (i32 IntRegs:$src1))),
- (i64 (anyext (i32 IntRegs:$src2)))))]>;
-
-// Rdd=mpy(Rs,Rt)
-def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpy($src1, $src2)",
- [(set (i64 DoubleRegs:$dst),
- (mul (i64 (sext (i32 IntRegs:$src1))),
- (i64 (sext (i32 IntRegs:$src2)))))]>;
+ "$dst "#mnemonic#"($src2, "#!if(hasNot, "~$src3)","$src3)")
+ #!if(isShift, ":<<1", "")
+ #!if(isSat, ":sat", ""),
+ pattern, "$src1 = $dst", M_tc_2_SLOT23 > {
+ bits<5> dst;
+ bits<5> src2;
+ bits<5> src3;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1111;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = !if(isSwap, src3, src2);
+ let Inst{13} = 0b0;
+ let Inst{12-8} = !if(isSwap, src2, src3);
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
+ }
+
+let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23 in {
+ def M2_macsip : T_MType_acc_ri <"+= mpyi", 0b010, u8Ext,
+ [(set (i32 IntRegs:$dst),
+ (add (mul IntRegs:$src2, u8ExtPred:$src3),
+ IntRegs:$src1))]>, ImmRegRel;
+
+ def M2_maci : T_MType_acc_rr <"+= mpyi", 0b000, 0b000, 0,
+ [(set (i32 IntRegs:$dst),
+ (add (mul IntRegs:$src2, IntRegs:$src3),
+ IntRegs:$src1))]>, ImmRegRel;
+}
+
+let CextOpcode = "ADD_acc" in {
+ let isExtentSigned = 1 in
+ def M2_accii : T_MType_acc_ri <"+= add", 0b100, s8Ext,
+ [(set (i32 IntRegs:$dst),
+ (add (add (i32 IntRegs:$src2), s8_16ExtPred:$src3),
+ (i32 IntRegs:$src1)))]>, ImmRegRel;
+
+ def M2_acci : T_MType_acc_rr <"+= add", 0b000, 0b001, 0,
+ [(set (i32 IntRegs:$dst),
+ (add (add (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
+ (i32 IntRegs:$src1)))]>, ImmRegRel;
+}
+
+let CextOpcode = "SUB_acc" in {
+ let isExtentSigned = 1 in
+ def M2_naccii : T_MType_acc_ri <"-= add", 0b101, s8Ext>, ImmRegRel;
+
+ def M2_nacci : T_MType_acc_rr <"-= add", 0b100, 0b001, 0>, ImmRegRel;
+}
+
+let Itinerary = M_tc_3x_SLOT23 in
+def M2_macsin : T_MType_acc_ri <"-= mpyi", 0b011, u8Ext>;
+
+def M2_xor_xacc : T_MType_acc_rr < "^= xor", 0b100, 0b011, 0>;
+def M2_subacc : T_MType_acc_rr <"+= sub", 0b000, 0b011, 1>;
+
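+// Map a (secOp x, (firstOp y, z)) dag onto the corresponding accumulating
+// instruction; pat1 takes an immediate third operand, pat2 a register.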
+class T_MType_acc_pat1 <InstHexagon MI, SDNode firstOp, SDNode secOp,
+ PatLeaf ImmPred>
+ : Pat <(secOp IntRegs:$src1, (firstOp IntRegs:$src2, ImmPred:$src3)),
+ (MI IntRegs:$src1, IntRegs:$src2, ImmPred:$src3)>;
+
+class T_MType_acc_pat2 <InstHexagon MI, SDNode firstOp, SDNode secOp>
+ : Pat <(i32 (secOp IntRegs:$src1, (firstOp IntRegs:$src2, IntRegs:$src3))),
+ (MI IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def : T_MType_acc_pat2 <M2_xor_xacc, xor, xor>;
+def : T_MType_acc_pat1 <M2_macsin, mul, sub, u8ExtPred>;
+
+def : T_MType_acc_pat1 <M2_naccii, add, sub, s8_16ExtPred>;
+def : T_MType_acc_pat2 <M2_nacci, add, sub>;
+
+//===----------------------------------------------------------------------===//
+// Template Class -- XType Vector Instructions
+//===----------------------------------------------------------------------===//
+class T_XTYPE_Vect < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj >
+ : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rdd = "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"),
+ [] > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+class T_XTYPE_Vect_acc < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj >
+ : MInst <(outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rdd += "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"),
+ [], "$dst2 = $Rdd",M_tc_3x_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1010;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+class T_XTYPE_Vect_diff < bits<3> MajOp, string opc >
+ : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rtt, DoubleRegs:$Rss),
+ "$Rdd = "#opc#"($Rtt, $Rss)",
+ [], "",M_tc_2_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = 0b000;
+ let Inst{4-0} = Rdd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+// Vector reduce add unsigned bytes: Rdd[+]=vraddub(Rss,Rtt)
+def A2_vraddub: T_XTYPE_Vect <"vraddub", 0b010, 0b001, 0>;
+def A2_vraddub_acc: T_XTYPE_Vect_acc <"vraddub", 0b010, 0b001, 0>;
+
+// Vector sum of absolute differences unsigned bytes: Rdd=vrsadub(Rss,Rtt)
+def A2_vrsadub: T_XTYPE_Vect <"vrsadub", 0b010, 0b010, 0>;
+def A2_vrsadub_acc: T_XTYPE_Vect_acc <"vrsadub", 0b010, 0b010, 0>;
+
+// Vector absolute difference: Rdd=vabsdiffh(Rtt,Rss)
+def M2_vabsdiffh: T_XTYPE_Vect_diff<0b011, "vabsdiffh">;
+
+// Vector absolute difference words: Rdd=vabsdiffw(Rtt,Rss)
+def M2_vabsdiffw: T_XTYPE_Vect_diff<0b001, "vabsdiffw">;
+
+// Vector reduce complex multiply real or imaginary:
+// Rdd[+]=vrcmpy[ir](Rss,Rtt[*])
+def M2_vrcmpyi_s0: T_XTYPE_Vect <"vrcmpyi", 0b000, 0b000, 0>;
+def M2_vrcmpyi_s0c: T_XTYPE_Vect <"vrcmpyi", 0b010, 0b000, 1>;
+def M2_vrcmaci_s0: T_XTYPE_Vect_acc <"vrcmpyi", 0b000, 0b000, 0>;
+def M2_vrcmaci_s0c: T_XTYPE_Vect_acc <"vrcmpyi", 0b010, 0b000, 1>;
+
+def M2_vrcmpyr_s0: T_XTYPE_Vect <"vrcmpyr", 0b000, 0b001, 0>;
+def M2_vrcmpyr_s0c: T_XTYPE_Vect <"vrcmpyr", 0b011, 0b001, 1>;
+def M2_vrcmacr_s0: T_XTYPE_Vect_acc <"vrcmpyr", 0b000, 0b001, 0>;
+def M2_vrcmacr_s0c: T_XTYPE_Vect_acc <"vrcmpyr", 0b011, 0b001, 1>;
+
+// Vector reduce halfwords:
+// Rdd[+]=vrmpyh(Rss,Rtt)
+def M2_vrmpy_s0: T_XTYPE_Vect <"vrmpyh", 0b000, 0b010, 0>;
+def M2_vrmac_s0: T_XTYPE_Vect_acc <"vrmpyh", 0b000, 0b010, 0>;
+
+//===----------------------------------------------------------------------===//
+// Template Class -- Vector Multiply with accumulation.
+// Used for complex multiply (real or imaginary), dual multiply, and
+// multiply even halfwords.
+//===----------------------------------------------------------------------===//
+let Defs = [USR_OVF] in
+class T_M2_vmpy_acc_sat < string opc, bits<3> MajOp, bits<3> MinOp,
+ bit hasShift, bit isRnd >
+ : MInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","")
+ #!if(isRnd,":rnd","")#":sat",
+ [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1010;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rxx;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+class T_M2_vmpy_acc < string opc, bits<3> MajOp, bits<3> MinOp,
+ bit hasShift, bit isRnd >
+ : MInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","")
+ #!if(isRnd,":rnd",""),
+ [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1010;
+ let Inst{23-21} = MajOp;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rxx;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
+
+// Vector multiply word by signed half with accumulation
+// Rxx+=vmpyw[eo]h(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmacls_s1: T_M2_vmpy_acc_sat <"vmpyweh", 0b100, 0b101, 1, 0>;
+def M2_mmacls_s0: T_M2_vmpy_acc_sat <"vmpyweh", 0b000, 0b101, 0, 0>;
+def M2_mmacls_rs1: T_M2_vmpy_acc_sat <"vmpyweh", 0b101, 0b101, 1, 1>;
+def M2_mmacls_rs0: T_M2_vmpy_acc_sat <"vmpyweh", 0b001, 0b101, 0, 1>;
+
+def M2_mmachs_s1: T_M2_vmpy_acc_sat <"vmpywoh", 0b100, 0b111, 1, 0>;
+def M2_mmachs_s0: T_M2_vmpy_acc_sat <"vmpywoh", 0b000, 0b111, 0, 0>;
+def M2_mmachs_rs1: T_M2_vmpy_acc_sat <"vmpywoh", 0b101, 0b111, 1, 1>;
+def M2_mmachs_rs0: T_M2_vmpy_acc_sat <"vmpywoh", 0b001, 0b111, 0, 1>;
+
+// Vector multiply word by unsigned half with accumulation
+// Rxx+=vmpyw[eo]uh(Rss,Rtt)[:<<1][:rnd]:sat
+def M2_mmaculs_s1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b110, 0b101, 1, 0>;
+def M2_mmaculs_s0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b010, 0b101, 0, 0>;
+def M2_mmaculs_rs1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b111, 0b101, 1, 1>;
+def M2_mmaculs_rs0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b011, 0b101, 0, 1>;
+
+def M2_mmacuhs_s1: T_M2_vmpy_acc_sat <"vmpywouh", 0b110, 0b111, 1, 0>;
+def M2_mmacuhs_s0: T_M2_vmpy_acc_sat <"vmpywouh", 0b010, 0b111, 0, 0>;
+def M2_mmacuhs_rs1: T_M2_vmpy_acc_sat <"vmpywouh", 0b111, 0b111, 1, 1>;
+def M2_mmacuhs_rs0: T_M2_vmpy_acc_sat <"vmpywouh", 0b011, 0b111, 0, 1>;
+
+// Vector multiply even halfwords with accumulation
+// Rxx+=vmpyeh(Rss,Rtt)[:<<1][:sat]
+def M2_vmac2es: T_M2_vmpy_acc <"vmpyeh", 0b001, 0b010, 0, 0>;
+def M2_vmac2es_s1: T_M2_vmpy_acc_sat <"vmpyeh", 0b100, 0b110, 1, 0>;
+def M2_vmac2es_s0: T_M2_vmpy_acc_sat <"vmpyeh", 0b000, 0b110, 0, 0>;
+
+// Vector dual multiply with accumulation
+// Rxx+=vdmpy(Rss,Rtt)[:<<1]:sat
+def M2_vdmacs_s1: T_M2_vmpy_acc_sat <"vdmpy", 0b100, 0b100, 1, 0>;
+def M2_vdmacs_s0: T_M2_vmpy_acc_sat <"vdmpy", 0b000, 0b100, 0, 0>;
+
+// Vector complex multiply real or imaginary with accumulation
+// Rxx+=vcmpy[ir](Rss,Rtt):sat
+def M2_vcmac_s0_sat_r: T_M2_vmpy_acc_sat <"vcmpyr", 0b001, 0b100, 0, 0>;
+def M2_vcmac_s0_sat_i: T_M2_vmpy_acc_sat <"vcmpyi", 0b010, 0b100, 0, 0>;
+
+//===----------------------------------------------------------------------===//
+// Template Class -- Multiply signed/unsigned halfwords, with and without
+// rounding, producing a 64-bit result
+//===----------------------------------------------------------------------===//
+class T_M2_mpyd < bits<2> LHbits, bit isRnd, bit hasShift, bit isUnsigned >
+ : MInst < (outs DoubleRegs:$Rdd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rdd = "#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l")
+ #", $Rt."#!if(LHbits{0},"h)","l)")
+ #!if(hasShift,":<<1","")
+ #!if(isRnd,":rnd",""),
+ [] > {
+ bits<5> Rdd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b0100;
+ let Inst{23} = hasShift;
+ let Inst{22} = isUnsigned;
+ let Inst{21} = isRnd;
+ let Inst{6-5} = LHbits;
+ let Inst{4-0} = Rdd;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+}
+
+def M2_mpyd_hh_s0: T_M2_mpyd<0b11, 0, 0, 0>;
+def M2_mpyd_hl_s0: T_M2_mpyd<0b10, 0, 0, 0>;
+def M2_mpyd_lh_s0: T_M2_mpyd<0b01, 0, 0, 0>;
+def M2_mpyd_ll_s0: T_M2_mpyd<0b00, 0, 0, 0>;
+
+def M2_mpyd_hh_s1: T_M2_mpyd<0b11, 0, 1, 0>;
+def M2_mpyd_hl_s1: T_M2_mpyd<0b10, 0, 1, 0>;
+def M2_mpyd_lh_s1: T_M2_mpyd<0b01, 0, 1, 0>;
+def M2_mpyd_ll_s1: T_M2_mpyd<0b00, 0, 1, 0>;
+
+def M2_mpyd_rnd_hh_s0: T_M2_mpyd<0b11, 1, 0, 0>;
+def M2_mpyd_rnd_hl_s0: T_M2_mpyd<0b10, 1, 0, 0>;
+def M2_mpyd_rnd_lh_s0: T_M2_mpyd<0b01, 1, 0, 0>;
+def M2_mpyd_rnd_ll_s0: T_M2_mpyd<0b00, 1, 0, 0>;
+
+def M2_mpyd_rnd_hh_s1: T_M2_mpyd<0b11, 1, 1, 0>;
+def M2_mpyd_rnd_hl_s1: T_M2_mpyd<0b10, 1, 1, 0>;
+def M2_mpyd_rnd_lh_s1: T_M2_mpyd<0b01, 1, 1, 0>;
+def M2_mpyd_rnd_ll_s1: T_M2_mpyd<0b00, 1, 1, 0>;
+
+//Rdd=mpyu(Rs.[HL],Rt.[HL])[:<<1]
+def M2_mpyud_hh_s0: T_M2_mpyd<0b11, 0, 0, 1>;
+def M2_mpyud_hl_s0: T_M2_mpyd<0b10, 0, 0, 1>;
+def M2_mpyud_lh_s0: T_M2_mpyd<0b01, 0, 0, 1>;
+def M2_mpyud_ll_s0: T_M2_mpyd<0b00, 0, 0, 1>;
+
+def M2_mpyud_hh_s1: T_M2_mpyd<0b11, 0, 1, 1>;
+def M2_mpyud_hl_s1: T_M2_mpyd<0b10, 0, 1, 1>;
+def M2_mpyud_lh_s1: T_M2_mpyd<0b01, 0, 1, 1>;
+def M2_mpyud_ll_s1: T_M2_mpyd<0b00, 0, 1, 1>;
+
+//===----------------------------------------------------------------------===//
+// Template Class for xtype mpy:
+// Vector multiply
+// Complex multiply
+// Multiply 32x32 and use the full result
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_XTYPE_mpy64 <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSat, bit hasShift, bit isConj>
+ : MInst <(outs DoubleRegs:$Rdd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rdd = "#mnemonic#"($Rs, $Rt"#!if(isConj,"*)",")")
+ #!if(hasShift,":<<1","")
+ #!if(isSat,":sat",""),
+ [] > {
+ bits<5> Rdd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b0101;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ }
+
+//===----------------------------------------------------------------------===//
+// Template Class for xtype mpy with accumulation into 64-bit:
+// Vector multiply
+// Complex multiply
+// Multiply 32x32 and use the full result
+//===----------------------------------------------------------------------===//
+class T_XTYPE_mpy64_acc <string op1, string op2, bits<3> MajOp, bits<3> MinOp,
+ bit isSat, bit hasShift, bit isConj>
+ : MInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rxx "#op2#"= "#op1#"($Rs, $Rt"#!if(isConj,"*)",")")
+ #!if(hasShift,":<<1","")
+ #!if(isSat,":sat",""),
+ [] , "$dst2 = $Rxx" > {
+ bits<5> Rxx;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b0111;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rxx;
+ }
+
+// MPY - Multiply and use full result
+// Rdd = mpy[u](Rs,Rt)
+def M2_dpmpyss_s0 : T_XTYPE_mpy64 < "mpy", 0b000, 0b000, 0, 0, 0>;
+def M2_dpmpyuu_s0 : T_XTYPE_mpy64 < "mpyu", 0b010, 0b000, 0, 0, 0>;
+
+// Rxx[+-]= mpy[u](Rs,Rt)
+def M2_dpmpyss_acc_s0 : T_XTYPE_mpy64_acc < "mpy", "+", 0b000, 0b000, 0, 0, 0>;
+def M2_dpmpyss_nac_s0 : T_XTYPE_mpy64_acc < "mpy", "-", 0b001, 0b000, 0, 0, 0>;
+def M2_dpmpyuu_acc_s0 : T_XTYPE_mpy64_acc < "mpyu", "+", 0b010, 0b000, 0, 0, 0>;
+def M2_dpmpyuu_nac_s0 : T_XTYPE_mpy64_acc < "mpyu", "-", 0b011, 0b000, 0, 0, 0>;
+
+// Complex multiply real or imaginary
+// Rxx=cmpy[ir](Rs,Rt)
+def M2_cmpyi_s0 : T_XTYPE_mpy64 < "cmpyi", 0b000, 0b001, 0, 0, 0>;
+def M2_cmpyr_s0 : T_XTYPE_mpy64 < "cmpyr", 0b000, 0b010, 0, 0, 0>;
+
+// Rxx+=cmpy[ir](Rs,Rt)
+def M2_cmaci_s0 : T_XTYPE_mpy64_acc < "cmpyi", "+", 0b000, 0b001, 0, 0, 0>;
+def M2_cmacr_s0 : T_XTYPE_mpy64_acc < "cmpyr", "+", 0b000, 0b010, 0, 0, 0>;
+
+// Complex multiply
+// Rdd=cmpy(Rs,Rt)[:<<1]:sat
+def M2_cmpys_s0 : T_XTYPE_mpy64 < "cmpy", 0b000, 0b110, 1, 0, 0>;
+def M2_cmpys_s1 : T_XTYPE_mpy64 < "cmpy", 0b100, 0b110, 1, 1, 0>;
+
+// Rdd=cmpy(Rs,Rt*)[:<<1]:sat
+def M2_cmpysc_s0 : T_XTYPE_mpy64 < "cmpy", 0b010, 0b110, 1, 0, 1>;
+def M2_cmpysc_s1 : T_XTYPE_mpy64 < "cmpy", 0b110, 0b110, 1, 1, 1>;
+
+// Rxx[-+]=cmpy(Rs,Rt)[:<<1]:sat
+def M2_cmacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b000, 0b110, 1, 0, 0>;
+def M2_cnacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b000, 0b111, 1, 0, 0>;
+def M2_cmacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b100, 0b110, 1, 1, 0>;
+def M2_cnacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b100, 0b111, 1, 1, 0>;
+
+// Rxx[-+]=cmpy(Rs,Rt*)[:<<1]:sat
+def M2_cmacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b010, 0b110, 1, 0, 1>;
+def M2_cnacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b010, 0b111, 1, 0, 1>;
+def M2_cmacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b110, 0b110, 1, 1, 1>;
+def M2_cnacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b110, 0b111, 1, 1, 1>;
+
+// Vector multiply halfwords
+// Rdd=vmpyh(Rs,Rt)[:<<1]:sat
+//let Defs = [USR_OVF] in {
+ def M2_vmpy2s_s1 : T_XTYPE_mpy64 < "vmpyh", 0b100, 0b101, 1, 1, 0>;
+ def M2_vmpy2s_s0 : T_XTYPE_mpy64 < "vmpyh", 0b000, 0b101, 1, 0, 0>;
+//}
+
+// Rxx+=vmpyh(Rs,Rt)[:<<1][:sat]
+def M2_vmac2 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b001, 0b001, 0, 0, 0>;
+def M2_vmac2s_s1 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b100, 0b101, 1, 1, 0>;
+def M2_vmac2s_s0 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b000, 0b101, 1, 0, 0>;
+
+def: Pat<(i64 (mul (i64 (anyext (i32 IntRegs:$src1))),
+ (i64 (anyext (i32 IntRegs:$src2))))),
+ (M2_dpmpyuu_s0 IntRegs:$src1, IntRegs:$src2)>;
+
+def: Pat<(i64 (mul (i64 (sext (i32 IntRegs:$src1))),
+ (i64 (sext (i32 IntRegs:$src2))))),
+ (M2_dpmpyss_s0 IntRegs:$src1, IntRegs:$src2)>;
+
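+// When both 64-bit operands are sign extensions of 32-bit values, the full
+// product fits in 64 bits, so it can be formed from the low words alone.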
+def: Pat<(i64 (mul (is_sext_i32:$src1),
+ (is_sext_i32:$src2))),
+ (M2_dpmpyss_s0 (LoReg DoubleRegs:$src1), (LoReg DoubleRegs:$src2))>;
// Multiply and accumulate, use full result.
// Rxx[+-]=mpy(Rs,Rt)
-// Rxx+=mpy(Rs,Rt)
-def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst += mpy($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (add (mul (i64 (sext (i32 IntRegs:$src2))),
- (i64 (sext (i32 IntRegs:$src3)))),
- (i64 DoubleRegs:$src1)))],
- "$src1 = $dst">;
-
-// Rxx-=mpy(Rs,Rt)
-def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst -= mpy($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (sub (i64 DoubleRegs:$src1),
- (mul (i64 (sext (i32 IntRegs:$src2))),
- (i64 (sext (i32 IntRegs:$src3))))))],
- "$src1 = $dst">;
-
-// Rxx[+-]=mpyu(Rs,Rt)
-// Rxx+=mpyu(Rs,Rt)
-def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- IntRegs:$src2, IntRegs:$src3),
- "$dst += mpyu($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (add (mul (i64 (anyext (i32 IntRegs:$src2))),
- (i64 (anyext (i32 IntRegs:$src3)))),
- (i64 DoubleRegs:$src1)))], "$src1 = $dst">;
-
-// Rxx-=mpyu(Rs,Rt)
-def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst -= mpyu($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (sub (i64 DoubleRegs:$src1),
- (mul (i64 (anyext (i32 IntRegs:$src2))),
- (i64 (anyext (i32 IntRegs:$src3))))))],
- "$src1 = $dst">;
-
-
-let InputType = "reg", CextOpcode = "ADD_acc" in
-def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
- IntRegs:$src2, IntRegs:$src3),
- "$dst += add($src2, $src3)",
- [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
- (i32 IntRegs:$src3)),
- (i32 IntRegs:$src1)))],
- "$src1 = $dst">, ImmRegRel;
-
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
-InputType = "imm", CextOpcode = "ADD_acc" in
-def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
- IntRegs:$src2, s8Ext:$src3),
- "$dst += add($src2, #$src3)",
- [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
- s8_16ExtPred:$src3),
- (i32 IntRegs:$src1)))],
- "$src1 = $dst">, ImmRegRel;
-
-let CextOpcode = "SUB_acc", InputType = "reg" in
-def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
- IntRegs:$src2, IntRegs:$src3),
- "$dst -= add($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (sub (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">, ImmRegRel;
-
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
-CextOpcode = "SUB_acc", InputType = "imm" in
-def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
- IntRegs:$src2, s8Ext:$src3),
- "$dst -= add($src2, #$src3)",
- [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
- (add (i32 IntRegs:$src2),
- s8_16ExtPred:$src3)))],
- "$src1 = $dst">, ImmRegRel;
+
+def: Pat<(i64 (add (i64 DoubleRegs:$src1),
+ (mul (i64 (sext (i32 IntRegs:$src2))),
+ (i64 (sext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyss_acc_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def: Pat<(i64 (sub (i64 DoubleRegs:$src1),
+ (mul (i64 (sext (i32 IntRegs:$src2))),
+ (i64 (sext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyss_nac_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def: Pat<(i64 (add (i64 DoubleRegs:$src1),
+ (mul (i64 (anyext (i32 IntRegs:$src2))),
+ (i64 (anyext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyuu_acc_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def: Pat<(i64 (add (i64 DoubleRegs:$src1),
+ (mul (i64 (zext (i32 IntRegs:$src2))),
+ (i64 (zext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyuu_acc_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def: Pat<(i64 (sub (i64 DoubleRegs:$src1),
+ (mul (i64 (anyext (i32 IntRegs:$src2))),
+ (i64 (anyext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyuu_nac_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def: Pat<(i64 (sub (i64 DoubleRegs:$src1),
+ (mul (i64 (zext (i32 IntRegs:$src2))),
+ (i64 (zext (i32 IntRegs:$src3)))))),
+ (M2_dpmpyuu_nac_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
//===----------------------------------------------------------------------===//
// MTYPE/MPYH -
@@ -1464,321 +3250,1134 @@ def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
//===----------------------------------------------------------------------===//
///
// Store doubleword.
-
//===----------------------------------------------------------------------===//
-// Post increment store
+// Template class for non-predicated post increment stores with immediate
+// offset.
//===----------------------------------------------------------------------===//
+let isPredicable = 1, hasSideEffects = 0, addrMode = PostInc in
+class T_store_pi <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<4> MajOp, bit isHalf >
+ : STInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, ImmOp:$offset, RC:$src2),
+ mnemonic#"($src1++#$offset) = $src2"#!if(isHalf, ".h", ""),
+ [], "$src1 = $_dst_" >,
+ AddrModeRel {
+ bits<5> src1;
+ bits<5> src2;
+ bits<7> offset;
+ bits<4> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
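+  // The offset is scaled by the access size, so only its significant high
+  // bits are encoded; the implied low zero bits are dropped.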
+ let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3},
+ !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0})));
+ let isNVStorable = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1);
+
+ let IClass = 0b1010;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = src2;
+ let Inst{7} = 0b0;
+ let Inst{6-3} = offsetBits;
+ let Inst{1} = 0b0;
+ }
-multiclass ST_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : STInst2PI<(outs IntRegs:$dst),
+//===----------------------------------------------------------------------===//
+// Template class for predicated post increment stores with immediate offset
+//===----------------------------------------------------------------------===//
+let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc in
+class T_pstore_pi <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<4> MajOp, bit isHalf, bit isPredNot, bit isPredNew >
+ : STInst <(outs IntRegs:$_dst_),
(ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2++#$offset) = $src3",
- [],
- "$src2 = $dst">;
-}
+ !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($src2++#$offset) = $src3"#!if(isHalf, ".h", ""),
+ [], "$src2 = $_dst_" >,
+ AddrModeRel {
+ bits<2> src1;
+ bits<5> src2;
+ bits<7> offset;
+ bits<5> src3;
+ bits<4> offsetBits;
-multiclass ST_PostInc_Pred<string mnemonic, RegisterClass RC,
- Operand ImmOp, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
- // Predicate new
- let Predicates = [HasV4T], validSubTargets = HasV4SubT in
- defm _cdn#NAME#_V4 : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 1>;
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3},
+ !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0})));
+
+ let isNVStorable = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1);
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isPredNot;
+
+ let IClass = 0b1010;
+
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = 0b1;
+ let Inst{12-8} = src3;
+ let Inst{7} = isPredNew;
+ let Inst{6-3} = offsetBits;
+ let Inst{2} = isPredNot;
+ let Inst{1-0} = src1;
}
-}
-let hasCtrlDep = 1, isNVStorable = 1, neverHasSideEffects = 1 in
multiclass ST_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
- Operand ImmOp> {
+ Operand ImmOp, bits<4> MajOp, bit isHalf = 0 > {
- let hasCtrlDep = 1, BaseOpcode = "POST_"#BaseOp in {
- let isPredicable = 1 in
- def NAME : STInst2PI<(outs IntRegs:$dst),
- (ins IntRegs:$src1, ImmOp:$offset, RC:$src2),
- mnemonic#"($src1++#$offset) = $src2",
- [],
- "$src1 = $dst">;
-
- let isPredicated = 1 in {
- defm Pt : ST_PostInc_Pred<mnemonic, RC, ImmOp, 0 >;
- defm NotPt : ST_PostInc_Pred<mnemonic, RC, ImmOp, 1 >;
- }
+ let BaseOpcode = "POST_"#BaseOp in {
+ def S2_#NAME#_pi : T_store_pi <mnemonic, RC, ImmOp, MajOp, isHalf>;
+
+ // Predicated
+ def S2_p#NAME#t_pi : T_pstore_pi <mnemonic, RC, ImmOp, MajOp, isHalf, 0, 0>;
+ def S2_p#NAME#f_pi : T_pstore_pi <mnemonic, RC, ImmOp, MajOp, isHalf, 1, 0>;
+
+ // Predicated new
+ def S2_p#NAME#tnew_pi : T_pstore_pi <mnemonic, RC, ImmOp, MajOp,
+ isHalf, 0, 1>;
+ def S2_p#NAME#fnew_pi : T_pstore_pi <mnemonic, RC, ImmOp, MajOp,
+ isHalf, 1, 1>;
}
}
-defm POST_STbri: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm>, AddrModeRel;
-defm POST_SThri: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm>, AddrModeRel;
-defm POST_STwri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
+let accessSize = ByteAccess in
+defm storerb: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm, 0b1000>;
-let isNVStorable = 0 in
-defm POST_STdri: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm>, AddrModeRel;
+let accessSize = HalfWordAccess in
+defm storerh: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm, 0b1010>;
-def : Pat<(post_truncsti8 (i32 IntRegs:$src1), IntRegs:$src2,
- s4_3ImmPred:$offset),
- (POST_STbri IntRegs:$src2, s4_0ImmPred:$offset, IntRegs:$src1)>;
+let accessSize = WordAccess in
+defm storeri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm, 0b1100>;
-def : Pat<(post_truncsti16 (i32 IntRegs:$src1), IntRegs:$src2,
- s4_3ImmPred:$offset),
- (POST_SThri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>;
+let accessSize = DoubleWordAccess in
+defm storerd: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm, 0b1110>;
-def : Pat<(post_store (i32 IntRegs:$src1), IntRegs:$src2, s4_2ImmPred:$offset),
- (POST_STwri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>;
+let accessSize = HalfWordAccess, isNVStorable = 0 in
+defm storerf: ST_PostInc <"memh", "STrih_H", IntRegs, s4_1Imm, 0b1011, 1>;
-def : Pat<(post_store (i64 DoubleRegs:$src1), IntRegs:$src2,
- s4_3ImmPred:$offset),
- (POST_STdri IntRegs:$src2, s4_3ImmPred:$offset, DoubleRegs:$src1)>;
+class Storepi_pat<PatFrag Store, PatFrag Value, PatFrag Offset,
+ InstHexagon MI>
+ : Pat<(Store Value:$src1, I32:$src2, Offset:$offset),
+ (MI I32:$src2, imm:$offset, Value:$src1)>;
+
+def: Storepi_pat<post_truncsti8, I32, s4_0ImmPred, S2_storerb_pi>;
+def: Storepi_pat<post_truncsti16, I32, s4_1ImmPred, S2_storerh_pi>;
+def: Storepi_pat<post_store, I32, s4_2ImmPred, S2_storeri_pi>;
+def: Storepi_pat<post_store, I64, s4_3ImmPred, S2_storerd_pi>;
//===----------------------------------------------------------------------===//
-// multiclass for the store instructions with MEMri operand.
+// Template class for post increment stores with register offset.
//===----------------------------------------------------------------------===//
-multiclass ST_MEMri_Pbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : STInst2<(outs),
- (ins PredRegs:$src1, MEMri:$addr, RC: $src2),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($addr) = $src2",
- []>;
-}
+let isNVStorable = 1 in
+class T_store_pr <string mnemonic, RegisterClass RC, bits<3> MajOp,
+ MemAccessSize AccessSz, bit isHalf = 0>
+ : STInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, ModRegs:$src2, RC:$src3),
+ mnemonic#"($src1++$src2) = $src3"#!if(isHalf, ".h", ""),
+ [], "$src1 = $_dst_" > {
+ bits<5> src1;
+ bits<1> src2;
+ bits<5> src3;
+ let accessSize = AccessSz;
+
+ let IClass = 0b1010;
-multiclass ST_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_MEMri_Pbase<mnemonic, RC, PredNot, 0>;
+ let Inst{27-24} = 0b1101;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2;
+ let Inst{12-8} = src3;
+ let Inst{7} = 0b0;
+ }
- // Predicate new
- let validSubTargets = HasV4SubT, Predicates = [HasV4T] in
- defm _cdn#NAME#_V4 : ST_MEMri_Pbase<mnemonic, RC, PredNot, 1>;
+def S2_storerb_pr : T_store_pr<"memb", IntRegs, 0b000, ByteAccess>;
+def S2_storerh_pr : T_store_pr<"memh", IntRegs, 0b010, HalfWordAccess>;
+def S2_storeri_pr : T_store_pr<"memw", IntRegs, 0b100, WordAccess>;
+def S2_storerd_pr : T_store_pr<"memd", DoubleRegs, 0b110, DoubleWordAccess>;
+
+def S2_storerf_pr : T_store_pr<"memh", IntRegs, 0b011, HalfWordAccess, 1>;
+
+let opExtendable = 1, isExtentSigned = 1, isPredicable = 1 in
+class T_store_io <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<3>MajOp, bit isH = 0>
+ : STInst <(outs),
+ (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
+ mnemonic#"($src1+#$src2) = $src3"#!if(isH,".h","")>,
+ AddrModeRel, ImmRegRel {
+ bits<5> src1;
+ bits<14> src2; // Actual address offset
+ bits<5> src3;
+ bits<11> offsetBits; // Represents offset encoding
+
+ string ImmOpStr = !cast<string>(ImmOp);
+
+ let opExtentBits = !if (!eq(ImmOpStr, "s11_3Ext"), 14,
+ !if (!eq(ImmOpStr, "s11_2Ext"), 13,
+ !if (!eq(ImmOpStr, "s11_1Ext"), 12,
+ /* s11_0Ext */ 11)));
+ let offsetBits = !if (!eq(ImmOpStr, "s11_3Ext"), src2{13-3},
+ !if (!eq(ImmOpStr, "s11_2Ext"), src2{12-2},
+ !if (!eq(ImmOpStr, "s11_1Ext"), src2{11-1},
+ /* s11_0Ext */ src2{10-0})));
+ let IClass = 0b1010;
+
+ let Inst{27} = 0b0;
+ let Inst{26-25} = offsetBits{10-9};
+ let Inst{24} = 0b1;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = offsetBits{8};
+ let Inst{12-8} = src3;
+ let Inst{7-0} = offsetBits{7-0};
}
-}
-let isExtendable = 1, isNVStorable = 1, neverHasSideEffects = 1 in
-multiclass ST_MEMri<string mnemonic, string CextOp, RegisterClass RC,
- bits<5> ImmBits, bits<5> PredImmBits> {
+let opExtendable = 2, isPredicated = 1 in
+class T_pstore_io <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<3>MajOp, bit PredNot, bit isPredNew, bit isH = 0>
+ : STInst <(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4),
+ !if(PredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($src2+#$src3) = $src4"#!if(isH,".h",""),
+ [],"",V2LDST_tc_st_SLOT01 >,
+ AddrModeRel, ImmRegRel {
+ bits<2> src1;
+ bits<5> src2;
+ bits<9> src3; // Actual address offset
+ bits<5> src4;
+ bits<6> offsetBits; // Represents offset encoding
+
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = PredNot;
- let CextOpcode = CextOp, BaseOpcode = CextOp in {
- let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1 in
- def NAME : STInst2<(outs),
- (ins MEMri:$addr, RC:$src),
- mnemonic#"($addr) = $src",
- []>;
-
- let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits,
- isPredicated = 1 in {
- defm Pt : ST_MEMri_Pred<mnemonic, RC, 0>;
- defm NotPt : ST_MEMri_Pred<mnemonic, RC, 1>;
- }
+ string ImmOpStr = !cast<string>(ImmOp);
+ let opExtentBits = !if (!eq(ImmOpStr, "u6_3Ext"), 9,
+ !if (!eq(ImmOpStr, "u6_2Ext"), 8,
+ !if (!eq(ImmOpStr, "u6_1Ext"), 7,
+ /* u6_0Ext */ 6)));
+ let offsetBits = !if (!eq(ImmOpStr, "u6_3Ext"), src3{8-3},
+ !if (!eq(ImmOpStr, "u6_2Ext"), src3{7-2},
+ !if (!eq(ImmOpStr, "u6_1Ext"), src3{6-1},
+ /* u6_0Ext */ src3{5-0})));
+ let IClass = 0b0100;
+
+ let Inst{27} = 0b0;
+ let Inst{26} = PredNot;
+ let Inst{25} = isPredNew;
+ let Inst{24} = 0b0;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = offsetBits{5};
+ let Inst{12-8} = src4;
+ let Inst{7-3} = offsetBits{4-0};
+ let Inst{1-0} = src1;
+ }
+
+let isExtendable = 1, isNVStorable = 1, hasSideEffects = 0 in
+multiclass ST_Idxd<string mnemonic, string CextOp, RegisterClass RC,
+ Operand ImmOp, Operand predImmOp, bits<3> MajOp, bit isH = 0> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
+ def S2_#NAME#_io : T_store_io <mnemonic, RC, ImmOp, MajOp, isH>;
+
+ // Predicated
+ def S2_p#NAME#t_io : T_pstore_io<mnemonic, RC, predImmOp, MajOp, 0, 0, isH>;
+ def S2_p#NAME#f_io : T_pstore_io<mnemonic, RC, predImmOp, MajOp, 1, 0, isH>;
+
+ // Predicated new
+ def S4_p#NAME#tnew_io : T_pstore_io <mnemonic, RC, predImmOp,
+ MajOp, 0, 1, isH>;
+ def S4_p#NAME#fnew_io : T_pstore_io <mnemonic, RC, predImmOp,
+ MajOp, 1, 1, isH>;
}
}
-let addrMode = BaseImmOffset, isMEMri = "true" in {
+let addrMode = BaseImmOffset, InputType = "imm" in {
let accessSize = ByteAccess in
- defm STrib: ST_MEMri < "memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
+ defm storerb: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext, u6_0Ext, 0b000>;
+
+ let accessSize = HalfWordAccess, opExtentAlign = 1 in
+ defm storerh: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext, u6_1Ext, 0b010>;
+
+ let accessSize = WordAccess, opExtentAlign = 2 in
+ defm storeri: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext, u6_2Ext, 0b100>;
- let accessSize = HalfWordAccess in
- defm STrih: ST_MEMri < "memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
+ let accessSize = DoubleWordAccess, isNVStorable = 0, opExtentAlign = 3 in
+ defm storerd: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext,
+ u6_3Ext, 0b110>;
- let accessSize = WordAccess in
- defm STriw: ST_MEMri < "memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
+ let accessSize = HalfWordAccess, opExtentAlign = 1 in
+ defm storerf: ST_Idxd < "memh", "STrif", IntRegs, s11_1Ext,
+ u6_1Ext, 0b011, 1>;
+}
- let accessSize = DoubleWordAccess, isNVStorable = 0 in
- defm STrid: ST_MEMri < "memd", "STrid", DoubleRegs, 14, 9>, AddrModeRel;
+// Patterns for generating stores, where the address takes different forms:
+// - frameindex,
+// - base + offset,
+// - simple (base address without offset).
+// These would usually be used together (via Storex_pat defined below), but
+// in some cases one may want to apply different properties (such as
+// AddedComplexity) to the individual patterns.
+class Storex_fi_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
+ : Pat<(Store Value:$Rs, AddrFI:$fi), (MI AddrFI:$fi, 0, Value:$Rs)>;
+class Storex_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ InstHexagon MI>
+ : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, Value:$Rt)>;
+class Storex_simple_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
+ : Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)),
+ (MI IntRegs:$Rs, 0, Value:$Rt)>;
+
+// Patterns for generating stores, where the address takes different forms,
+// and where the value being stored is transformed through the value modifier
+// ValueMod. The address forms are the same as above.
+class Storexm_fi_pat<PatFrag Store, PatFrag Value, PatFrag ValueMod,
+ InstHexagon MI>
+ : Pat<(Store Value:$Rs, AddrFI:$fi),
+ (MI AddrFI:$fi, 0, (ValueMod Value:$Rs))>;
+class Storexm_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ PatFrag ValueMod, InstHexagon MI>
+ : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, (ValueMod Value:$Rt))>;
+class Storexm_simple_pat<PatFrag Store, PatFrag Value, PatFrag ValueMod,
+ InstHexagon MI>
+ : Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)),
+ (MI IntRegs:$Rs, 0, (ValueMod Value:$Rt))>;
+
+multiclass Storex_pat<PatFrag Store, PatFrag Value, PatLeaf ImmPred,
+ InstHexagon MI> {
+ def: Storex_fi_pat <Store, Value, MI>;
+ def: Storex_add_pat <Store, Value, ImmPred, MI>;
}
-def : Pat<(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr),
- (STrib ADDRriS11_0:$addr, (i32 IntRegs:$src1))>;
+multiclass Storexm_pat<PatFrag Store, PatFrag Value, PatLeaf ImmPred,
+ PatFrag ValueMod, InstHexagon MI> {
+ def: Storexm_fi_pat <Store, Value, ValueMod, MI>;
+ def: Storexm_add_pat <Store, Value, ImmPred, ValueMod, MI>;
+}
-def : Pat<(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr),
- (STrih ADDRriS11_1:$addr, (i32 IntRegs:$src1))>;
+// Regular stores in the DAG have two operands: value and address.
+// Atomic stores also have two, but in the opposite order: address, value.
+// To use atomic stores with these patterns, their operands need to be
+// swapped. This relies on the knowledge that the F.Fragment uses the names
+// "ptr" and "val".
+class SwapSt<PatFrag F>
+ : PatFrag<(ops node:$val, node:$ptr), F.Fragment>;
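+// For example, SwapSt<atomic_store_32> keeps the fragment
+// (atomic_store_32 node:$ptr, node:$val) but lists its operands as
+// (ops node:$val, node:$ptr), so the value-first store patterns above apply
+// unchanged. A sketch of the resulting PatFrag, assuming the standard
+// atomic_store_32 definition:
+//   PatFrag<(ops node:$val, node:$ptr),
+//           (atomic_store_32 node:$ptr, node:$val)>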
-def : Pat<(store (i32 IntRegs:$src1), ADDRriS11_2:$addr),
- (STriw ADDRriS11_2:$addr, (i32 IntRegs:$src1))>;
+let AddedComplexity = 20 in {
+ defm: Storex_pat<truncstorei8, I32, s11_0ExtPred, S2_storerb_io>;
+ defm: Storex_pat<truncstorei16, I32, s11_1ExtPred, S2_storerh_io>;
+ defm: Storex_pat<store, I32, s11_2ExtPred, S2_storeri_io>;
+ defm: Storex_pat<store, I64, s11_3ExtPred, S2_storerd_io>;
+
+ defm: Storex_pat<SwapSt<atomic_store_8>, I32, s11_0ExtPred, S2_storerb_io>;
+ defm: Storex_pat<SwapSt<atomic_store_16>, I32, s11_1ExtPred, S2_storerh_io>;
+ defm: Storex_pat<SwapSt<atomic_store_32>, I32, s11_2ExtPred, S2_storeri_io>;
+ defm: Storex_pat<SwapSt<atomic_store_64>, I64, s11_3ExtPred, S2_storerd_io>;
+}
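+// For illustration, a single defm above, e.g.
+//   defm: Storex_pat<truncstorei8, I32, s11_0ExtPred, S2_storerb_io>;
+// expands (roughly) to the two anonymous patterns
+//   (truncstorei8 I32:$Rs, AddrFI:$fi) -> (S2_storerb_io AddrFI:$fi, 0, $Rs)
+//   (truncstorei8 I32:$Rt, (add I32:$Rs, s11_0ExtPred:$Off))
+//     -> (S2_storerb_io IntRegs:$Rs, imm:$Off, $Rt)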
-def : Pat<(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr),
- (STrid ADDRriS11_3:$addr, (i64 DoubleRegs:$src1))>;
+// Simple patterns should be tried with the lowest priority.
+def: Storex_simple_pat<truncstorei8, I32, S2_storerb_io>;
+def: Storex_simple_pat<truncstorei16, I32, S2_storerh_io>;
+def: Storex_simple_pat<store, I32, S2_storeri_io>;
+def: Storex_simple_pat<store, I64, S2_storerd_io>;
+def: Storex_simple_pat<SwapSt<atomic_store_8>, I32, S2_storerb_io>;
+def: Storex_simple_pat<SwapSt<atomic_store_16>, I32, S2_storerh_io>;
+def: Storex_simple_pat<SwapSt<atomic_store_32>, I32, S2_storeri_io>;
+def: Storex_simple_pat<SwapSt<atomic_store_64>, I64, S2_storerd_io>;
+
+let AddedComplexity = 20 in {
+ defm: Storexm_pat<truncstorei8, I64, s11_0ExtPred, LoReg, S2_storerb_io>;
+ defm: Storexm_pat<truncstorei16, I64, s11_1ExtPred, LoReg, S2_storerh_io>;
+ defm: Storexm_pat<truncstorei32, I64, s11_2ExtPred, LoReg, S2_storeri_io>;
+}
+
+def: Storexm_simple_pat<truncstorei8, I64, LoReg, S2_storerb_io>;
+def: Storexm_simple_pat<truncstorei16, I64, LoReg, S2_storerh_io>;
+def: Storexm_simple_pat<truncstorei32, I64, LoReg, S2_storeri_io>;
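+// The Storexm patterns above run the stored value through LoReg first, so a
+// truncating store of an i64 becomes a store of its low word, e.g. (assuming
+// LoReg is the usual EXTRACT_SUBREG-of-the-low-subregister fragment):
+//   (truncstorei32 I64:$Rss, $addr)
+//     -> (S2_storeri_io ..., (EXTRACT_SUBREG $Rss, subreg_loreg))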
+
+// Store predicate.
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
+ isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+def STriw_pred : STInst<(outs),
+ (ins IntRegs:$addr, s11_2Ext:$off, PredRegs:$src1),
+ ".error \"should not emit\"", []>;
+
+// S2_allocframe: Allocate stack frame.
+let Defs = [R29, R30], Uses = [R29, R31, R30],
+ hasSideEffects = 0, accessSize = DoubleWordAccess in
+def S2_allocframe: ST0Inst <
+ (outs), (ins u11_3Imm:$u11_3),
+ "allocframe(#$u11_3)" > {
+ bits<14> u11_3;
+
+ let IClass = 0b1010;
+ let Inst{27-16} = 0b000010011101;
+ let Inst{13-11} = 0b000;
+ let Inst{10-0} = u11_3{13-3};
+ }
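+
+// Encoding note: u11_3 is an 8-byte-aligned immediate whose low three bits
+// are dropped by "u11_3{13-3}" above; e.g. "allocframe(#24)" would encode
+// 24 >> 3 == 3 into Inst{10-0}.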
+
+// S2_storer[bhwdf]_pci: Circular store of byte/half/word/double (the 'f'
+// form stores the high halfword). New-value forms: S2_storer[bhi]new_pci.
+let Uses = [CS], isNVStorable = 1 in
+class T_store_pci <string mnemonic, RegisterClass RC,
+ Operand Imm, bits<4>MajOp,
+ MemAccessSize AlignSize, string RegSrc = "Rt">
+ : STInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, RC:$Rt),
+ #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $"#RegSrc#"",
+ [] ,
+ "$Rz = $_dst_" > {
+ bits<5> Rz;
+ bits<7> offset;
+ bits<1> Mu;
+ bits<5> Rt;
+ let accessSize = AlignSize;
+
+ let IClass = 0b1010;
+ let Inst{27-25} = 0b100;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b0;
+ let Inst{6-3} =
+ !if (!eq(!cast<string>(AlignSize), "DoubleWordAccess"), offset{6-3},
+ !if (!eq(!cast<string>(AlignSize), "WordAccess"), offset{5-2},
+ !if (!eq(!cast<string>(AlignSize), "HalfWordAccess"), offset{4-1},
+ /* ByteAccess */ offset{3-0})));
+ let Inst{1} = 0b0;
+ }
+
+def S2_storerb_pci : T_store_pci<"memb", IntRegs, s4_0Imm, 0b1000,
+ ByteAccess>;
+def S2_storerh_pci : T_store_pci<"memh", IntRegs, s4_1Imm, 0b1010,
+ HalfWordAccess>;
+def S2_storerf_pci : T_store_pci<"memh", IntRegs, s4_1Imm, 0b1011,
+ HalfWordAccess, "Rt.h">;
+def S2_storeri_pci : T_store_pci<"memw", IntRegs, s4_2Imm, 0b1100,
+ WordAccess>;
+def S2_storerd_pci : T_store_pci<"memd", DoubleRegs, s4_3Imm, 0b1110,
+ DoubleWordAccess>;
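+
+// The circular-store offset is scaled by the access size before encoding
+// (see the Inst{6-3} selection above); e.g. for S2_storeri_pci a word offset
+// of #8 would be encoded as 8 >> 2 == 2.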
+
+let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 4 in
+class T_storenew_pci <string mnemonic, Operand Imm,
+ bits<2>MajOp, MemAccessSize AlignSize>
+ : NVInst < (outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, IntRegs:$Nt),
+ #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $Nt.new",
+ [],
+ "$Rz = $_dst_"> {
+ bits<5> Rz;
+ bits<6> offset;
+ bits<1> Mu;
+ bits<3> Nt;
+
+ let accessSize = AlignSize;
+
+ let IClass = 0b1010;
+ let Inst{27-21} = 0b1001101;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = Nt;
+ let Inst{7} = 0b0;
+ let Inst{6-3} =
+ !if (!eq(!cast<string>(AlignSize), "WordAccess"), offset{5-2},
+ !if (!eq(!cast<string>(AlignSize), "HalfWordAccess"), offset{4-1},
+ /* ByteAccess */ offset{3-0}));
+ let Inst{1} = 0b0;
+ }
+
+def S2_storerbnew_pci : T_storenew_pci <"memb", s4_0Imm, 0b00, ByteAccess>;
+def S2_storerhnew_pci : T_storenew_pci <"memh", s4_1Imm, 0b01, HalfWordAccess>;
+def S2_storerinew_pci : T_storenew_pci <"memw", s4_2Imm, 0b10, WordAccess>;
+
+//===----------------------------------------------------------------------===//
+// Circular stores - Pseudo
+//
+// Note that the input operand order of the pseudo instructions does not
+// match that of the real instructions; the pseudo instruction operand
+// order mimics the ordering in the intrinsics.
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in
+class T_store_pci_pseudo <string opc, RegisterClass RC>
+ : STInstPI<(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, RC:$src2, IntRegs:$src3, s4Imm:$src4),
+ ".error \""#opc#"($src1++#$src4:circ($src3)) = $src2\"",
+ [], "$_dst_ = $src1">;
+
+def S2_storerb_pci_pseudo : T_store_pci_pseudo <"memb", IntRegs>;
+def S2_storerh_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>;
+def S2_storerf_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>;
+def S2_storeri_pci_pseudo : T_store_pci_pseudo <"memw", IntRegs>;
+def S2_storerd_pci_pseudo : T_store_pci_pseudo <"memd", DoubleRegs>;
//===----------------------------------------------------------------------===//
-// multiclass for the store instructions with base+immediate offset
-// addressing mode
+// Circular stores with auto-increment register
//===----------------------------------------------------------------------===//
-multiclass ST_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : STInst2<(outs),
- (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2+#$src3) = $src4",
- []>;
+let Uses = [CS], isNVStorable = 1 in
+class T_store_pcr <string mnemonic, RegisterClass RC, bits<4>MajOp,
+ MemAccessSize AlignSize, string RegSrc = "Rt">
+ : STInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu, RC:$Rt),
+ #mnemonic#"($Rz ++ I:circ($Mu)) = $"#RegSrc#"",
+ [],
+ "$Rz = $_dst_" > {
+ bits<5> Rz;
+ bits<1> Mu;
+ bits<5> Rt;
+
+ let accessSize = AlignSize;
+
+ let IClass = 0b1010;
+ let Inst{27-25} = 0b100;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b0;
+ let Inst{1} = 0b1;
+ }
+
+def S2_storerb_pcr : T_store_pcr<"memb", IntRegs, 0b1000, ByteAccess>;
+def S2_storerh_pcr : T_store_pcr<"memh", IntRegs, 0b1010, HalfWordAccess>;
+def S2_storeri_pcr : T_store_pcr<"memw", IntRegs, 0b1100, WordAccess>;
+def S2_storerd_pcr : T_store_pcr<"memd", DoubleRegs, 0b1110, DoubleWordAccess>;
+def S2_storerf_pcr : T_store_pcr<"memh", IntRegs, 0b1011,
+ HalfWordAccess, "Rt.h">;
+
+//===----------------------------------------------------------------------===//
+// Circular .new stores with auto-increment register
+//===----------------------------------------------------------------------===//
+let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3 in
+class T_storenew_pcr <string mnemonic, bits<2>MajOp,
+ MemAccessSize AlignSize>
+ : NVInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt),
+ #mnemonic#"($Rz ++ I:circ($Mu)) = $Nt.new" ,
+ [] ,
+ "$Rz = $_dst_"> {
+ bits<5> Rz;
+ bits<1> Mu;
+ bits<3> Nt;
+
+ let accessSize = AlignSize;
+
+ let IClass = 0b1010;
+ let Inst{27-21} = 0b1001101;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = Nt;
+ let Inst{7} = 0b0;
+ let Inst{1} = 0b1;
+ }
+
+def S2_storerbnew_pcr : T_storenew_pcr <"memb", 0b00, ByteAccess>;
+def S2_storerhnew_pcr : T_storenew_pcr <"memh", 0b01, HalfWordAccess>;
+def S2_storerinew_pcr : T_storenew_pcr <"memw", 0b10, WordAccess>;
+
+//===----------------------------------------------------------------------===//
+// Bit-reversed stores with auto-increment register
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_store_pbr<string mnemonic, RegisterClass RC,
+ MemAccessSize addrSize, bits<3> majOp,
+ bit isHalf = 0>
+ : STInst
+ <(outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu, RC:$src),
+ #mnemonic#"($Rz ++ $Mu:brev) = $src"#!if (!eq(isHalf, 1), ".h", ""),
+ [], "$Rz = $_dst_" > {
+
+ let accessSize = addrSize;
+
+ bits<5> Rz;
+ bits<1> Mu;
+ bits<5> src;
+
+ let IClass = 0b1010;
+
+ let Inst{27-24} = 0b1111;
+ let Inst{23-21} = majOp;
+ let Inst{7} = 0b0;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{12-8} = src;
+ }
+
+let isNVStorable = 1 in {
+ let BaseOpcode = "S2_storerb_pbr" in
+ def S2_storerb_pbr : T_store_pbr<"memb", IntRegs, ByteAccess,
+ 0b000>, NewValueRel;
+ let BaseOpcode = "S2_storerh_pbr" in
+ def S2_storerh_pbr : T_store_pbr<"memh", IntRegs, HalfWordAccess,
+ 0b010>, NewValueRel;
+ let BaseOpcode = "S2_storeri_pbr" in
+ def S2_storeri_pbr : T_store_pbr<"memw", IntRegs, WordAccess,
+ 0b100>, NewValueRel;
}
-multiclass ST_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp,
- bit PredNot> {
- let isPredicatedFalse = PredNot, isPredicated = 1 in {
- defm _c#NAME : ST_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>;
+def S2_storerf_pbr : T_store_pbr<"memh", IntRegs, HalfWordAccess, 0b011, 1>;
+def S2_storerd_pbr : T_store_pbr<"memd", DoubleRegs, DoubleWordAccess, 0b110>;
- // Predicate new
- let validSubTargets = HasV4SubT, Predicates = [HasV4T] in
- defm _cdn#NAME#_V4 : ST_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 1>;
+//===----------------------------------------------------------------------===//
+// Bit-reversed .new stores with auto-increment register
+//===----------------------------------------------------------------------===//
+let isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3,
+ hasSideEffects = 0 in
+class T_storenew_pbr<string mnemonic, MemAccessSize addrSize, bits<2> majOp>
+ : NVInst <(outs IntRegs:$_dst_),
+ (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt),
+ #mnemonic#"($Rz ++ $Mu:brev) = $Nt.new", [],
+ "$Rz = $_dst_">, NewValueRel {
+ let accessSize = addrSize;
+ bits<5> Rz;
+ bits<1> Mu;
+ bits<3> Nt;
+
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1111101;
+ let Inst{12-11} = majOp;
+ let Inst{7} = 0b0;
+ let Inst{20-16} = Rz;
+ let Inst{13} = Mu;
+ let Inst{10-8} = Nt;
}
+
+let BaseOpcode = "S2_storerb_pbr" in
+def S2_storerbnew_pbr : T_storenew_pbr<"memb", ByteAccess, 0b00>;
+
+let BaseOpcode = "S2_storerh_pbr" in
+def S2_storerhnew_pbr : T_storenew_pbr<"memh", HalfWordAccess, 0b01>;
+
+let BaseOpcode = "S2_storeri_pbr" in
+def S2_storerinew_pbr : T_storenew_pbr<"memw", WordAccess, 0b10>;
+
+//===----------------------------------------------------------------------===//
+// Bit-reversed stores - Pseudo
+//
+// Note that the input operand order of the pseudo instructions does not
+// match that of the real instructions; the pseudo instruction operand
+// order mimics the ordering in the intrinsics.
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in
+class T_store_pbr_pseudo <string opc, RegisterClass RC>
+ : STInstPI<(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, RC:$src2, IntRegs:$src3),
+ ".error \""#opc#"($src1++$src3:brev) = $src2\"",
+ [], "$_dst_ = $src1">;
+
+def S2_storerb_pbr_pseudo : T_store_pbr_pseudo <"memb", IntRegs>;
+def S2_storerh_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>;
+def S2_storeri_pbr_pseudo : T_store_pbr_pseudo <"memw", IntRegs>;
+def S2_storerf_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>;
+def S2_storerd_pbr_pseudo : T_store_pbr_pseudo <"memd", DoubleRegs>;
+
+//===----------------------------------------------------------------------===//
+// ST -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Template class for S_2op instructions.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_S2op_1 <string mnemonic, bits<4> RegTyBits, RegisterClass RCOut,
+ RegisterClass RCIn, bits<2> MajOp, bits<3> MinOp, bit isSat>
+ : SInst <(outs RCOut:$dst), (ins RCIn:$src),
+ "$dst = "#mnemonic#"($src)"#!if(isSat, ":sat", ""),
+ [], "", S_2op_tc_1_SLOT23 > {
+ bits<5> dst;
+ bits<5> src;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = RegTyBits;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = 0b0;
+ let Inst{20-16} = src;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
+ }
+
+class T_S2op_1_di <string mnemonic, bits<2> MajOp, bits<3> MinOp>
+ : T_S2op_1 <mnemonic, 0b0100, DoubleRegs, IntRegs, MajOp, MinOp, 0>;
+
+let hasNewValue = 1 in
+class T_S2op_1_id <string mnemonic, bits<2> MajOp, bits<3> MinOp, bit isSat = 0>
+ : T_S2op_1 <mnemonic, 0b1000, IntRegs, DoubleRegs, MajOp, MinOp, isSat>;
+
+let hasNewValue = 1 in
+class T_S2op_1_ii <string mnemonic, bits<2> MajOp, bits<3> MinOp, bit isSat = 0>
+ : T_S2op_1 <mnemonic, 0b1100, IntRegs, IntRegs, MajOp, MinOp, isSat>;
+
+// Vector sign/zero extend
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
+ def S2_vsxtbh : T_S2op_1_di <"vsxtbh", 0b00, 0b000>;
+ def S2_vsxthw : T_S2op_1_di <"vsxthw", 0b00, 0b100>;
+ def S2_vzxtbh : T_S2op_1_di <"vzxtbh", 0b00, 0b010>;
+ def S2_vzxthw : T_S2op_1_di <"vzxthw", 0b00, 0b110>;
}
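+// These widen each lane of Rs into a double-width lane of Rdd: vsxtbh/vzxtbh
+// extend bytes to halfwords (sign/zero), vsxthw/vzxthw halfwords to words.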
-let isExtendable = 1, isNVStorable = 1, neverHasSideEffects = 1 in
-multiclass ST_Idxd<string mnemonic, string CextOp, RegisterClass RC,
- Operand ImmOp, Operand predImmOp, bits<5> ImmBits,
- bits<5> PredImmBits> {
+// Vector splat bytes/halfwords
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
+ def S2_vsplatrb : T_S2op_1_ii <"vsplatb", 0b01, 0b111>;
+ def S2_vsplatrh : T_S2op_1_di <"vsplath", 0b01, 0b010>;
+}
- let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
- let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1 in
- def NAME : STInst2<(outs),
- (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
- mnemonic#"($src1+#$src2) = $src3",
- []>;
+// Sign extend word to doubleword
+def A2_sxtw : T_S2op_1_di <"sxtw", 0b01, 0b000>;
- let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits in {
- defm Pt : ST_Idxd_Pred<mnemonic, RC, predImmOp, 0>;
- defm NotPt : ST_Idxd_Pred<mnemonic, RC, predImmOp, 1>;
- }
+def: Pat <(i64 (sext I32:$src)), (A2_sxtw I32:$src)>;
+
+// Vector saturate and pack
+let Defs = [USR_OVF] in {
+ def S2_svsathb : T_S2op_1_ii <"vsathb", 0b10, 0b000>;
+ def S2_svsathub : T_S2op_1_ii <"vsathub", 0b10, 0b010>;
+ def S2_vsathb : T_S2op_1_id <"vsathb", 0b00, 0b110>;
+ def S2_vsathub : T_S2op_1_id <"vsathub", 0b00, 0b000>;
+ def S2_vsatwh : T_S2op_1_id <"vsatwh", 0b00, 0b010>;
+ def S2_vsatwuh : T_S2op_1_id <"vsatwuh", 0b00, 0b100>;
+}
+
+// Vector truncate
+def S2_vtrunohb : T_S2op_1_id <"vtrunohb", 0b10, 0b000>;
+def S2_vtrunehb : T_S2op_1_id <"vtrunehb", 0b10, 0b010>;
+
+// Swizzle the bytes of a word
+def A2_swiz : T_S2op_1_ii <"swiz", 0b10, 0b111>;
+
+// Saturate
+let Defs = [USR_OVF] in {
+ def A2_sat : T_S2op_1_id <"sat", 0b11, 0b000>;
+ def A2_satb : T_S2op_1_ii <"satb", 0b11, 0b111>;
+ def A2_satub : T_S2op_1_ii <"satub", 0b11, 0b110>;
+ def A2_sath : T_S2op_1_ii <"sath", 0b11, 0b100>;
+ def A2_satuh : T_S2op_1_ii <"satuh", 0b11, 0b101>;
+ def A2_roundsat : T_S2op_1_id <"round", 0b11, 0b001, 0b1>;
+}
+
+let Itinerary = S_2op_tc_2_SLOT23 in {
+ // Vector round and pack
+ def S2_vrndpackwh : T_S2op_1_id <"vrndwh", 0b10, 0b100>;
+
+ let Defs = [USR_OVF] in
+ def S2_vrndpackwhs : T_S2op_1_id <"vrndwh", 0b10, 0b110, 1>;
+
+ // Bit reverse
+ def S2_brev : T_S2op_1_ii <"brev", 0b01, 0b110>;
+
+ // Absolute value word
+ def A2_abs : T_S2op_1_ii <"abs", 0b10, 0b100>;
+
+ let Defs = [USR_OVF] in
+ def A2_abssat : T_S2op_1_ii <"abs", 0b10, 0b101, 1>;
+
+ // Negate with saturation
+ let Defs = [USR_OVF] in
+ def A2_negsat : T_S2op_1_ii <"neg", 0b10, 0b110, 1>;
+}
+
+def: Pat<(i32 (select (i1 (setlt (i32 IntRegs:$src), 0)),
+ (i32 (sub 0, (i32 IntRegs:$src))),
+ (i32 IntRegs:$src))),
+ (A2_abs IntRegs:$src)>;
+
+let AddedComplexity = 50 in
+def: Pat<(i32 (xor (add (sra (i32 IntRegs:$src), (i32 31)),
+ (i32 IntRegs:$src)),
+ (sra (i32 IntRegs:$src), (i32 31)))),
+ (A2_abs IntRegs:$src)>;
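+
+// This matches the classic branch-free abs idiom: with m = x >>s 31,
+// (m + x) ^ m == |x| for 32-bit x (INT_MIN maps to itself, as with the
+// non-saturating A2_abs).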
+
+class T_S2op_2 <string mnemonic, bits<4> RegTyBits, RegisterClass RCOut,
+ RegisterClass RCIn, bits<3> MajOp, bits<3> MinOp,
+ bit isSat, bit isRnd, list<dag> pattern = []>
+ : SInst <(outs RCOut:$dst),
+ (ins RCIn:$src, u5Imm:$u5),
+ "$dst = "#mnemonic#"($src, #$u5)"#!if(isSat, ":sat", "")
+ #!if(isRnd, ":rnd", ""),
+ pattern, "", S_2op_tc_2_SLOT23> {
+ bits<5> dst;
+ bits<5> src;
+ bits<5> u5;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = RegTyBits;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = u5;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
}
+
+class T_S2op_2_di <string mnemonic, bits<3> MajOp, bits<3> MinOp>
+ : T_S2op_2 <mnemonic, 0b1000, DoubleRegs, IntRegs, MajOp, MinOp, 0, 0>;
+
+let hasNewValue = 1 in
+class T_S2op_2_id <string mnemonic, bits<3> MajOp, bits<3> MinOp>
+ : T_S2op_2 <mnemonic, 0b1000, IntRegs, DoubleRegs, MajOp, MinOp, 0, 0>;
+
+let hasNewValue = 1 in
+class T_S2op_2_ii <string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit isSat = 0, bit isRnd = 0, list<dag> pattern = []>
+ : T_S2op_2 <mnemonic, 0b1100, IntRegs, IntRegs, MajOp, MinOp,
+ isSat, isRnd, pattern>;
+
+class T_S2op_shift <string mnemonic, bits<3> MajOp, bits<3> MinOp, SDNode OpNd>
+ : T_S2op_2_ii <mnemonic, MajOp, MinOp, 0, 0,
+ [(set (i32 IntRegs:$dst), (OpNd (i32 IntRegs:$src),
+ (u5ImmPred:$u5)))]>;
+
+// Vector arithmetic shift right by immediate with truncate and pack
+def S2_asr_i_svw_trun : T_S2op_2_id <"vasrw", 0b110, 0b010>;
+
+// Arithmetic/logical shift right/left by immediate
+let Itinerary = S_2op_tc_1_SLOT23 in {
+ def S2_asr_i_r : T_S2op_shift <"asr", 0b000, 0b000, sra>;
+ def S2_lsr_i_r : T_S2op_shift <"lsr", 0b000, 0b001, srl>;
+ def S2_asl_i_r : T_S2op_shift <"asl", 0b000, 0b010, shl>;
}
-let addrMode = BaseImmOffset, InputType = "reg" in {
- let accessSize = ByteAccess in
- defm STrib_indexed: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext,
- u6_0Ext, 11, 6>, AddrModeRel, ImmRegRel;
+// Shift left by immediate with saturation
+let Defs = [USR_OVF] in
+def S2_asl_i_r_sat : T_S2op_2_ii <"asl", 0b010, 0b010, 1>;
+
+// Shift right with round
+def S2_asr_i_r_rnd : T_S2op_2_ii <"asr", 0b010, 0b000, 0, 1>;
+
+let isAsmParserOnly = 1 in
+def S2_asr_i_r_rnd_goodsyntax
+ : SInst <(outs IntRegs:$dst), (ins IntRegs:$src, u5Imm:$u5),
+ "$dst = asrrnd($src, #$u5)",
+ [], "", S_2op_tc_1_SLOT23>;
+
+let isAsmParserOnly = 1 in
+def A2_not: ALU32_rr<(outs IntRegs:$dst),(ins IntRegs:$src),
+ "$dst = not($src)">;
+
+def: Pat<(i32 (sra (i32 (add (i32 (sra I32:$src1, u5ImmPred:$src2)),
+ (i32 1))),
+ (i32 1))),
+ (S2_asr_i_r_rnd IntRegs:$src1, u5ImmPred:$src2)>;
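+
+// That is, the DAG ((x >>s #u5) + 1) >>s 1, an arithmetic right shift by
+// u5+1 that rounds half upward, is selected to the ":rnd" form.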
+
+class T_S2op_3<string opc, bits<2>MajOp, bits<3>minOp, bits<1> sat = 0>
+ : SInst<(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss),
+ "$Rdd = "#opc#"($Rss)"#!if(!eq(sat, 1),":sat","")> {
+ bits<5> Rss;
+ bits<5> Rdd;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0;
+ let Inst{23-22} = MajOp;
+ let Inst{20-16} = Rss;
+ let Inst{7-5} = minOp;
+ let Inst{4-0} = Rdd;
+}
+
+def A2_absp : T_S2op_3 <"abs", 0b10, 0b110>;
+def A2_negp : T_S2op_3 <"neg", 0b10, 0b101>;
+def A2_notp : T_S2op_3 <"not", 0b10, 0b100>;
+
+// Interleave/deinterleave
+def S2_interleave : T_S2op_3 <"interleave", 0b11, 0b101>;
+def S2_deinterleave : T_S2op_3 <"deinterleave", 0b11, 0b100>;
+
+// Vector complex conjugate
+def A2_vconj : T_S2op_3 <"vconj", 0b10, 0b111, 1>;
+
+// Vector saturate without pack
+def S2_vsathb_nopack : T_S2op_3 <"vsathb", 0b00, 0b111>;
+def S2_vsathub_nopack : T_S2op_3 <"vsathub", 0b00, 0b100>;
+def S2_vsatwh_nopack : T_S2op_3 <"vsatwh", 0b00, 0b110>;
+def S2_vsatwuh_nopack : T_S2op_3 <"vsatwuh", 0b00, 0b101>;
+
+// Vector absolute value halfwords with and without saturation
+// Rdd64=vabsh(Rss64)[:sat]
+def A2_vabsh : T_S2op_3 <"vabsh", 0b01, 0b100>;
+def A2_vabshsat : T_S2op_3 <"vabsh", 0b01, 0b101, 1>;
+
+// Vector absolute value words with and without saturation
+def A2_vabsw : T_S2op_3 <"vabsw", 0b01, 0b110>;
+def A2_vabswsat : T_S2op_3 <"vabsw", 0b01, 0b111, 1>;
+
+def : Pat<(not (i64 DoubleRegs:$src1)),
+ (A2_notp DoubleRegs:$src1)>;
+
+//===----------------------------------------------------------------------===//
+// STYPE/BIT +
+//===----------------------------------------------------------------------===//
+// Bit count
+
+let hasSideEffects = 0, hasNewValue = 1 in
+class T_COUNT_LEADING<string MnOp, bits<3> MajOp, bits<3> MinOp, bit Is32,
+ dag Out, dag Inp>
+ : SInst<Out, Inp, "$Rd = "#MnOp#"($Rs)", [], "", S_2op_tc_1_SLOT23> {
+ bits<5> Rs;
+ bits<5> Rd;
+ let IClass = 0b1000;
+ let Inst{27} = 0b1;
+ let Inst{26} = Is32;
+ let Inst{25-24} = 0b00;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+}
- let accessSize = HalfWordAccess in
- defm STrih_indexed: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext,
- u6_1Ext, 12, 7>, AddrModeRel, ImmRegRel;
+class T_COUNT_LEADING_32<string MnOp, bits<3> MajOp, bits<3> MinOp>
+ : T_COUNT_LEADING<MnOp, MajOp, MinOp, 0b1,
+ (outs IntRegs:$Rd), (ins IntRegs:$Rs)>;
+
+class T_COUNT_LEADING_64<string MnOp, bits<3> MajOp, bits<3> MinOp>
+ : T_COUNT_LEADING<MnOp, MajOp, MinOp, 0b0,
+ (outs IntRegs:$Rd), (ins DoubleRegs:$Rs)>;
+
+def S2_cl0 : T_COUNT_LEADING_32<"cl0", 0b000, 0b101>;
+def S2_cl1 : T_COUNT_LEADING_32<"cl1", 0b000, 0b110>;
+def S2_ct0 : T_COUNT_LEADING_32<"ct0", 0b010, 0b100>;
+def S2_ct1 : T_COUNT_LEADING_32<"ct1", 0b010, 0b101>;
+def S2_cl0p : T_COUNT_LEADING_64<"cl0", 0b010, 0b010>;
+def S2_cl1p : T_COUNT_LEADING_64<"cl1", 0b010, 0b100>;
+def S2_clb : T_COUNT_LEADING_32<"clb", 0b000, 0b100>;
+def S2_clbp : T_COUNT_LEADING_64<"clb", 0b010, 0b000>;
+def S2_clbnorm : T_COUNT_LEADING_32<"normamt", 0b000, 0b111>;
+
+def: Pat<(i32 (ctlz I32:$Rs)), (S2_cl0 I32:$Rs)>;
+def: Pat<(i32 (ctlz (not I32:$Rs))), (S2_cl1 I32:$Rs)>;
+def: Pat<(i32 (cttz I32:$Rs)), (S2_ct0 I32:$Rs)>;
+def: Pat<(i32 (cttz (not I32:$Rs))), (S2_ct1 I32:$Rs)>;
+def: Pat<(i32 (trunc (ctlz I64:$Rss))), (S2_cl0p I64:$Rss)>;
+def: Pat<(i32 (trunc (ctlz (not I64:$Rss)))), (S2_cl1p I64:$Rss)>;
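+
+// So, for example, an i32 llvm.ctlz becomes "Rd = cl0(Rs)" and llvm.cttz
+// becomes "Rd = ct0(Rs)"; the (not ...) forms count leading/trailing ones.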
+
+// Bit set/clear/toggle
- let accessSize = WordAccess in
- defm STriw_indexed: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext,
- u6_2Ext, 13, 8>, AddrModeRel, ImmRegRel;
+let hasSideEffects = 0, hasNewValue = 1 in
+class T_SCT_BIT_IMM<string MnOp, bits<3> MinOp>
+ : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, u5Imm:$u5),
+ "$Rd = "#MnOp#"($Rs, #$u5)", [], "", S_2op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> u5;
+ let IClass = 0b1000;
+ let Inst{27-21} = 0b1100110;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = u5;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+}
- let accessSize = DoubleWordAccess, isNVStorable = 0 in
- defm STrid_indexed: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext,
- u6_3Ext, 14, 9>, AddrModeRel;
+let hasSideEffects = 0, hasNewValue = 1 in
+class T_SCT_BIT_REG<string MnOp, bits<2> MinOp>
+ : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = "#MnOp#"($Rs, $Rt)", [], "", S_3op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+ let IClass = 0b1100;
+ let Inst{27-22} = 0b011010;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-6} = MinOp;
+ let Inst{4-0} = Rd;
}
-let AddedComplexity = 10 in {
-def : Pat<(truncstorei8 (i32 IntRegs:$src1), (add IntRegs:$src2,
- s11_0ExtPred:$offset)),
- (STrib_indexed IntRegs:$src2, s11_0ImmPred:$offset,
- (i32 IntRegs:$src1))>;
+def S2_clrbit_i : T_SCT_BIT_IMM<"clrbit", 0b001>;
+def S2_setbit_i : T_SCT_BIT_IMM<"setbit", 0b000>;
+def S2_togglebit_i : T_SCT_BIT_IMM<"togglebit", 0b010>;
+def S2_clrbit_r : T_SCT_BIT_REG<"clrbit", 0b01>;
+def S2_setbit_r : T_SCT_BIT_REG<"setbit", 0b00>;
+def S2_togglebit_r : T_SCT_BIT_REG<"togglebit", 0b10>;
+
+def: Pat<(i32 (and (i32 IntRegs:$Rs), (not (shl 1, u5ImmPred:$u5)))),
+ (S2_clrbit_i IntRegs:$Rs, u5ImmPred:$u5)>;
+def: Pat<(i32 (or (i32 IntRegs:$Rs), (shl 1, u5ImmPred:$u5))),
+ (S2_setbit_i IntRegs:$Rs, u5ImmPred:$u5)>;
+def: Pat<(i32 (xor (i32 IntRegs:$Rs), (shl 1, u5ImmPred:$u5))),
+ (S2_togglebit_i IntRegs:$Rs, u5ImmPred:$u5)>;
+def: Pat<(i32 (and (i32 IntRegs:$Rs), (not (shl 1, (i32 IntRegs:$Rt))))),
+ (S2_clrbit_r IntRegs:$Rs, IntRegs:$Rt)>;
+def: Pat<(i32 (or (i32 IntRegs:$Rs), (shl 1, (i32 IntRegs:$Rt)))),
+ (S2_setbit_r IntRegs:$Rs, IntRegs:$Rt)>;
+def: Pat<(i32 (xor (i32 IntRegs:$Rs), (shl 1, (i32 IntRegs:$Rt)))),
+ (S2_togglebit_r IntRegs:$Rs, IntRegs:$Rt)>;
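+
+// E.g. "x & ~(1 << 5)" selects to "Rd = clrbit(Rs, #5)", "x | (1 << 5)" to
+// setbit, and "x ^ (1 << 5)" to togglebit; the _r forms handle bit numbers
+// that are only known at run time.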
+
+// Bit test
+
+let hasSideEffects = 0 in
+class T_TEST_BIT_IMM<string MnOp, bits<3> MajOp>
+ : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, u5Imm:$u5),
+ "$Pd = "#MnOp#"($Rs, #$u5)",
+ [], "", S_2op_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> u5;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b0101;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0;
+ let Inst{12-8} = u5;
+ let Inst{1-0} = Pd;
+}
-def : Pat<(truncstorei16 (i32 IntRegs:$src1), (add IntRegs:$src2,
- s11_1ExtPred:$offset)),
- (STrih_indexed IntRegs:$src2, s11_1ImmPred:$offset,
- (i32 IntRegs:$src1))>;
+let hasSideEffects = 0 in
+class T_TEST_BIT_REG<string MnOp, bit IsNeg>
+ : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Pd = "#MnOp#"($Rs, $Rt)",
+ [], "", S_3op_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+ let IClass = 0b1100;
+ let Inst{27-22} = 0b011100;
+ let Inst{21} = IsNeg;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{1-0} = Pd;
+}
-def : Pat<(store (i32 IntRegs:$src1), (add IntRegs:$src2,
- s11_2ExtPred:$offset)),
- (STriw_indexed IntRegs:$src2, s11_2ImmPred:$offset,
- (i32 IntRegs:$src1))>;
+def S2_tstbit_i : T_TEST_BIT_IMM<"tstbit", 0b000>;
+def S2_tstbit_r : T_TEST_BIT_REG<"tstbit", 0>;
+
+let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm.
+ def: Pat<(i1 (setne (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)),
+ (S2_tstbit_i IntRegs:$Rs, u5ImmPred:$u5)>;
+ def: Pat<(i1 (setne (and (shl 1, (i32 IntRegs:$Rt)), (i32 IntRegs:$Rs)), 0)),
+ (S2_tstbit_r IntRegs:$Rs, IntRegs:$Rt)>;
+ def: Pat<(i1 (trunc (i32 IntRegs:$Rs))),
+ (S2_tstbit_i IntRegs:$Rs, 0)>;
+ def: Pat<(i1 (trunc (i64 DoubleRegs:$Rs))),
+ (S2_tstbit_i (LoReg DoubleRegs:$Rs), 0)>;
+}
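+
+// The trunc-to-i1 patterns above work because truncation keeps only bit 0,
+// so "tstbit(Rs, #0)" (on the low word for a 64-bit source) recovers it.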
-def : Pat<(store (i64 DoubleRegs:$src1), (add IntRegs:$src2,
- s11_3ExtPred:$offset)),
- (STrid_indexed IntRegs:$src2, s11_3ImmPred:$offset,
- (i64 DoubleRegs:$src1))>;
+let hasSideEffects = 0 in
+class T_TEST_BITS_IMM<string MnOp, bits<2> MajOp, bit IsNeg>
+ : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, u6Imm:$u6),
+ "$Pd = "#MnOp#"($Rs, #$u6)",
+ [], "", S_2op_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<6> u6;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b0101;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = IsNeg;
+ let Inst{20-16} = Rs;
+ let Inst{13-8} = u6;
+ let Inst{1-0} = Pd;
}
-// memh(Rx++#s4:1)=Rt.H
+let hasSideEffects = 0 in
+class T_TEST_BITS_REG<string MnOp, bits<2> MajOp, bit IsNeg>
+ : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Pd = "#MnOp#"($Rs, $Rt)",
+ [], "", S_3op_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+ let IClass = 0b1100;
+ let Inst{27-24} = 0b0111;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = IsNeg;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{1-0} = Pd;
+}
-// Store word.
-// Store predicate.
-let Defs = [R10,R11,D5], neverHasSideEffects = 1 in
-def STriw_pred : STInst2<(outs),
- (ins MEMri:$addr, PredRegs:$src1),
- "Error; should not emit",
- []>;
+def C2_bitsclri : T_TEST_BITS_IMM<"bitsclr", 0b10, 0>;
+def C2_bitsclr : T_TEST_BITS_REG<"bitsclr", 0b10, 0>;
+def C2_bitsset : T_TEST_BITS_REG<"bitsset", 0b01, 0>;
-// Allocate stack frame.
-let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
- def ALLOCFRAME : STInst2<(outs),
- (ins i32imm:$amt),
- "allocframe(#$amt)",
- []>;
+let AddedComplexity = 20 in { // Complexity greater than compare reg-imm.
+ def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), u6ImmPred:$u6), 0)),
+ (C2_bitsclri IntRegs:$Rs, u6ImmPred:$u6)>;
+ def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), 0)),
+ (C2_bitsclr IntRegs:$Rs, IntRegs:$Rt)>;
}
+
+let AddedComplexity = 10 in // Complexity greater than compare reg-reg.
+def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), IntRegs:$Rt)),
+ (C2_bitsset IntRegs:$Rs, IntRegs:$Rt)>;
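+
+// That is, "Pd = bitsset(Rs, Rt)" is true iff every bit set in the mask Rt
+// is also set in Rs, hence the (Rs & Rt) == Rt pattern.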
+
//===----------------------------------------------------------------------===//
-// ST -
+// STYPE/BIT -
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// STYPE/ALU +
+// STYPE/COMPLEX +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/COMPLEX -
//===----------------------------------------------------------------------===//
-// Logical NOT.
-def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
- "$dst = not($src1)",
- [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>;
+//===----------------------------------------------------------------------===//
+// XTYPE/PERM +
+//===----------------------------------------------------------------------===//
-// Sign extend word to doubleword.
-def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
- "$dst = sxtw($src1)",
- [(set (i64 DoubleRegs:$dst), (sext (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
-// STYPE/ALU -
+// XTYPE/PERM -
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// STYPE/BIT +
+// STYPE/PRED +
//===----------------------------------------------------------------------===//
-// clrbit.
-def CLRBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = clrbit($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
- (not
- (shl 1, u5ImmPred:$src2))))]>;
-
-def CLRBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = clrbit($src1, #$src2)",
- []>;
-
-// Map from r0 = and(r1, 2147483647) to r0 = clrbit(r1, #31).
-def : Pat <(and (i32 IntRegs:$src1), 2147483647),
- (CLRBIT_31 (i32 IntRegs:$src1), 31)>;
-
-// setbit.
-def SETBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = setbit($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
- (shl 1, u5ImmPred:$src2)))]>;
-
-// Map from r0 = or(r1, -2147483648) to r0 = setbit(r1, #31).
-def SETBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = setbit($src1, #$src2)",
- []>;
-
-def : Pat <(or (i32 IntRegs:$src1), -2147483648),
- (SETBIT_31 (i32 IntRegs:$src1), 31)>;
-
-// togglebit.
-def TOGBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = setbit($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1),
- (shl 1, u5ImmPred:$src2)))]>;
-
-// Map from r0 = xor(r1, -2147483648) to r0 = togglebit(r1, #31).
-def TOGBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = togglebit($src1, #$src2)",
- []>;
-
-def : Pat <(xor (i32 IntRegs:$src1), -2147483648),
- (TOGBIT_31 (i32 IntRegs:$src1), 31)>;
// Predicate transfer.
-let neverHasSideEffects = 1 in
-def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1),
- "$dst = $src1 /* Should almost never emit this. */",
- []>;
+let hasSideEffects = 0, hasNewValue = 1 in
+def C2_tfrpr : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps),
+ "$Rd = $Ps", [], "", S_2op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<2> Ps;
+
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b1001;
+ let Inst{22} = 0b1;
+ let Inst{17-16} = Ps;
+ let Inst{4-0} = Rd;
+}
+
+// Transfer general register to predicate.
+let hasSideEffects = 0 in
+def C2_tfrrp: SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs),
+ "$Pd = $Rs", [], "", S_2op_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+
+ let IClass = 0b1000;
+ let Inst{27-21} = 0b0101010;
+ let Inst{20-16} = Rs;
+ let Inst{1-0} = Pd;
+}
+
+let hasSideEffects = 0, isCodeGenOnly = 1 in
+def C2_pxfer_map: SInst<(outs PredRegs:$dst), (ins PredRegs:$src),
+ "$dst = $src">;
+
+
+// Patterns for loads of i1:
+def: Pat<(i1 (load AddrFI:$fi)),
+ (C2_tfrrp (L2_loadrub_io AddrFI:$fi, 0))>;
+def: Pat<(i1 (load (add (i32 IntRegs:$Rs), s11_0ExtPred:$Off))),
+ (C2_tfrrp (L2_loadrub_io IntRegs:$Rs, imm:$Off))>;
+def: Pat<(i1 (load (i32 IntRegs:$Rs))),
+ (C2_tfrrp (L2_loadrub_io IntRegs:$Rs, 0))>;
+
+def I1toI32: OutPatFrag<(ops node:$Rs),
+ (C2_muxii (i1 $Rs), 1, 0)>;
+
+def I32toI1: OutPatFrag<(ops node:$Rs),
+ (i1 (C2_tfrrp (i32 $Rs)))>;
+
+defm: Storexm_pat<store, I1, s11_0ExtPred, I1toI32, S2_storerb_io>;
+def: Storexm_simple_pat<store, I1, I1toI32, S2_storerb_io>;
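+
+// An i1 store is thus materialized as a byte store of 0 or 1, sketched as
+//   (store I1:$Pd, $addr) -> (S2_storerb_io ..., (C2_muxii $Pd, 1, 0)),
+// mirroring the i1 loads above, which go through L2_loadrub + C2_tfrrp.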
-def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
- "$dst = $src1 /* Should almost never emit this. */",
- [(set (i1 PredRegs:$dst), (trunc (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
// STYPE/PRED -
//===----------------------------------------------------------------------===//
@@ -1786,88 +4385,56 @@ def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
//===----------------------------------------------------------------------===//
// STYPE/SHIFT +
//===----------------------------------------------------------------------===//
+class S_2OpInstImm<string Mnemonic, bits<3>MajOp, bits<3>MinOp,
+ Operand Imm, list<dag> pattern = [], bit isRnd = 0>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, Imm:$src2),
+ "$dst = "#Mnemonic#"($src1, #$src2)"#!if(isRnd, ":rnd", ""),
+ pattern> {
+ bits<5> src1;
+ bits<5> dst;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
+}
+
+class S_2OpInstImmI6<string Mnemonic, SDNode OpNode, bits<3>MinOp>
+ : S_2OpInstImm<Mnemonic, 0b000, MinOp, u6Imm,
+ [(set (i64 DoubleRegs:$dst), (OpNode (i64 DoubleRegs:$src1),
+ u6ImmPred:$src2))]> {
+ bits<6> src2;
+ let Inst{13-8} = src2;
+}
+
// Shift by immediate.
-def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = asr($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
- u5ImmPred:$src2))]>;
-
-def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
- "$dst = asr($src1, #$src2)",
- [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
- u6ImmPred:$src2))]>;
-
-def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = asl($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
- u5ImmPred:$src2))]>;
-
-def ASLd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
- "$dst = asl($src1, #$src2)",
- [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
- u6ImmPred:$src2))]>;
-
-def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = lsr($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
- u5ImmPred:$src2))]>;
-
-def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
- "$dst = lsr($src1, #$src2)",
- [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
- u6ImmPred:$src2))]>;
-
-// Shift by immediate and add.
-let AddedComplexity = 100 in
-def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- u3Imm:$src3),
- "$dst = addasl($src1, $src2, #$src3)",
- [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
- (shl (i32 IntRegs:$src2),
- u3ImmPred:$src3)))]>;
-
-// Shift by register.
-def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = asl($src1, $src2)",
- [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = asr($src1, $src2)",
- [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def LSL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = lsl($src1, $src2)",
- [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = lsr($src1, $src2)",
- [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def ASLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- "$dst = asl($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- "$dst = lsl($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- IntRegs:$src2),
- "$dst = asr($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
-def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- IntRegs:$src2),
- "$dst = lsr($src1, $src2)",
- [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
- (i32 IntRegs:$src2)))]>;
+def S2_asr_i_p : S_2OpInstImmI6<"asr", sra, 0b000>;
+def S2_asl_i_p : S_2OpInstImmI6<"asl", shl, 0b010>;
+def S2_lsr_i_p : S_2OpInstImmI6<"lsr", srl, 0b001>;
+
+// Shift left by small amount and add.
+let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0 in
+def S2_addasl_rrri: SInst <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rt, IntRegs:$Rs, u3Imm:$u3),
+ "$Rd = addasl($Rt, $Rs, #$u3)" ,
+ [(set (i32 IntRegs:$Rd), (add (i32 IntRegs:$Rt),
+ (shl (i32 IntRegs:$Rs), u3ImmPred:$u3)))],
+ "", S_3op_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rt;
+ bits<5> Rs;
+ bits<3> u3;
+
+ let IClass = 0b1100;
+
+ let Inst{27-21} = 0b0100000;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = u3;
+ let Inst{4-0} = Rd;
+ }
//===----------------------------------------------------------------------===//
// STYPE/SHIFT -
@@ -1894,39 +4461,222 @@ def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
//===----------------------------------------------------------------------===//
// SYSTEM/USER +
//===----------------------------------------------------------------------===//
-def SDHexagonBARRIER: SDTypeProfile<0, 0, []>;
-def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER,
- [SDNPHasChain]>;
+def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDTNone, [SDNPHasChain]>;
-let hasSideEffects = 1, isSolo = 1 in
-def BARRIER : SYSInst<(outs), (ins),
+let hasSideEffects = 1, isSoloAX = 1 in
+def Y2_barrier : SYSInst<(outs), (ins),
"barrier",
- [(HexagonBARRIER)]>;
+ [(HexagonBARRIER)],"",ST_tc_st_SLOT0> {
+ let Inst{31-28} = 0b1010;
+ let Inst{27-21} = 0b1000000;
+}
//===----------------------------------------------------------------------===//
// SYSTEM/SUPER -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// CRUSER - Type.
+//===----------------------------------------------------------------------===//
+// HW loop
+let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
+ opExtendable = 0, hasSideEffects = 0 in
+class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
+ : CRInst<(outs), (ins brOp:$offset, u10Imm:$src2),
+ #mnemonic#"($offset, #$src2)",
+ [], "" , CR_tc_3x_SLOT3> {
+ bits<9> offset;
+ bits<10> src2;
+
+ let IClass = 0b0110;
+
+ let Inst{27-22} = 0b100100;
+ let Inst{21} = !if (!eq(mnemonic, "loop0"), 0b0, 0b1);
+ let Inst{20-16} = src2{9-5};
+ let Inst{12-8} = offset{8-4};
+ let Inst{7-5} = src2{4-2};
+ let Inst{4-3} = offset{3-2};
+ let Inst{1-0} = src2{1-0};
+}
-// TFRI64 - assembly mapped.
-let isReMaterializable = 1 in
-def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1),
- "$dst = #$src1",
- [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>;
-
-// Pseudo instruction to encode a set of conditional transfers.
-// This instruction is used instead of a mux and trades-off codesize
-// for performance. We conduct this transformation optimistically in
-// the hope that these instructions get promoted to dot-new transfers.
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
- IntRegs:$src2,
- IntRegs:$src3),
- "Error; should not emit",
- [(set (i32 IntRegs:$dst),
- (i32 (select (i1 PredRegs:$src1),
- (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))]>;
-let AddedComplexity = 100, isPredicated = 1 in
+let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
+ opExtendable = 0, hasSideEffects = 0 in
+class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
+ : CRInst<(outs), (ins brOp:$offset, IntRegs:$src2),
+ #mnemonic#"($offset, $src2)",
+ [], "" ,CR_tc_3x_SLOT3> {
+ bits<9> offset;
+ bits<5> src2;
+
+ let IClass = 0b0110;
+
+ let Inst{27-22} = 0b000000;
+ let Inst{21} = !if (!eq(mnemonic, "loop0"), 0b0, 0b1);
+ let Inst{20-16} = src2;
+ let Inst{12-8} = offset{8-4};
+ let Inst{4-3} = offset{3-2};
+ }
+
+multiclass LOOP_ri<string mnemonic> {
+ def i : LOOP_iBase<mnemonic, brtarget>;
+ def r : LOOP_rBase<mnemonic, brtarget>;
+}
+
+
+let Defs = [SA0, LC0, USR] in
+defm J2_loop0 : LOOP_ri<"loop0">;
+
+// Interestingly, only loop0 appears to set usr.lpcfg.
+let Defs = [SA1, LC1] in
+defm J2_loop1 : LOOP_ri<"loop1">;
+
+let isBranch = 1, isTerminator = 1, hasSideEffects = 0,
+ Defs = [PC, LC0], Uses = [SA0, LC0] in {
+def ENDLOOP0 : Endloop<(outs), (ins brtarget:$offset),
+ ":endloop0",
+ []>;
+}
+
+let isBranch = 1, isTerminator = 1, hasSideEffects = 0,
+ Defs = [PC, LC1], Uses = [SA1, LC1] in {
+def ENDLOOP1 : Endloop<(outs), (ins brtarget:$offset),
+ ":endloop1",
+ []>;
+}
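+
+// Expected use (a sketch): "loop0(.Lbody, #N)" seeds SA0/LC0, and the
+// ":endloop0" marker at the end of the loop body decrements LC0 and branches
+// back to SA0 while the count remains greater than one.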
+
+// Pipelined loop instructions, sp[123]loop0
+let Defs = [LC0, SA0, P3, USR], hasSideEffects = 0,
+ isExtentSigned = 1, isExtendable = 1, opExtentBits = 9, opExtentAlign = 2,
+ opExtendable = 0, isPredicateLate = 1 in
+class SPLOOP_iBase<string SP, bits<2> op>
+ : CRInst <(outs), (ins brtarget:$r7_2, u10Imm:$U10),
+ "p3 = sp"#SP#"loop0($r7_2, #$U10)" > {
+ bits<9> r7_2;
+ bits<10> U10;
+
+ let IClass = 0b0110;
+
+ let Inst{22-21} = op;
+ let Inst{27-23} = 0b10011;
+ let Inst{20-16} = U10{9-5};
+ let Inst{12-8} = r7_2{8-4};
+ let Inst{7-5} = U10{4-2};
+ let Inst{4-3} = r7_2{3-2};
+ let Inst{1-0} = U10{1-0};
+ }
+
+let Defs = [LC0, SA0, P3, USR], hasSideEffects = 0,
+ isExtentSigned = 1, isExtendable = 1, opExtentBits = 9, opExtentAlign = 2,
+ opExtendable = 0, isPredicateLate = 1 in
+class SPLOOP_rBase<string SP, bits<2> op>
+ : CRInst <(outs), (ins brtarget:$r7_2, IntRegs:$Rs),
+ "p3 = sp"#SP#"loop0($r7_2, $Rs)" > {
+ bits<9> r7_2;
+ bits<5> Rs;
+
+ let IClass = 0b0110;
+
+ let Inst{22-21} = op;
+ let Inst{27-23} = 0b00001;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = r7_2{8-4};
+ let Inst{4-3} = r7_2{3-2};
+ }
+
+multiclass SPLOOP_ri<string mnemonic, bits<2> op> {
+ def i : SPLOOP_iBase<mnemonic, op>;
+ def r : SPLOOP_rBase<mnemonic, op>;
+}
+
+defm J2_ploop1s : SPLOOP_ri<"1", 0b01>;
+defm J2_ploop2s : SPLOOP_ri<"2", 0b10>;
+defm J2_ploop3s : SPLOOP_ri<"3", 0b11>;
+
+// if (Rs[!>=<]=#0) jump:[t/nt]
+let Defs = [PC], isPredicated = 1, isBranch = 1, hasSideEffects = 0 in
+class J2_jump_0_Base<string compare, bit isTak, bits<2> op>
+ : CRInst <(outs), (ins IntRegs:$Rs, brtarget:$r13_2),
+ "if ($Rs"#compare#"#0) jump"#!if(isTak, ":t", ":nt")#" $r13_2" > {
+ bits<5> Rs;
+ bits<15> r13_2;
+
+ let IClass = 0b0110;
+
+ let Inst{27-24} = 0b0001;
+ let Inst{23-22} = op;
+ let Inst{12} = isTak;
+ let Inst{21} = r13_2{14};
+ let Inst{20-16} = Rs;
+ let Inst{11-1} = r13_2{12-2};
+ let Inst{13} = r13_2{13};
+ }
+
+multiclass J2_jump_compare_0<string compare, bits<2> op> {
+ def NAME : J2_jump_0_Base<compare, 0, op>;
+ def NAME#pt : J2_jump_0_Base<compare, 1, op>;
+}
+
+defm J2_jumprz : J2_jump_compare_0<"!=", 0b00>;
+defm J2_jumprgtez : J2_jump_compare_0<">=", 0b01>;
+defm J2_jumprnz : J2_jump_compare_0<"==", 0b10>;
+defm J2_jumprltez : J2_jump_compare_0<"<=", 0b11>;
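+
+// E.g. J2_jumprz assembles as "if (Rs!=#0) jump:nt target" and J2_jumprzpt
+// as the taken-hint form "if (Rs!=#0) jump:t target".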
+
+// Transfer between control registers and general-purpose registers.
+let hasSideEffects = 0 in
+class TFR_CR_RS_base<RegisterClass CTRC, RegisterClass RC, bit isDouble>
+ : CRInst <(outs CTRC:$dst), (ins RC:$src),
+ "$dst = $src", [], "", CR_tc_3x_SLOT3> {
+ bits<5> dst;
+ bits<5> src;
+
+ let IClass = 0b0110;
+
+ let Inst{27-25} = 0b001;
+ let Inst{24} = isDouble;
+ let Inst{23-21} = 0b001;
+ let Inst{20-16} = src;
+ let Inst{4-0} = dst;
+ }
+
+def A2_tfrrcr : TFR_CR_RS_base<CtrRegs, IntRegs, 0b0>;
+def A4_tfrpcp : TFR_CR_RS_base<CtrRegs64, DoubleRegs, 0b1>;
+def : InstAlias<"m0 = $Rs", (A2_tfrrcr C6, IntRegs:$Rs)>;
+def : InstAlias<"m1 = $Rs", (A2_tfrrcr C7, IntRegs:$Rs)>;
+
+let hasSideEffects = 0 in
+class TFR_RD_CR_base<RegisterClass RC, RegisterClass CTRC, bit isSingle>
+ : CRInst <(outs RC:$dst), (ins CTRC:$src),
+ "$dst = $src", [], "", CR_tc_3x_SLOT3> {
+ bits<5> dst;
+ bits<5> src;
+
+ let IClass = 0b0110;
+
+ let Inst{27-26} = 0b10;
+ let Inst{25} = isSingle;
+ let Inst{24-21} = 0b0000;
+ let Inst{20-16} = src;
+ let Inst{4-0} = dst;
+ }
+
+let hasNewValue = 1, opNewValue = 0 in
+def A2_tfrcrr : TFR_RD_CR_base<IntRegs, CtrRegs, 1>;
+def A4_tfrcpp : TFR_RD_CR_base<DoubleRegs, CtrRegs64, 0>;
+def : InstAlias<"$Rd = m0", (A2_tfrcrr IntRegs:$Rd, C6)>;
+def : InstAlias<"$Rd = m1", (A2_tfrcrr IntRegs:$Rd, C7)>;
+
+// Y4_trace: Send a value to the ETM trace.
+let isSoloAX = 1, hasSideEffects = 0 in
+def Y4_trace: CRInst <(outs), (ins IntRegs:$Rs),
+ "trace($Rs)"> {
+ bits<5> Rs;
+
+ let IClass = 0b0110;
+ let Inst{27-21} = 0b0010010;
+ let Inst{20-16} = Rs;
+ }
+
+let AddedComplexity = 100, isPredicated = 1, isCodeGenOnly = 1 in
def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3),
"Error; should not emit",
@@ -1934,7 +4684,7 @@ def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
(i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
s12ImmPred:$src3)))]>;
-let AddedComplexity = 100, isPredicated = 1 in
+let AddedComplexity = 100, isPredicated = 1, isCodeGenOnly = 1 in
def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3),
"Error; should not emit",
@@ -1942,7 +4692,7 @@ def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
(i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2,
(i32 IntRegs:$src3))))]>;
-let AddedComplexity = 100, isPredicated = 1 in
+let AddedComplexity = 100, isPredicated = 1, isCodeGenOnly = 1 in
def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3),
"Error; should not emit",
@@ -1951,115 +4701,109 @@ def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
s12ImmPred:$src3)))]>;
// Generate frameindex addresses.
-let isReMaterializable = 1 in
+let isReMaterializable = 1, isCodeGenOnly = 1 in
def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1),
"$dst = add($src1)",
[(set (i32 IntRegs:$dst), ADDRri:$src1)]>;
-//
-// CR - Type.
-//
-let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
-def LOOP0_i : CRInst<(outs), (ins brtarget:$offset, u10Imm:$src2),
- "loop0($offset, #$src2)",
- []>;
-}
+// Support for generating global address.
+// Taken from X86InstrInfo.td.
+def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
+ SDTCisPtrTy<0>]>;
+def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>;
+def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
-let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
-def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2),
- "loop0($offset, $src2)",
- []>;
-}
+// HI/LO Instructions
+let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
+ hasNewValue = 1, opNewValue = 0 in
+class REG_IMMED<string RegHalf, string Op, bit Rs, bits<3> MajOp, bit MinOp>
+ : ALU32_ri<(outs IntRegs:$dst),
+ (ins i32imm:$imm_value),
+ "$dst"#RegHalf#" = #"#Op#"($imm_value)", []> {
+ bits<5> dst;
+ bits<32> imm_value;
+ let IClass = 0b0111;
-let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1,
- Defs = [PC, LC0], Uses = [SA0, LC0] in {
-def ENDLOOP0 : Endloop<(outs), (ins brtarget:$offset),
- ":endloop0",
- []>;
+ let Inst{27} = Rs;
+ let Inst{26-24} = MajOp;
+ let Inst{21} = MinOp;
+ let Inst{20-16} = dst;
+ let Inst{23-22} = !if (!eq(Op, "LO"), imm_value{15-14}, imm_value{31-30});
+ let Inst{13-0} = !if (!eq(Op, "LO"), imm_value{13-0}, imm_value{29-16});
}
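+// Encoding note: only the relevant 16-bit half of the 32-bit immediate is
+// encoded (imm{15-0} for "LO", imm{31-16} for "HI"); e.g. a hypothetical
+// "r0.l = #LO(#0x12345678)" would write 0x5678 into r0.l.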
-// Support for generating global address.
-// Taken from X86InstrInfo.td.
-def SDTHexagonCONST32 : SDTypeProfile<1, 1, [
- SDTCisVT<0, i32>,
- SDTCisVT<1, i32>,
- SDTCisPtrTy<0>]>;
-def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>;
-def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
+let isAsmParserOnly = 1 in {
+ def LO : REG_IMMED<".l", "LO", 0b0, 0b001, 0b1>;
+ def LO_H : REG_IMMED<".l", "HI", 0b0, 0b001, 0b1>;
+ def HI : REG_IMMED<".h", "HI", 0b0, 0b010, 0b1>;
+ def HI_L : REG_IMMED<".h", "LO", 0b0, 0b010, 0b1>;
+}
-// HI/LO Instructions
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
-def LO : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
- "$dst.l = #LO($global)",
- []>;
+let isMoveImm = 1, isCodeGenOnly = 1 in
+def LO_PIC : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
+ "$dst.l = #LO($label@GOTREL)",
+ []>;
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
-def HI : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
- "$dst.h = #HI($global)",
- []>;
+let isMoveImm = 1, isCodeGenOnly = 1 in
+def HI_PIC : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
+ "$dst.h = #HI($label@GOTREL)",
+ []>;
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
+ isAsmParserOnly = 1 in
def LOi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
"$dst.l = #LO($imm_value)",
[]>;
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
+ isAsmParserOnly = 1 in
def HIi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
"$dst.h = #HI($imm_value)",
[]>;
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
+ isAsmParserOnly = 1 in
def LO_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
"$dst.l = #LO($jt)",
[]>;
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
+ isAsmParserOnly = 1 in
def HI_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
"$dst.h = #HI($jt)",
[]>;
-
-let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
-def LO_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
- "$dst.l = #LO($label)",
- []>;
-
-let isReMaterializable = 1, isMoveImm = 1 , neverHasSideEffects = 1 in
-def HI_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
- "$dst.h = #HI($label)",
- []>;
-
// This pattern is incorrect. When we add small data, we should change
// this pattern to use memw(#foo).
// This is for sdata.
-let isMoveImm = 1 in
-def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+let isMoveImm = 1, isAsmParserOnly = 1 in
+def CONST32 : CONSTLDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
[(set (i32 IntRegs:$dst),
(load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
-// This is for non-sdata.
let isReMaterializable = 1, isMoveImm = 1 in
def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
[(set (i32 IntRegs:$dst),
(HexagonCONST32 tglobaladdr:$global))]>;
-let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins jumptablebase:$jt),
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
+def CONST32_set_jt : CONSTLDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt),
"$dst = CONST32(#$jt)",
[(set (i32 IntRegs:$dst),
(HexagonCONST32 tjumptable:$jt))]>;
-let isReMaterializable = 1, isMoveImm = 1 in
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
[(set (i32 IntRegs:$dst),
(HexagonCONST32_GP tglobaladdr:$global))]>;
-let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global),
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
+def CONST32_Int_Real : CONSTLDInst<(outs IntRegs:$dst), (ins i32imm:$global),
"$dst = CONST32(#$global)",
[(set (i32 IntRegs:$dst), imm:$global) ]>;
@@ -2067,839 +4811,921 @@ def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global),
def : Pat<(HexagonCONST32_GP tblockaddress:$addr),
(CONST32_Int_Real tblockaddress:$addr)>;
-let isReMaterializable = 1, isMoveImm = 1 in
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label),
"$dst = CONST32($label)",
[(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>;
-let isReMaterializable = 1, isMoveImm = 1 in
-def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins i64imm:$global),
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
+def CONST64_Int_Real : CONSTLDInst<(outs DoubleRegs:$dst), (ins i64imm:$global),
"$dst = CONST64(#$global)",
- [(set (i64 DoubleRegs:$dst), imm:$global) ]>;
+ [(set (i64 DoubleRegs:$dst), imm:$global)]>;
-def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins),
- "$dst = xor($dst, $dst)",
- [(set (i1 PredRegs:$dst), 0)]>;
+let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
+ isCodeGenOnly = 1 in
+def TFR_PdTrue : SInst<(outs PredRegs:$dst), (ins), "",
+ [(set (i1 PredRegs:$dst), 1)]>;
-def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpy($src1, $src2)",
- [(set (i32 IntRegs:$dst),
- (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))),
- (i64 (sext (i32 IntRegs:$src2))))),
- (i32 32)))))]>;
+let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
+ isCodeGenOnly = 1 in
+def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "$dst = xor($dst, $dst)",
+ [(set (i1 PredRegs:$dst), 0)]>;
// Pseudo instructions.
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
-
-def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
+def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
-
-def call : SDNode<"HexagonISD::CALL", SDT_SPCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
+def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
// For tail calls, a HexagonTCRet SDNode has three properties: a chain, an
// optional flag, and variable arguments.
// Its single operand has pointer type.
-def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-
-let Defs = [R29, R30], Uses = [R31, R30, R29] in {
- def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
- "Should never be emitted",
- [(callseq_start timm:$amt)]>;
-}
-
-let Defs = [R29, R30, R31], Uses = [R29] in {
- def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "Should never be emitted",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
-}
-// Call subroutine.
-let isCall = 1, neverHasSideEffects = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
- R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALL : JInst<(outs), (ins calltarget:$dst),
- "call $dst", []>;
-}
-
-// Call subroutine from register.
-let isCall = 1, neverHasSideEffects = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
- R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLR : JRInst<(outs), (ins IntRegs:$dst),
- "callr $dst",
- []>;
- }
+def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+let Defs = [R29, R30], Uses = [R31, R30, R29], isPseudo = 1 in
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
+ ".error \"should not emit\" ",
+ [(callseq_start timm:$amt)]>;
+let Defs = [R29, R30, R31], Uses = [R29], isPseudo = 1 in
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ ".error \"should not emit\" ",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+// Call subroutine indirectly.
+let Defs = VolatileV3.Regs in
+def J2_callr : JUMPR_MISC_CALLR<0, 1>;
// Indirect tail-call.
-let isCodeGenOnly = 1, isCall = 1, isReturn = 1 in
-def TCRETURNR : T_JMPr;
+let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0,
+ isTerminator = 1, isCodeGenOnly = 1 in
+def TCRETURNr : T_JMPr;
// Direct tail-calls.
let isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0,
isTerminator = 1, isCodeGenOnly = 1 in {
- def TCRETURNtg : T_JMP<(ins calltarget:$dst)>;
- def TCRETURNtext : T_JMP<(ins calltarget:$dst)>;
+ def TCRETURNtg : JInst<(outs), (ins calltarget:$dst), "jump $dst",
+ [], "", J_tc_2early_SLOT23>;
+ def TCRETURNtext : JInst<(outs), (ins calltarget:$dst), "jump $dst",
+ [], "", J_tc_2early_SLOT23>;
}
-// Map call instruction.
-def : Pat<(call (i32 IntRegs:$dst)),
- (CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>;
-def : Pat<(call tglobaladdr:$dst),
- (CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>;
-def : Pat<(call texternalsym:$dst),
- (CALL texternalsym:$dst)>, Requires<[HasV2TOnly]>;
// Tail calls.
-def : Pat<(HexagonTCRet tglobaladdr:$dst),
- (TCRETURNtg tglobaladdr:$dst)>;
-def : Pat<(HexagonTCRet texternalsym:$dst),
- (TCRETURNtext texternalsym:$dst)>;
-def : Pat<(HexagonTCRet (i32 IntRegs:$dst)),
- (TCRETURNR (i32 IntRegs:$dst))>;
-
-// Atomic load and store support
-// 8 bit atomic load
-def : Pat<(atomic_load_8 ADDRriS11_0:$src1),
- (i32 (LDriub ADDRriS11_0:$src1))>;
-
-def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)),
- (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>;
-
-// 16 bit atomic load
-def : Pat<(atomic_load_16 ADDRriS11_1:$src1),
- (i32 (LDriuh ADDRriS11_1:$src1))>;
-
-def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)),
- (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>;
-
-def : Pat<(atomic_load_32 ADDRriS11_2:$src1),
- (i32 (LDriw ADDRriS11_2:$src1))>;
-
-def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)),
- (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>;
-
-// 64 bit atomic load
-def : Pat<(atomic_load_64 ADDRriS11_3:$src1),
- (i64 (LDrid ADDRriS11_3:$src1))>;
-
-def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)),
- (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>;
-
-
-def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)),
- (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>;
-
-def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset),
- (i32 IntRegs:$src1)),
- (STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset,
- (i32 IntRegs:$src1))>;
-
-
-def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)),
- (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>;
-
-def : Pat<(atomic_store_16 (i32 IntRegs:$src1),
- (add (i32 IntRegs:$src2), s11_1ImmPred:$offset)),
- (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset,
- (i32 IntRegs:$src1))>;
-
-def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)),
- (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>;
-
-def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset),
- (i32 IntRegs:$src1)),
- (STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset,
- (i32 IntRegs:$src1))>;
-
-
-
-
-def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)),
- (STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>;
-
-def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset),
- (i64 DoubleRegs:$src1)),
- (STrid_indexed (i32 IntRegs:$src2), s11_3ImmPred:$offset,
- (i64 DoubleRegs:$src1))>;
+def: Pat<(HexagonTCRet tglobaladdr:$dst),
+ (TCRETURNtg tglobaladdr:$dst)>;
+def: Pat<(HexagonTCRet texternalsym:$dst),
+ (TCRETURNtext texternalsym:$dst)>;
+def: Pat<(HexagonTCRet (i32 IntRegs:$dst)),
+ (TCRETURNr (i32 IntRegs:$dst))>;
// Map from r0 = and(r1, 65535) to r0 = zxth(r1)
-def : Pat <(and (i32 IntRegs:$src1), 65535),
- (ZXTH (i32 IntRegs:$src1))>;
+def: Pat<(and (i32 IntRegs:$src1), 65535),
+ (A2_zxth IntRegs:$src1)>;
// Map from r0 = and(r1, 255) to r0 = zxtb(r1).
-def : Pat <(and (i32 IntRegs:$src1), 255),
- (ZXTB (i32 IntRegs:$src1))>;
+def: Pat<(and (i32 IntRegs:$src1), 255),
+ (A2_zxtb IntRegs:$src1)>;
// Map Add(p1, true) to p1 = not(p1).
// Add(p1, false) should never be produced;
// if it is, it must be mapped to a NOP.
-def : Pat <(add (i1 PredRegs:$src1), -1),
- (NOT_p (i1 PredRegs:$src1))>;
-
-// Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) =>
-// p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1).
-// cmp.lt(r0, r1) -> cmp.gt(r1, r0)
-def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i32 IntRegs:$src3),
- (i32 IntRegs:$src4)),
- (i32 (TFR_condset_rr (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)),
- (i32 IntRegs:$src4), (i32 IntRegs:$src3)))>,
- Requires<[HasV2TOnly]>;
+def: Pat<(add (i1 PredRegs:$src1), -1),
+ (C2_not PredRegs:$src1)>;
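// Illustrative check of the identity used above (i1 arithmetic is mod 2 and
// "true" is the all-ones value -1): p + (-1) == p ^ 1 == !p, e.g.
// 1 + (-1) = 0 = !1 and 0 + (-1) = 1 (mod 2) = !0.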
// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
-def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3),
- (i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3,
- s8ImmPred:$src2))>;
+def: Pat<(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ExtPred:$src3),
+ (C2_muxii PredRegs:$src1, s8ExtPred:$src3, s8ImmPred:$src2)>;
// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
-// => r0 = TFR_condset_ri(p0, r1, #i)
-def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2,
- (i32 IntRegs:$src3)),
- (i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3),
- s12ImmPred:$src2))>;
+// => r0 = C2_muxir(p0, r1, #i)
+def: Pat<(select (not (i1 PredRegs:$src1)), s8ExtPred:$src2,
+ (i32 IntRegs:$src3)),
+ (C2_muxir PredRegs:$src1, IntRegs:$src3, s8ExtPred:$src2)>;
// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
-// => r0 = TFR_condset_ir(p0, #i, r1)
-def : Pat <(select (not (i1 PredRegs:$src1)), IntRegs:$src2, s12ImmPred:$src3),
- (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3,
- (i32 IntRegs:$src2)))>;
+// => r0 = C2_muxri (p0, #i, r1)
+def: Pat<(select (not (i1 PredRegs:$src1)), IntRegs:$src2, s8ExtPred:$src3),
+ (C2_muxri PredRegs:$src1, s8ExtPred:$src3, IntRegs:$src2)>;
// Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump.
-def : Pat <(brcond (not (i1 PredRegs:$src1)), bb:$offset),
- (JMP_f (i1 PredRegs:$src1), bb:$offset)>;
+def: Pat<(brcond (not (i1 PredRegs:$src1)), bb:$offset),
+ (J2_jumpf PredRegs:$src1, bb:$offset)>;
-// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2).
-def : Pat <(and (i1 PredRegs:$src1), (not (i1 PredRegs:$src2))),
- (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
+// Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = A2_sxtw(Rss.lo).
+def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)),
+ (A2_sxtw (LoReg DoubleRegs:$src1))>;
+// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = A2_sxtw(A2_sxth(Rss.lo)).
+def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)),
+ (A2_sxtw (A2_sxth (LoReg DoubleRegs:$src1)))>;
-let AddedComplexity = 100 in
-def : Pat <(i64 (zextloadi1 (HexagonCONST32 tglobaladdr:$global))),
- (i64 (COMBINE_rr (TFRI 0),
- (LDriub_indexed (CONST32_set tglobaladdr:$global), 0)))>,
- Requires<[NoV4T]>;
-
-// Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned.
-let AddedComplexity = 10 in
-def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)),
- (i32 (A2_and (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>;
-
-// Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo).
-def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)),
- (i64 (SXTW (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>;
-
-// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)).
-def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)),
- (i64 (SXTW (i32 (SXTH (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
- subreg_loreg))))))>;
-
-// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)).
-def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)),
- (i64 (SXTW (i32 (SXTB (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
- subreg_loreg))))))>;
+// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = A2_sxtw(A2_sxtb(Rss.lo)).
+def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)),
+ (A2_sxtw (A2_sxtb (LoReg DoubleRegs:$src1)))>;
// We want to avoid emitting pnot instructions as much as possible.
-// Map brcond with an unsupported setcc to a JMP_f.
+// Map brcond with an unsupported setcc to a J2_jumpf.
def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
bb:$offset),
- (JMP_f (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
+ (J2_jumpf (C2_cmpeq (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
bb:$offset)>;
def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
bb:$offset),
- (JMP_f (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>;
+ (J2_jumpf (C2_cmpeqi (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>;
-def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset),
- (JMP_f (i1 PredRegs:$src1), bb:$offset)>;
+def: Pat<(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset),
+ (J2_jumpf PredRegs:$src1, bb:$offset)>;
-def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset),
- (JMP_t (i1 PredRegs:$src1), bb:$offset)>;
+def: Pat<(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset),
+ (J2_jumpt PredRegs:$src1, bb:$offset)>;
// cmp.lt(Rs, Imm) -> !cmp.ge(Rs, Imm) -> !cmp.gt(Rs, Imm-1)
-def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
- bb:$offset),
- (JMP_f (CMPGTri (i32 IntRegs:$src1),
- (DEC_CONST_SIGNED s8ImmPred:$src2)), bb:$offset)>;
+def: Pat<(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), bb:$offset),
+ (J2_jumpf (C2_cmpgti IntRegs:$src1, (DEC_CONST_SIGNED s8ImmPred:$src2)),
+ bb:$offset)>;
// cmp.lt(r0, r1) -> cmp.gt(r1, r0)
def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
bb:$offset),
- (JMP_t (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)), bb:$offset)>;
+ (J2_jumpt (C2_cmpgt (i32 IntRegs:$src2), (i32 IntRegs:$src1)), bb:$offset)>;
def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
bb:$offset),
- (JMP_f (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)),
+ (J2_jumpf (C2_cmpgtup (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)),
bb:$offset)>;
def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
bb:$offset),
- (JMP_f (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
+ (J2_jumpf (C2_cmpgtu (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
bb:$offset)>;
def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
bb:$offset),
- (JMP_f (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
+ (J2_jumpf (C2_cmpgtup (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
bb:$offset)>;
// Map from a 64-bit select to an emulated 64-bit mux.
// Hexagon does not support 64-bit MUXes; so emulate with combines.
-def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src3)),
- (i64 (COMBINE_rr (i32 (MUX_rr (i1 PredRegs:$src1),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- subreg_hireg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
- subreg_hireg)))),
- (i32 (MUX_rr (i1 PredRegs:$src1),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- subreg_loreg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
- subreg_loreg))))))>;
+def: Pat<(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src3)),
+ (A2_combinew (C2_mux PredRegs:$src1, (HiReg DoubleRegs:$src2),
+ (HiReg DoubleRegs:$src3)),
+ (C2_mux PredRegs:$src1, (LoReg DoubleRegs:$src2),
+ (LoReg DoubleRegs:$src3)))>;
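// Worked example of the emulation above (values hypothetical): with p = 1,
// x = 0xAAAAAAAA_BBBBBBBB and y = 0xCCCCCCCC_DDDDDDDD,
//   hi = mux(p, x.h, y.h) = 0xAAAAAAAA
//   lo = mux(p, x.l, y.l) = 0xBBBBBBBB
//   combine(hi, lo) = 0xAAAAAAAA_BBBBBBBB, i.e. select(p, x, y) = x.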
// Map from a 1-bit select to logical ops.
// From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3).
-def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2),
- (i1 PredRegs:$src3)),
- (OR_pp (AND_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)),
- (AND_pp (NOT_p (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>;
+def: Pat<(select (i1 PredRegs:$src1), (i1 PredRegs:$src2), (i1 PredRegs:$src3)),
+ (C2_or (C2_and PredRegs:$src1, PredRegs:$src2),
+ (C2_and (C2_not PredRegs:$src1), PredRegs:$src3))>;
// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs.
def : Pat<(i1 (load ADDRriS11_2:$addr)),
- (i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>;
+ (i1 (C2_tfrrp (i32 (L2_loadrb_io AddrFI:$addr, 0))))>;
// Map truncation from i64 to i32 (take the low subregister).
-def : Pat<(i32 (trunc (i64 DoubleRegs:$src))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>;
+def: Pat<(i32 (trunc (i64 DoubleRegs:$src))),
+ (LoReg DoubleRegs:$src)>;
// Map truncation from i64 to i1.
-def : Pat<(i1 (trunc (i64 DoubleRegs:$src))),
- (i1 (TFR_PdRs (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
- subreg_loreg))))>;
+def: Pat<(i1 (trunc (i64 DoubleRegs:$src))),
+ (C2_tfrrp (LoReg DoubleRegs:$src))>;
// Map memb(Rs) = Rdd -> memb(Rs) = Rt.
def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
- (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ (S2_storerb_io AddrFI:$addr, 0, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map memh(Rs) = Rdd -> memh(Rs) = Rt.
def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
- (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ (S2_storerh_io AddrFI:$addr, 0, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map memw(Rs) = Rdd -> memw(Rs) = Rt
def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
- (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ (S2_storeri_io AddrFI:$addr, 0, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map memw(Rs) = Rdd -> memw(Rs) = Rt.
def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
- (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ (S2_storeri_io AddrFI:$addr, 0, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0.
def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
- (STrib ADDRriS11_2:$addr, (TFRI 1))>;
+ (S2_storerb_io AddrFI:$addr, 0, (A2_tfrsi 1))>;
// Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0.
def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
- (STrib ADDRriS11_2:$addr, (TFRI 1))>;
+ (S2_storerb_io AddrFI:$addr, 0, (A2_tfrsi 1))>;
// Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt.
def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr),
- (STrib ADDRriS11_2:$addr, (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0)) )>;
-
-// Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs).
-// Hexagon_TODO: We can probably use combine but that will cost 2 instructions.
-// Better way to do this?
-def : Pat<(i64 (anyext (i32 IntRegs:$src1))),
- (i64 (SXTW (i32 IntRegs:$src1)))>;
+ (S2_storerb_io AddrFI:$addr, 0, (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0)) )>;
-// Map cmple -> cmpgt.
// rs <= rt -> !(rs > rt).
-def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)),
- (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ExtPred:$src2)))>;
+let AddedComplexity = 30 in
+def: Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)),
+ (C2_not (C2_cmpgti IntRegs:$src1, s10ExtPred:$src2))>;
// rs <= rt -> !(rs > rt).
def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (NOT_p (CMPGTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
+ (i1 (C2_not (C2_cmpgt (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
// Rss <= Rtt -> !(Rss > Rtt).
-def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (NOT_p (CMPGT64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
+def: Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (C2_not (C2_cmpgtp DoubleRegs:$src1, DoubleRegs:$src2))>;
// Map cmpne -> cmpeq.
// Hexagon_TODO: We should improve on this.
// rs != rt -> !(rs == rt).
-def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)),
- (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ExtPred:$src2))))>;
+let AddedComplexity = 30 in
+def: Pat<(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)),
+ (C2_not (C2_cmpeqi IntRegs:$src1, s10ExtPred:$src2))>;
// Map cmpne(Rs) -> !cmpeq(Rs).
// rs != rt -> !(rs == rt).
def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (NOT_p (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>;
+ (i1 (C2_not (i1 (C2_cmpeq (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>;
// Convert setne back to xor for hexagon since we compute w/ pred registers.
-def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))),
- (i1 (XOR_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
+def: Pat<(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))),
+ (C2_xor PredRegs:$src1, PredRegs:$src2)>;
// Map cmpne(Rss) -> !cmpeq(Rss).
// rs != rt -> !(rs == rt).
-def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (NOT_p (i1 (CMPEHexagon4rr (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2)))))>;
+def: Pat<(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (C2_not (C2_cmpeqp DoubleRegs:$src1, DoubleRegs:$src2))>;
// Map cmpge(Rs, Rt) -> !cmpgt(Rt, Rs).
// rs >= rt -> !(rt > rs).
def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>;
+ (i1 (C2_not (i1 (C2_cmpgt (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>;
// cmpge(Rs, Imm) -> cmpgt(Rs, Imm-1)
-def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ExtPred:$src2)),
- (i1 (CMPGTri (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2)))>;
+let AddedComplexity = 30 in
+def: Pat<(i1 (setge (i32 IntRegs:$src1), s8ExtPred:$src2)),
+ (C2_cmpgti IntRegs:$src1, (DEC_CONST_SIGNED s8ExtPred:$src2))>;
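// Worked example of the rewrite (immediate hypothetical): "rs >= 10" becomes
// "p = cmp.gt(rs, #9)" via DEC_CONST_SIGNED, since for signed values
// rs >= k holds exactly when rs > k-1.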
// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
// rss >= rtt -> !(rtt > rss).
-def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (NOT_p (i1 (CMPGT64rr (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1)))))>;
+def: Pat<(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (C2_not (C2_cmpgtp DoubleRegs:$src2, DoubleRegs:$src1))>;
// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
// !cmpge(Rs, Imm) -> !cmpgt(Rs, Imm-1).
// rs < rt -> !(rs >= rt).
-def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)),
- (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2))))>;
-
-// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs).
-// rs < rt -> rt > rs.
-// We can let assembler map it, or we can do in the compiler itself.
-def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;
-
-// Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss).
-// rss < rtt -> (rtt > rss).
-def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (CMPGT64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
-
-// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs)
-// rs < rt -> rt > rs.
-// We can let assembler map it, or we can do in the compiler itself.
-def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;
-
-// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss).
-// rs < rt -> rt > rs.
-def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
+let AddedComplexity = 30 in
+def: Pat<(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)),
+ (C2_not (C2_cmpgti IntRegs:$src1, (DEC_CONST_SIGNED s8ExtPred:$src2)))>;
// Generate cmpgeu(Rs, #0) -> cmpeq(Rs, Rs)
-def : Pat <(i1 (setuge (i32 IntRegs:$src1), 0)),
- (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src1)))>;
+def: Pat<(i1 (setuge (i32 IntRegs:$src1), 0)),
+ (C2_cmpeq IntRegs:$src1, IntRegs:$src1)>;
// Generate cmpgeu(Rs, #u8) -> cmpgtu(Rs, #u8 -1)
-def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ExtPred:$src2)),
- (i1 (CMPGTUri (i32 IntRegs:$src1), (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>;
+def: Pat<(i1 (setuge (i32 IntRegs:$src1), u8ExtPred:$src2)),
+ (C2_cmpgtui IntRegs:$src1, (DEC_CONST_UNSIGNED u8ExtPred:$src2))>;
// Generate cmpgtu(Rs, #u9)
-def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)),
- (i1 (CMPGTUri (i32 IntRegs:$src1), u9ExtPred:$src2))>;
-
-// Map from Rs >= Rt -> !(Rt > Rs).
-// rs >= rt -> !(rt > rs).
-def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>;
+def: Pat<(i1 (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)),
+ (C2_cmpgtui IntRegs:$src1, u9ExtPred:$src2)>;
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).
-def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>;
-
-// Map from cmpleu(Rs, Rt) -> !cmpgtu(Rs, Rt).
-// Map from (Rs <= Rt) -> !(Rs > Rt).
-def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
+def: Pat<(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (C2_not (C2_cmpgtup DoubleRegs:$src2, DoubleRegs:$src1))>;
// Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt).
// Map from (Rs <= Rt) -> !(Rs > Rt).
-def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
- (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
+def: Pat<(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (C2_not (C2_cmpgtup DoubleRegs:$src1, DoubleRegs:$src2))>;
// Sign extends.
// i1 -> i32
-def : Pat <(i32 (sext (i1 PredRegs:$src1))),
- (i32 (MUX_ii (i1 PredRegs:$src1), -1, 0))>;
+def: Pat<(i32 (sext (i1 PredRegs:$src1))),
+ (C2_muxii PredRegs:$src1, -1, 0)>;
// i1 -> i64
-def : Pat <(i64 (sext (i1 PredRegs:$src1))),
- (i64 (COMBINE_rr (TFRI -1), (MUX_ii (i1 PredRegs:$src1), -1, 0)))>;
-
-// Convert sign-extended load back to load and sign extend.
-// i8 -> i64
-def: Pat <(i64 (sextloadi8 ADDRriS11_0:$src1)),
- (i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
-
-// Convert any-extended load back to load and sign extend.
-// i8 -> i64
-def: Pat <(i64 (extloadi8 ADDRriS11_0:$src1)),
- (i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
-
-// Convert sign-extended load back to load and sign extend.
-// i16 -> i64
-def: Pat <(i64 (sextloadi16 ADDRriS11_1:$src1)),
- (i64 (SXTW (LDrih ADDRriS11_1:$src1)))>;
+def: Pat<(i64 (sext (i1 PredRegs:$src1))),
+ (A2_combinew (A2_tfrsi -1), (C2_muxii PredRegs:$src1, -1, 0))>;
// Convert sign-extended load back to load and sign extend.
// i32 -> i64
def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)),
- (i64 (SXTW (LDriw ADDRriS11_2:$src1)))>;
-
+ (i64 (A2_sxtw (L2_loadri_io AddrFI:$src1, 0)))>;
// Zero extends.
// i1 -> i32
-def : Pat <(i32 (zext (i1 PredRegs:$src1))),
- (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
+def: Pat<(i32 (zext (i1 PredRegs:$src1))),
+ (C2_muxii PredRegs:$src1, 1, 0)>;
-// i1 -> i64
-def : Pat <(i64 (zext (i1 PredRegs:$src1))),
- (i64 (COMBINE_rr (TFRI 0), (MUX_ii (i1 PredRegs:$src1), 1, 0)))>,
- Requires<[NoV4T]>;
+// Map from Rd = anyext(Pd) to Rd = mux(Pd, #1, #0).
+def: Pat<(i32 (anyext (i1 PredRegs:$src1))),
+ (C2_muxii PredRegs:$src1, 1, 0)>;
-// i32 -> i64
-def : Pat <(i64 (zext (i32 IntRegs:$src1))),
- (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>,
- Requires<[NoV4T]>;
+// Map from Rdd = anyext(Pd) to Rdd = sxtw(mux(Pd, #1, #0)).
+def: Pat<(i64 (anyext (i1 PredRegs:$src1))),
+ (A2_sxtw (C2_muxii PredRegs:$src1, 1, 0))>;
-// i8 -> i64
-def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>,
- Requires<[NoV4T]>;
+def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
+ (i32 32))),
+ (i64 (zextloadi32 ADDRriS11_2:$srcLow)))),
+ (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
+ (L2_loadri_io AddrFI:$srcLow, 0)))>;
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1),
- s11_0ExtPred:$offset))),
- (i64 (COMBINE_rr (TFRI 0), (LDriub_indexed IntRegs:$src1,
- s11_0ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+// Multiply 64-bit unsigned and use upper result.
+def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
+ (A2_addp
+ (M2_dpmpyuu_acc_s0
+ (S2_lsr_i_p
+ (A2_addp
+ (M2_dpmpyuu_acc_s0
+ (S2_lsr_i_p (M2_dpmpyuu_s0 (LoReg $src1), (LoReg $src2)), 32),
+ (HiReg $src1),
+ (LoReg $src2)),
+ (A2_combinew (A2_tfrsi 0),
+ (LoReg (M2_dpmpyuu_s0 (LoReg $src1), (HiReg $src2))))),
+ 32),
+ (HiReg $src1),
+ (HiReg $src2)),
+ (S2_lsr_i_p (M2_dpmpyuu_s0 (LoReg $src1), (HiReg $src2)), 32)
+)>;
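// Sketch of the schoolbook decomposition the pattern above implements,
// splitting each operand into 32-bit halves (x = xH:xL, y = yH:yL):
//   mid = xH*yL + hi32(xL*yL) + zext(lo32(xL*yH))
//   mulhu(x, y) = xH*yH + hi32(mid) + hi32(xL*yH)
// i.e. the partial products reaching bits 64..127, with the middle column's
// carries propagated through "mid".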
-// i1 -> i64
-def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>,
- Requires<[NoV4T]>;
+// Hexagon specific ISD nodes.
+def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>;
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1),
- s11_0ExtPred:$offset))),
- (i64 (COMBINE_rr (TFRI 0), (LDriub_indexed IntRegs:$src1,
- s11_0ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC",
+ SDTHexagonADJDYNALLOC>;
+def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
-// i16 -> i64
-def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDriuh ADDRriS11_1:$src1)))>,
- Requires<[NoV4T]>;
+// Needed to tag these instructions for stack layout.
+let isCodeGenOnly = 1, usesCustomInserter = 1 in
+def ADJDYNALLOC : T_Addri<s6Imm>;
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi16 (add (i32 IntRegs:$src1),
- s11_1ExtPred:$offset))),
- (i64 (COMBINE_rr (TFRI 0), (LDriuh_indexed IntRegs:$src1,
- s11_1ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+def: Pat<(Hexagon_ADJDYNALLOC I32:$Rs, s16ImmPred:$s16),
+ (ADJDYNALLOC I32:$Rs, imm:$s16)>;
-// i32 -> i64
-def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>,
- Requires<[NoV4T]>;
+let isCodeGenOnly = 1 in
+def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = $src1",
+ [(set (i32 IntRegs:$dst),
+ (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>;
let AddedComplexity = 100 in
-def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
- (i64 (COMBINE_rr (TFRI 0), (LDriw_indexed IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+def: Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
+ (i32 IntRegs:$src1)>;
-let AddedComplexity = 10 in
-def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)),
- (i32 (LDriw ADDRriS11_0:$src1))>;
+def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
-// Map from Rs = Pd to Pd = mux(Pd, #1, #0)
-def : Pat <(i32 (zext (i1 PredRegs:$src1))),
- (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
+def : Pat<(HexagonWrapperJT tjumptable:$dst),
+ (i32 (CONST32_set_jt tjumptable:$dst))>;
-// Map from Rs = Pd to Pd = mux(Pd, #1, #0)
-def : Pat <(i32 (anyext (i1 PredRegs:$src1))),
- (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
+// XTYPE/SHIFT
+//
+//===----------------------------------------------------------------------===//
+// Template Class
+// Shift by immediate/register and accumulate/logical
+//===----------------------------------------------------------------------===//
-// Map from Rss = Pd to Rdd = sxtw (mux(Pd, #1, #0))
-def : Pat <(i64 (anyext (i1 PredRegs:$src1))),
- (i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>;
+// Rx[+-&|]=asr(Rs,#u5)
+// Rx[+-&|^]=lsr(Rs,#u5)
+// Rx[+-&|^]=asl(Rs,#u5)
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_shift_imm_acc_r <string opc1, string opc2, SDNode OpNode1,
+ SDNode OpNode2, bits<3> majOp, bits<2> minOp>
+ : SInst_acc<(outs IntRegs:$Rx),
+ (ins IntRegs:$src1, IntRegs:$Rs, u5Imm:$u5),
+ "$Rx "#opc2#opc1#"($Rs, #$u5)",
+ [(set (i32 IntRegs:$Rx),
+ (OpNode2 (i32 IntRegs:$src1),
+ (OpNode1 (i32 IntRegs:$Rs), u5ImmPred:$u5)))],
+ "$src1 = $Rx", S_2op_tc_2_SLOT23> {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<5> u5;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = 0b1110;
+ let Inst{23-22} = majOp{2-1};
+ let Inst{13} = 0b0;
+ let Inst{7} = majOp{0};
+ let Inst{6-5} = minOp;
+ let Inst{4-0} = Rx;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = u5;
+ }
+// Rx[+-&|]=asr(Rs,Rt)
+// Rx[+-&|^]=lsr(Rs,Rt)
+// Rx[+-&|^]=asl(Rs,Rt)
+
+let hasNewValue = 1, opNewValue = 0 in
+class T_shift_reg_acc_r <string opc1, string opc2, SDNode OpNode1,
+ SDNode OpNode2, bits<2> majOp, bits<2> minOp>
+ : SInst_acc<(outs IntRegs:$Rx),
+ (ins IntRegs:$src1, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rx "#opc2#opc1#"($Rs, $Rt)",
+ [(set (i32 IntRegs:$Rx),
+ (OpNode2 (i32 IntRegs:$src1),
+ (OpNode1 (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))],
+ "$src1 = $Rx", S_3op_tc_2_SLOT23 > {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
-let AddedComplexity = 100 in
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zextloadi32 (i32 (add IntRegs:$src2,
- s11_2ExtPred:$offset2)))))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- (LDriw_indexed IntRegs:$src2,
- s11_2ExtPred:$offset2)))>;
+ let Inst{27-24} = 0b1100;
+ let Inst{23-22} = majOp;
+ let Inst{7-6} = minOp;
+ let Inst{4-0} = Rx;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ }
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zextloadi32 ADDRriS11_2:$srcLow)))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- (LDriw ADDRriS11_2:$srcLow)))>;
+// Rxx[+-&|]=asr(Rss,#u6)
+// Rxx[+-&|^]=lsr(Rss,#u6)
+// Rxx[+-&|^]=asl(Rss,#u6)
+
+class T_shift_imm_acc_p <string opc1, string opc2, SDNode OpNode1,
+ SDNode OpNode2, bits<3> majOp, bits<2> minOp>
+ : SInst_acc<(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$src1, DoubleRegs:$Rss, u6Imm:$u6),
+ "$Rxx "#opc2#opc1#"($Rss, #$u6)",
+ [(set (i64 DoubleRegs:$Rxx),
+ (OpNode2 (i64 DoubleRegs:$src1),
+ (OpNode1 (i64 DoubleRegs:$Rss), u6ImmPred:$u6)))],
+ "$src1 = $Rxx", S_2op_tc_2_SLOT23> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<6> u6;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = 0b0010;
+ let Inst{23-22} = majOp{2-1};
+ let Inst{7} = majOp{0};
+ let Inst{6-5} = minOp;
+ let Inst{4-0} = Rxx;
+ let Inst{20-16} = Rss;
+ let Inst{13-8} = u6;
+ }
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zext (i32 IntRegs:$srcLow))))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- IntRegs:$srcLow))>;
-let AddedComplexity = 100 in
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zextloadi32 (i32 (add IntRegs:$src2,
- s11_2ExtPred:$offset2)))))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- (LDriw_indexed IntRegs:$src2,
- s11_2ExtPred:$offset2)))>;
+// Rxx[+-&|]=asr(Rss,Rt)
+// Rxx[+-&|^]=lsr(Rss,Rt)
+// Rxx[+-&|^]=asl(Rss,Rt)
+// Rxx[+-&|^]=lsl(Rss,Rt)
+
+class T_shift_reg_acc_p <string opc1, string opc2, SDNode OpNode1,
+ SDNode OpNode2, bits<3> majOp, bits<2> minOp>
+ : SInst_acc<(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$src1, DoubleRegs:$Rss, IntRegs:$Rt),
+ "$Rxx "#opc2#opc1#"($Rss, $Rt)",
+ [(set (i64 DoubleRegs:$Rxx),
+ (OpNode2 (i64 DoubleRegs:$src1),
+ (OpNode1 (i64 DoubleRegs:$Rss), (i32 IntRegs:$Rt))))],
+ "$src1 = $Rxx", S_3op_tc_2_SLOT23> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = majOp;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rt;
+ let Inst{7-6} = minOp;
+ let Inst{4-0} = Rxx;
+ }
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zextloadi32 ADDRriS11_2:$srcLow)))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- (LDriw ADDRriS11_2:$srcLow)))>;
+//===----------------------------------------------------------------------===//
+// Multi-class for the shift instructions with logical/arithmetic operators.
+//===----------------------------------------------------------------------===//
-def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh),
- (i32 32))),
- (i64 (zext (i32 IntRegs:$srcLow))))),
- (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg),
- IntRegs:$srcLow))>;
-
-// Any extended 64-bit load.
-// anyext i32 -> i64
-def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>,
- Requires<[NoV4T]>;
-
-// When there is an offset we should prefer the pattern below over the pattern above.
-// The complexity of the above is 13 (gleaned from HexagonGenDAGIsel.inc)
-// So this complexity below is comfortably higher to allow for choosing the below.
-// If this is not done then we generate addresses such as
-// ********************************************
-// r1 = add (r0, #4)
-// r1 = memw(r1 + #0)
-// instead of
-// r1 = memw(r0 + #4)
-// ********************************************
+multiclass xtype_imm_base<string OpcStr1, string OpcStr2, SDNode OpNode1,
+ SDNode OpNode2, bits<3> majOp, bits<2> minOp > {
+ def _i_r#NAME : T_shift_imm_acc_r< OpcStr1, OpcStr2, OpNode1,
+ OpNode2, majOp, minOp >;
+ def _i_p#NAME : T_shift_imm_acc_p< OpcStr1, OpcStr2, OpNode1,
+ OpNode2, majOp, minOp >;
+}
+
+multiclass xtype_imm_acc<string opc1, SDNode OpNode, bits<2>minOp> {
+ let AddedComplexity = 100 in
+ defm _acc : xtype_imm_base< opc1, "+= ", OpNode, add, 0b001, minOp>;
+
+ defm _nac : xtype_imm_base< opc1, "-= ", OpNode, sub, 0b000, minOp>;
+ defm _and : xtype_imm_base< opc1, "&= ", OpNode, and, 0b010, minOp>;
+ defm _or : xtype_imm_base< opc1, "|= ", OpNode, or, 0b011, minOp>;
+}
+
+multiclass xtype_xor_imm_acc<string opc1, SDNode OpNode, bits<2>minOp> {
let AddedComplexity = 100 in
-def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
- (i64 (COMBINE_rr (TFRI 0), (LDriw_indexed IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+ defm _xacc : xtype_imm_base< opc1, "^= ", OpNode, xor, 0b100, minOp>;
+}
-// anyext i16 -> i64.
-def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>,
- Requires<[NoV4T]>;
+defm S2_asr : xtype_imm_acc<"asr", sra, 0b00>;
-let AddedComplexity = 20 in
-def: Pat <(i64 (extloadi16 (add (i32 IntRegs:$src1),
- s11_1ExtPred:$offset))),
- (i64 (COMBINE_rr (TFRI 0), (LDrih_indexed IntRegs:$src1,
- s11_1ExtPred:$offset)))>,
- Requires<[NoV4T]>;
+defm S2_lsr : xtype_imm_acc<"lsr", srl, 0b01>,
+ xtype_xor_imm_acc<"lsr", srl, 0b01>;
-// Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs).
-def : Pat<(i64 (zext (i32 IntRegs:$src1))),
- (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>,
- Requires<[NoV4T]>;
+defm S2_asl : xtype_imm_acc<"asl", shl, 0b10>,
+ xtype_xor_imm_acc<"asl", shl, 0b10>;
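// For reference, each defm above should expand (via xtype_imm_base) into
// _i_r and _i_p variants per operator; e.g. "defm S2_asr" is expected to
// yield S2_asr_i_r_acc ("Rx += asr(Rs, #u5)") and S2_asr_i_p_acc
// ("Rxx += asr(Rss, #u6)"), plus the matching _nac/_and/_or forms.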
-// Multiply 64-bit unsigned and use upper result.
-def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
- (i64
- (MPYU64_acc
- (i64
- (COMBINE_rr
- (TFRI 0),
- (i32
- (EXTRACT_SUBREG
- (i64
- (LSRd_ri
- (i64
- (MPYU64_acc
- (i64
- (MPYU64_acc
- (i64
- (COMBINE_rr (TFRI 0),
- (i32
- (EXTRACT_SUBREG
- (i64
- (LSRd_ri
- (i64
- (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
- subreg_loreg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- subreg_loreg)))), 32)),
- subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
- 32)), subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;
-
-// Multiply 64-bit signed and use upper result.
-def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
- (i64
- (MPY64_acc
- (i64
- (COMBINE_rr (TFRI 0),
- (i32
- (EXTRACT_SUBREG
- (i64
- (LSRd_ri
- (i64
- (MPY64_acc
- (i64
- (MPY64_acc
- (i64
- (COMBINE_rr (TFRI 0),
- (i32
- (EXTRACT_SUBREG
- (i64
- (LSRd_ri
- (i64
- (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
- subreg_loreg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- subreg_loreg)))), 32)),
- subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
- 32)), subreg_loreg)))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;
+multiclass xtype_reg_acc_r<string opc1, SDNode OpNode, bits<2>minOp> {
+ let AddedComplexity = 100 in
+ def _acc : T_shift_reg_acc_r <opc1, "+= ", OpNode, add, 0b11, minOp>;
-// Hexagon specific ISD nodes.
-//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>;
-def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2,
- [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC",
- SDTHexagonADJDYNALLOC>;
-// Needed to tag these instructions for stack layout.
-let usesCustomInserter = 1 in
-def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1,
- s16Imm:$src2),
- "$dst = add($src1, #$src2)",
- [(set (i32 IntRegs:$dst),
- (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1),
- s16ImmPred:$src2))]>;
+ def _nac : T_shift_reg_acc_r <opc1, "-= ", OpNode, sub, 0b10, minOp>;
+ def _and : T_shift_reg_acc_r <opc1, "&= ", OpNode, and, 0b01, minOp>;
+ def _or : T_shift_reg_acc_r <opc1, "|= ", OpNode, or, 0b00, minOp>;
+}
-def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>;
-def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
-def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
- "$dst = $src1",
- [(set (i32 IntRegs:$dst),
- (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>;
+multiclass xtype_reg_acc_p<string opc1, SDNode OpNode, bits<2>minOp> {
+ let AddedComplexity = 100 in
+ def _acc : T_shift_reg_acc_p <opc1, "+= ", OpNode, add, 0b110, minOp>;
-let AddedComplexity = 100 in
-def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
- (COPY (i32 IntRegs:$src1))>;
+ def _nac : T_shift_reg_acc_p <opc1, "-= ", OpNode, sub, 0b100, minOp>;
+ def _and : T_shift_reg_acc_p <opc1, "&= ", OpNode, and, 0b010, minOp>;
+ def _or : T_shift_reg_acc_p <opc1, "|= ", OpNode, or, 0b000, minOp>;
+ def _xor : T_shift_reg_acc_p <opc1, "^= ", OpNode, xor, 0b011, minOp>;
+}
-def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
+multiclass xtype_reg_acc<string OpcStr, SDNode OpNode, bits<2> minOp > {
+ defm _r_r : xtype_reg_acc_r <OpcStr, OpNode, minOp>;
+ defm _r_p : xtype_reg_acc_p <OpcStr, OpNode, minOp>;
+}
-def : Pat<(HexagonWrapperJT tjumptable:$dst),
- (i32 (CONST32_set_jt tjumptable:$dst))>;
+defm S2_asl : xtype_reg_acc<"asl", shl, 0b10>;
+defm S2_asr : xtype_reg_acc<"asr", sra, 0b00>;
+defm S2_lsr : xtype_reg_acc<"lsr", srl, 0b01>;
+defm S2_lsl : xtype_reg_acc<"lsl", shl, 0b11>;
-// XTYPE/SHIFT
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_S3op_1 <string mnemonic, RegisterClass RC, bits<2> MajOp, bits<3> MinOp,
+ bit SwapOps, bit isSat = 0, bit isRnd = 0, bit hasShift = 0>
+ : SInst <(outs RC:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ "$dst = "#mnemonic#"($src1, $src2)"#!if(isRnd, ":rnd", "")
+ #!if(hasShift,":>>1","")
+ #!if(isSat, ":sat", ""),
+ [], "", S_3op_tc_2_SLOT23 > {
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
-// Multi-class for logical operators :
-// Shift by immediate/register and accumulate/logical
-multiclass xtype_imm<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
- def _ri : SInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, u5Imm:$src3),
- !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
- [(set (i32 IntRegs:$dst),
- (OpNode2 (i32 IntRegs:$src1),
- (OpNode1 (i32 IntRegs:$src2),
- u5ImmPred:$src3)))],
- "$src1 = $dst">;
-
- def d_ri : SInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, u6Imm:$src3),
- !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
- [(set (i64 DoubleRegs:$dst), (OpNode2 (i64 DoubleRegs:$src1),
- (OpNode1 (i64 DoubleRegs:$src2), u6ImmPred:$src3)))],
- "$src1 = $dst">;
-}
-
-// Multi-class for logical operators :
-// Shift by register and accumulate/logical (32/64 bits)
-multiclass xtype_reg<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
- def _rr : SInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
- [(set (i32 IntRegs:$dst),
- (OpNode2 (i32 IntRegs:$src1),
- (OpNode1 (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">;
+ let IClass = 0b1100;
- def d_rr : SInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
- !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
- [(set (i64 DoubleRegs:$dst),
- (OpNode2 (i64 DoubleRegs:$src1),
- (OpNode1 (i64 DoubleRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">;
+ let Inst{27-24} = 0b0001;
+ let Inst{23-22} = MajOp;
+ let Inst{20-16} = !if (SwapOps, src2, src1);
+ let Inst{12-8} = !if (SwapOps, src1, src2);
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = dst;
+ }
-}
+class T_S3op_64 <string mnemonic, bits<2> MajOp, bits<3> MinOp, bit SwapOps,
+ bit isSat = 0, bit isRnd = 0, bit hasShift = 0 >
+ : T_S3op_1 <mnemonic, DoubleRegs, MajOp, MinOp, SwapOps,
+ isSat, isRnd, hasShift>;
-multiclass basic_xtype_imm<string OpcStr, SDNode OpNode> {
-let AddedComplexity = 100 in
- defm _ADD : xtype_imm< !strconcat("+= ", OpcStr), OpNode, add>;
- defm _SUB : xtype_imm< !strconcat("-= ", OpcStr), OpNode, sub>;
- defm _AND : xtype_imm< !strconcat("&= ", OpcStr), OpNode, and>;
- defm _OR : xtype_imm< !strconcat("|= ", OpcStr), OpNode, or>;
+let Itinerary = S_3op_tc_1_SLOT23 in {
+ def S2_shuffeb : T_S3op_64 < "shuffeb", 0b00, 0b010, 0>;
+ def S2_shuffeh : T_S3op_64 < "shuffeh", 0b00, 0b110, 0>;
+ def S2_shuffob : T_S3op_64 < "shuffob", 0b00, 0b100, 1>;
+ def S2_shuffoh : T_S3op_64 < "shuffoh", 0b10, 0b000, 1>;
+
+ def S2_vtrunewh : T_S3op_64 < "vtrunewh", 0b10, 0b010, 0>;
+ def S2_vtrunowh : T_S3op_64 < "vtrunowh", 0b10, 0b100, 0>;
}
-multiclass basic_xtype_reg<string OpcStr, SDNode OpNode> {
-let AddedComplexity = 100 in
- defm _ADD : xtype_reg< !strconcat("+= ", OpcStr), OpNode, add>;
- defm _SUB : xtype_reg< !strconcat("-= ", OpcStr), OpNode, sub>;
- defm _AND : xtype_reg< !strconcat("&= ", OpcStr), OpNode, and>;
- defm _OR : xtype_reg< !strconcat("|= ", OpcStr), OpNode, or>;
+def S2_lfsp : T_S3op_64 < "lfs", 0b10, 0b110, 0>;
+
+let hasSideEffects = 0 in
+class T_S3op_2 <string mnemonic, bits<3> MajOp, bit SwapOps>
+ : SInst < (outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, PredRegs:$Pu),
+ "$Rdd = "#mnemonic#"($Rss, $Rtt, $Pu)",
+ [], "", S_3op_tc_1_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+ bits<2> Pu;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = 0b0010;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = !if (SwapOps, Rtt, Rss);
+ let Inst{12-8} = !if (SwapOps, Rss, Rtt);
+ let Inst{6-5} = Pu;
+ let Inst{4-0} = Rdd;
+ }
+
+def S2_valignrb : T_S3op_2 < "valignb", 0b000, 1>;
+def S2_vsplicerb : T_S3op_2 < "vspliceb", 0b100, 0>;
+
+//===----------------------------------------------------------------------===//
+// Template class used by vector shift, vector rotate, vector neg,
+// 32-bit shift, 64-bit shifts, etc.
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0 in
+class T_S3op_3 <string mnemonic, RegisterClass RC, bits<2> MajOp,
+ bits<2> MinOp, bit isSat = 0, list<dag> pattern = [] >
+ : SInst <(outs RC:$dst),
+ (ins RC:$src1, IntRegs:$src2),
+ "$dst = "#mnemonic#"($src1, $src2)"#!if(isSat, ":sat", ""),
+ pattern, "", S_3op_tc_1_SLOT23> {
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = !if(!eq(!cast<string>(RC), "IntRegs"), 0b0110, 0b0011);
+ let Inst{23-22} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{12-8} = src2;
+ let Inst{7-6} = MinOp;
+ let Inst{4-0} = dst;
+ }
+
+let hasNewValue = 1 in
+class T_S3op_shift32 <string mnemonic, SDNode OpNode, bits<2> MinOp>
+ : T_S3op_3 <mnemonic, IntRegs, 0b01, MinOp, 0,
+ [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+
+let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23 in
+class T_S3op_shift32_Sat <string mnemonic, bits<2> MinOp>
+ : T_S3op_3 <mnemonic, IntRegs, 0b00, MinOp, 1, []>;
+
+
+class T_S3op_shift64 <string mnemonic, SDNode OpNode, bits<2> MinOp>
+ : T_S3op_3 <mnemonic, DoubleRegs, 0b10, MinOp, 0,
+ [(set (i64 DoubleRegs:$dst), (OpNode (i64 DoubleRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+
+
+class T_S3op_shiftVect <string mnemonic, bits<2> MajOp, bits<2> MinOp>
+ : T_S3op_3 <mnemonic, DoubleRegs, MajOp, MinOp, 0, []>;
+
+
+// Shift by register
+// Rdd=[asr|lsr|asl|lsl](Rss,Rt)
+
+def S2_asr_r_p : T_S3op_shift64 < "asr", sra, 0b00>;
+def S2_lsr_r_p : T_S3op_shift64 < "lsr", srl, 0b01>;
+def S2_asl_r_p : T_S3op_shift64 < "asl", shl, 0b10>;
+def S2_lsl_r_p : T_S3op_shift64 < "lsl", shl, 0b11>;
+
+// Rd=[asr|lsr|asl|lsl](Rs,Rt)
+
+def S2_asr_r_r : T_S3op_shift32<"asr", sra, 0b00>;
+def S2_lsr_r_r : T_S3op_shift32<"lsr", srl, 0b01>;
+def S2_asl_r_r : T_S3op_shift32<"asl", shl, 0b10>;
+def S2_lsl_r_r : T_S3op_shift32<"lsl", shl, 0b11>;
+
+// Shift by register with saturation
+// Rd=asr(Rs,Rt):sat
+// Rd=asl(Rs,Rt):sat
+
+let Defs = [USR_OVF] in {
+ def S2_asr_r_r_sat : T_S3op_shift32_Sat<"asr", 0b00>;
+ def S2_asl_r_r_sat : T_S3op_shift32_Sat<"asl", 0b10>;
}
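// Note on the saturating forms above: when the shifted result is clamped to
// the signed 32-bit range, the sticky overflow bit in the user status
// register is set as a side effect, hence Defs = [USR_OVF].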
-multiclass xtype_xor_imm<string OpcStr, SDNode OpNode> {
-let AddedComplexity = 100 in
- defm _XOR : xtype_imm< !strconcat("^= ", OpcStr), OpNode, xor>;
+let hasNewValue = 1, hasSideEffects = 0 in
+class T_S3op_8 <string opc, bits<3> MinOp, bit isSat, bit isRnd, bit hasShift,
+                bit hasSplat = 0>
+ : SInst < (outs IntRegs:$Rd),
+ (ins DoubleRegs:$Rss, IntRegs:$Rt),
+ "$Rd = "#opc#"($Rss, $Rt"#!if(hasSplat, "*", "")#")"
+ #!if(hasShift, ":<<1", "")
+ #!if(isRnd, ":rnd", "")
+ #!if(isSat, ":sat", ""),
+ [], "", S_3op_tc_1_SLOT23 > {
+ bits<5> Rd;
+ bits<5> Rss;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = 0b0101;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+ }
+
+def S2_asr_r_svw_trun : T_S3op_8<"vasrw", 0b010, 0, 0, 0>;
+
+let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
+def S2_vcrotate : T_S3op_shiftVect < "vcrotate", 0b11, 0b00>;
+
+let hasSideEffects = 0 in
+class T_S3op_7 <string mnemonic, bit MajOp >
+ : SInst <(outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, u3Imm:$u3),
+ "$Rdd = "#mnemonic#"($Rss, $Rtt, #$u3)" ,
+ [], "", S_3op_tc_1_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+ bits<3> u3;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = 0b0000;
+ let Inst{23} = MajOp;
+ let Inst{20-16} = !if(MajOp, Rss, Rtt);
+ let Inst{12-8} = !if(MajOp, Rtt, Rss);
+ let Inst{7-5} = u3;
+ let Inst{4-0} = Rdd;
+ }
+
+def S2_valignib : T_S3op_7 < "valignb", 0>;
+def S2_vspliceib : T_S3op_7 < "vspliceb", 1>;
+
+//===----------------------------------------------------------------------===//
+// Template class for 'insert bitfield' instructions
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0 in
+class T_S3op_insert <string mnemonic, RegisterClass RC>
+ : SInst <(outs RC:$dst),
+ (ins RC:$src1, RC:$src2, DoubleRegs:$src3),
+ "$dst = "#mnemonic#"($src2, $src3)" ,
+ [], "$src1 = $dst", S_3op_tc_1_SLOT23 > {
+ bits<5> dst;
+ bits<5> src2;
+ bits<5> src3;
+
+ let IClass = 0b1100;
+
+ let Inst{27-26} = 0b10;
+ let Inst{25-24} = !if(!eq(!cast<string>(RC), "IntRegs"), 0b00, 0b10);
+ let Inst{23} = 0b0;
+ let Inst{20-16} = src2;
+ let Inst{12-8} = src3;
+ let Inst{4-0} = dst;
+ }
+
+let hasSideEffects = 0 in
+class T_S2op_insert <bits<4> RegTyBits, RegisterClass RC, Operand ImmOp>
+ : SInst <(outs RC:$dst), (ins RC:$dst2, RC:$src1, ImmOp:$src2, ImmOp:$src3),
+ "$dst = insert($src1, #$src2, #$src3)",
+ [], "$dst2 = $dst", S_2op_tc_2_SLOT23> {
+ bits<5> dst;
+ bits<5> src1;
+ bits<6> src2;
+ bits<6> src3;
+ bit bit23;
+ bit bit13;
+ string ImmOpStr = !cast<string>(ImmOp);
+
+ let bit23 = !if (!eq(ImmOpStr, "u6Imm"), src3{5}, 0);
+ let bit13 = !if (!eq(ImmOpStr, "u6Imm"), src2{5}, 0);
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = RegTyBits;
+ let Inst{23} = bit23;
+ let Inst{22-21} = src3{4-3};
+ let Inst{20-16} = src1;
+ let Inst{13} = bit13;
+ let Inst{12-8} = src2{4-0};
+ let Inst{7-5} = src3{2-0};
+ let Inst{4-0} = dst;
+ }
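// Encoding detail worth noting (as read from the class above): only the
// DoubleRegs forms take u6Imm operands, so the sixth immediate bits
// (src2{5}, src3{5}) spill into Inst{13} and Inst{23}; for the u5Imm forms
// those bits are hardwired to zero.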
+
+// Rx=insert(Rs,Rtt)
+// Rx=insert(Rs,#u5,#U5)
+let hasNewValue = 1 in {
+ def S2_insert_rp : T_S3op_insert <"insert", IntRegs>;
+ def S2_insert : T_S2op_insert <0b1111, IntRegs, u5Imm>;
}
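// Illustrative semantics for the immediate form (values hypothetical), per
// the usual bitfield-insert reading: Rx = insert(Rs, #width, #offset)
// replaces bits [offset+width-1:offset] of Rx with the low #width bits of
// Rs, e.g. Rx = 0x0000FFFF, insert(0xA, #4, #8) gives Rx = 0x0000FAFF.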
-defm ASL : basic_xtype_imm<"asl", shl>, basic_xtype_reg<"asl", shl>,
- xtype_xor_imm<"asl", shl>;
+// Rxx=insert(Rss,Rtt)
+// Rxx=insert(Rss,#u6,#U6)
+def S2_insertp_rp : T_S3op_insert<"insert", DoubleRegs>;
+def S2_insertp : T_S2op_insert <0b0011, DoubleRegs, u6Imm>;
-defm LSR : basic_xtype_imm<"lsr", srl>, basic_xtype_reg<"lsr", srl>,
- xtype_xor_imm<"lsr", srl>;
+//===----------------------------------------------------------------------===//
+// Template class for 'extract bitfield' instructions
+//===----------------------------------------------------------------------===//
+let hasNewValue = 1, hasSideEffects = 0 in
+class T_S3op_extract <string mnemonic, bits<2> MinOp>
+ : SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, DoubleRegs:$Rtt),
+ "$Rd = "#mnemonic#"($Rs, $Rtt)",
+ [], "", S_3op_tc_2_SLOT23 > {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rtt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-22} = 0b100100;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rtt;
+ let Inst{7-6} = MinOp;
+ let Inst{4-0} = Rd;
+ }
+
+let hasSideEffects = 0 in
+class T_S2op_extract <string mnemonic, bits<4> RegTyBits,
+ RegisterClass RC, Operand ImmOp>
+ : SInst <(outs RC:$dst), (ins RC:$src1, ImmOp:$src2, ImmOp:$src3),
+ "$dst = "#mnemonic#"($src1, #$src2, #$src3)",
+ [], "", S_2op_tc_2_SLOT23> {
+ bits<5> dst;
+ bits<5> src1;
+ bits<6> src2;
+ bits<6> src3;
+ bit bit23;
+ bit bit13;
+ string ImmOpStr = !cast<string>(ImmOp);
+
+ let bit23 = !if (!eq(ImmOpStr, "u6Imm"), src3{5},
+ !if (!eq(mnemonic, "extractu"), 0, 1));
+
+ let bit13 = !if (!eq(ImmOpStr, "u6Imm"), src2{5}, 0);
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = RegTyBits;
+ let Inst{23} = bit23;
+ let Inst{22-21} = src3{4-3};
+ let Inst{20-16} = src1;
+ let Inst{13} = bit13;
+ let Inst{12-8} = src2{4-0};
+ let Inst{7-5} = src3{2-0};
+ let Inst{4-0} = dst;
+ }
-defm ASR : basic_xtype_imm<"asr", sra>, basic_xtype_reg<"asr", sra>;
-defm LSL : basic_xtype_reg<"lsl", shl>;
+// Extract bitfield
+
+// Rdd=extractu(Rss,Rtt)
+// Rdd=extractu(Rss,#u6,#U6)
+def S2_extractup_rp : T_S3op_64 < "extractu", 0b00, 0b000, 0>;
+def S2_extractup : T_S2op_extract <"extractu", 0b0001, DoubleRegs, u6Imm>;
+
+// Rd=extractu(Rs,Rtt)
+// Rd=extractu(Rs,#u5,#U5)
+let hasNewValue = 1 in {
+ def S2_extractu_rp : T_S3op_extract<"extractu", 0b00>;
+ def S2_extractu : T_S2op_extract <"extractu", 0b1101, IntRegs, u5Imm>;
+}
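// Illustrative semantics for the immediate form (values hypothetical):
// Rd = extractu(Rs, #width, #offset) zero-extends the selected field, i.e.
// Rd = (Rs >> offset) & ((1 << width) - 1), so extractu(0xABCD, #8, #4)
// yields 0xBC.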
// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
-def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
- (i32 (MPYI_rin (i32 IntRegs:$src1), u8ImmPred:$src2))>;
+def: Pat<(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
+ (M2_mpysin IntRegs:$src1, u8ImmPred:$src2)>;
+
+//===----------------------------------------------------------------------===//
+// :raw form of tableidx[bdhw] insns
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, hasNewValue = 1, opNewValue = 0 in
+class tableidxRaw<string OpStr, bits<2>MinOp>
+ : SInst <(outs IntRegs:$Rx),
+ (ins IntRegs:$_dst_, IntRegs:$Rs, u4Imm:$u4, s6Imm:$S6),
+ "$Rx = "#OpStr#"($Rs, #$u4, #$S6):raw",
+ [], "$Rx = $_dst_" > {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<4> u4;
+ bits<6> S6;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = 0b0111;
+ let Inst{23-22} = MinOp;
+ let Inst{21} = u4{3};
+ let Inst{20-16} = Rs;
+ let Inst{13-8} = S6;
+ let Inst{7-5} = u4{2-0};
+ let Inst{4-0} = Rx;
+ }
+
+def S2_tableidxb : tableidxRaw<"tableidxb", 0b00>;
+def S2_tableidxh : tableidxRaw<"tableidxh", 0b01>;
+def S2_tableidxw : tableidxRaw<"tableidxw", 0b10>;
+def S2_tableidxd : tableidxRaw<"tableidxd", 0b11>;
//===----------------------------------------------------------------------===//
// V3 Instructions +
@@ -2930,3 +5756,9 @@ include "HexagonInstrInfoV5.td"
//===----------------------------------------------------------------------===//
// V5 Instructions -
//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU32/64/Vector +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoVector.td" \ No newline at end of file
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td
index 7e75554..84d035d 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV3.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td
@@ -21,13 +21,52 @@ def callv3nr : SDNode<"HexagonISD::CALLv3nr", SDT_SPCall,
// J +
//===----------------------------------------------------------------------===//
// Call subroutine.
-let isCall = 1, neverHasSideEffects = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
- P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLv3 : JInst<(outs), (ins calltarget:$dst),
- "call $dst", []>, Requires<[HasV3T]>;
+let isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs, isPredicable = 1,
+ isExtended = 0, isExtendable = 1, opExtendable = 0,
+ isExtentSigned = 1, opExtentBits = 24, opExtentAlign = 2 in
+class T_Call<string ExtStr>
+ : JInst<(outs), (ins calltarget:$dst),
+ "call " # ExtStr # "$dst", [], "", J_tc_2early_SLOT23> {
+ let BaseOpcode = "call";
+ bits<24> dst;
+
+ let IClass = 0b0101;
+ let Inst{27-25} = 0b101;
+ let Inst{24-16,13-1} = dst{23-2};
+ let Inst{0} = 0b0;
+}
+
+let isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs, isPredicated = 1,
+ isExtended = 0, isExtendable = 1, opExtendable = 1,
+ isExtentSigned = 1, opExtentBits = 17, opExtentAlign = 2 in
+class T_CallPred<bit IfTrue, string ExtStr>
+ : JInst<(outs), (ins PredRegs:$Pu, calltarget:$dst),
+ CondStr<"$Pu", IfTrue, 0>.S # "call " # ExtStr # "$dst",
+ [], "", J_tc_2early_SLOT23> {
+ let BaseOpcode = "call";
+ let isPredicatedFalse = !if(IfTrue,0,1);
+ bits<2> Pu;
+ bits<17> dst;
+
+ let IClass = 0b0101;
+ let Inst{27-24} = 0b1101;
+ let Inst{23-22,20-16,13,7-1} = dst{16-2};
+ let Inst{21} = !if(IfTrue,0,1);
+ let Inst{11} = 0b0;
+ let Inst{9-8} = Pu;
+}
+
+multiclass T_Calls<string ExtStr> {
+ def NAME : T_Call<ExtStr>;
+ def t : T_CallPred<1, ExtStr>;
+ def f : T_CallPred<0, ExtStr>;
}
+defm J2_call: T_Calls<"">, PredRel;
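+
+// The defm above expands, per the NAME/t/f defs in T_Calls, to J2_call
+// ("call $dst"), J2_callt ("if ($Pu) call $dst") and J2_callf
+// ("if (!$Pu) call $dst").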
+
+let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs in
+def CALLv3nr : T_Call<"">, PredRel;
+
//===----------------------------------------------------------------------===//
// J -
//===----------------------------------------------------------------------===//
@@ -37,13 +76,10 @@ let isCall = 1, neverHasSideEffects = 1,
// JR +
//===----------------------------------------------------------------------===//
// Call subroutine from register.
-let isCall = 1, neverHasSideEffects = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
- P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst),
- "callr $dst",
- []>, Requires<[HasV3TOnly]>;
- }
+
+let isCodeGenOnly = 1, Defs = VolatileV3.Regs in {
+ def CALLRv3nr : JUMPR_MISC_CALLR<0, 1>; // Call, no return.
+}
//===----------------------------------------------------------------------===//
// JR -
@@ -53,27 +89,63 @@ let isCall = 1, neverHasSideEffects = 1,
// ALU64/ALU +
//===----------------------------------------------------------------------===//
-let AddedComplexity = 200 in
-def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = max($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>,
-Requires<[HasV3T]>;
-
-let AddedComplexity = 200 in
-def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = min($src2, $src1)",
- [(set (i64 DoubleRegs:$dst),
- (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src1))),
- (i64 DoubleRegs:$src1),
- (i64 DoubleRegs:$src2))))]>,
-Requires<[HasV3T]>;
+let Defs = [USR_OVF], Itinerary = ALU64_tc_2_SLOT23 in
+def A2_addpsat : T_ALU64_arith<"add", 0b011, 0b101, 1, 0, 1>;
+
+class T_ALU64_addsp_hl<string suffix, bits<3> MinOp>
+ : T_ALU64_rr<"add", suffix, 0b0011, 0b011, MinOp, 0, 0, "">;
+
+def A2_addspl : T_ALU64_addsp_hl<":raw:lo", 0b110>;
+def A2_addsph : T_ALU64_addsp_hl<":raw:hi", 0b111>;
+
+let hasSideEffects = 0, isAsmParserOnly = 1 in
+def A2_addsp : ALU64_rr<(outs DoubleRegs:$Rd),
+ (ins IntRegs:$Rs, DoubleRegs:$Rt), "$Rd = add($Rs, $Rt)",
+ [(set (i64 DoubleRegs:$Rd), (i64 (add (i64 (sext (i32 IntRegs:$Rs))),
+ (i64 DoubleRegs:$Rt))))],
+ "", ALU64_tc_1_SLOT23>;
+
+
+let hasSideEffects = 0 in
+class T_XTYPE_MIN_MAX_P<bit isMax, bit isUnsigned>
+ : ALU64Inst<(outs DoubleRegs:$Rd), (ins DoubleRegs:$Rt, DoubleRegs:$Rs),
+ "$Rd = "#!if(isMax,"max","min")#!if(isUnsigned,"u","")
+ #"($Rt, $Rs)", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b00111;
+ let Inst{22-21} = !if(isMax, 0b10, 0b01);
+ let Inst{20-16} = !if(isMax, Rt, Rs);
+ let Inst{12-8} = !if(isMax, Rs, Rt);
+ let Inst{7} = 0b1;
+ let Inst{6} = !if(isMax, 0b0, 0b1);
+ let Inst{5} = isUnsigned;
+ let Inst{4-0} = Rd;
+}
+
+def A2_minp : T_XTYPE_MIN_MAX_P<0, 0>;
+def A2_minup : T_XTYPE_MIN_MAX_P<0, 1>;
+def A2_maxp : T_XTYPE_MIN_MAX_P<1, 0>;
+def A2_maxup : T_XTYPE_MIN_MAX_P<1, 1>;
+
+multiclass MinMax_pats_p<PatFrag Op, InstHexagon Inst, InstHexagon SwapInst> {
+ defm: T_MinMax_pats<Op, DoubleRegs, i64, Inst, SwapInst>;
+}
+
+let AddedComplexity = 200 in {
+ defm: MinMax_pats_p<setge, A2_maxp, A2_minp>;
+ defm: MinMax_pats_p<setgt, A2_maxp, A2_minp>;
+ defm: MinMax_pats_p<setle, A2_minp, A2_maxp>;
+ defm: MinMax_pats_p<setlt, A2_minp, A2_maxp>;
+ defm: MinMax_pats_p<setuge, A2_maxup, A2_minup>;
+ defm: MinMax_pats_p<setugt, A2_maxup, A2_minup>;
+ defm: MinMax_pats_p<setule, A2_minup, A2_maxup>;
+ defm: MinMax_pats_p<setult, A2_minup, A2_maxup>;
+}
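+
+// Each pairing above presumably expands (via T_MinMax_pats) to both operand
+// orders of an i64 select-of-compare, e.g. (select (setgt $a, $b), $a, $b)
+// selects A2_maxp while the swapped select picks the paired A2_minp.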
//===----------------------------------------------------------------------===//
// ALU64/ALU -
@@ -83,25 +155,112 @@ Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
-// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>;
//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
-// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>;
//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
-// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>;
//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset),
-// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>;
//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset),
-// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
-
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>;
// Map call instruction
-def : Pat<(call (i32 IntRegs:$dst)),
- (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
-def : Pat<(call tglobaladdr:$dst),
- (CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
-def : Pat<(call texternalsym:$dst),
- (CALLv3 texternalsym:$dst)>, Requires<[HasV3T]>;
+def : Pat<(callv3 (i32 IntRegs:$dst)),
+ (J2_callr (i32 IntRegs:$dst))>;
+def : Pat<(callv3 tglobaladdr:$dst),
+ (J2_call tglobaladdr:$dst)>;
+def : Pat<(callv3 texternalsym:$dst),
+ (J2_call texternalsym:$dst)>;
+def : Pat<(callv3 tglobaltlsaddr:$dst),
+ (J2_call tglobaltlsaddr:$dst)>;
+
+def : Pat<(callv3nr (i32 IntRegs:$dst)),
+ (CALLRv3nr (i32 IntRegs:$dst))>;
+def : Pat<(callv3nr tglobaladdr:$dst),
+ (CALLv3nr tglobaladdr:$dst)>;
+def : Pat<(callv3nr texternalsym:$dst),
+ (CALLv3nr texternalsym:$dst)>;
+
+//===----------------------------------------------------------------------===//
+// :raw form of vrcmpys:hi/lo insns
+//===----------------------------------------------------------------------===//
+// Vector reduce complex multiply by scalar.
+let Defs = [USR_OVF], hasSideEffects = 0 in
+class T_vrcmpRaw<string HiLo, bits<3>MajOp>:
+ MInst<(outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rdd = vrcmpys($Rss, $Rtt):<<1:sat:raw:"#HiLo, []> {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ let Inst{7-5} = 0b100;
+ let Inst{4-0} = Rdd;
+}
+
+def M2_vrcmpys_s1_h: T_vrcmpRaw<"hi", 0b101>;
+def M2_vrcmpys_s1_l: T_vrcmpRaw<"lo", 0b111>;
+
+// Assembler mapped to M2_vrcmpys_s1_h or M2_vrcmpys_s1_l
+let hasSideEffects = 0, isAsmParserOnly = 1 in
+def M2_vrcmpys_s1
+ : MInst<(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, IntRegs:$Rt),
+ "$Rdd=vrcmpys($Rss,$Rt):<<1:sat">;
+
+// Vector reduce complex multiply by scalar with accumulation.
+let Defs = [USR_OVF], hasSideEffects = 0 in
+class T_vrcmpys_acc<string HiLo, bits<3>MajOp>:
+ MInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$_src_, DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rxx += vrcmpys($Rss, $Rtt):<<1:sat:raw:"#HiLo, [],
+ "$Rxx = $_src_"> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1010;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ let Inst{7-5} = 0b100;
+ let Inst{4-0} = Rxx;
+ }
+
+def M2_vrcmpys_acc_s1_h: T_vrcmpys_acc<"hi", 0b101>;
+def M2_vrcmpys_acc_s1_l: T_vrcmpys_acc<"lo", 0b111>;
+
+// Assembler mapped to M2_vrcmpys_acc_s1_h or M2_vrcmpys_acc_s1_l
+
+let isAsmParserOnly = 1 in
+def M2_vrcmpys_acc_s1
+ : MInst <(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$dst2, DoubleRegs:$src1, IntRegs:$src2),
+ "$dst += vrcmpys($src1, $src2):<<1:sat", [],
+ "$dst2 = $dst">;
+
+def M2_vrcmpys_s1rp_h : T_MType_vrcmpy <"vrcmpys", 0b101, 0b110, 1>;
+def M2_vrcmpys_s1rp_l : T_MType_vrcmpy <"vrcmpys", 0b101, 0b111, 0>;
+
+// Assembler mapped to M2_vrcmpys_s1rp_h or M2_vrcmpys_s1rp_l
+let isAsmParserOnly = 1 in
+def M2_vrcmpys_s1rp
+ : MInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss, IntRegs:$Rt),
+ "$Rd=vrcmpys($Rss,$Rt):<<1:rnd:sat">;
+
+
+// S2_cabacdecbin: CABAC decode bin.
+let Defs = [P0], isPredicateLate = 1, Itinerary = S_3op_tc_1_SLOT23 in
+def S2_cabacdecbin : T_S3op_64 < "decbin", 0b11, 0b110, 0>;
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index d39f7d7..0e4dde3 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -11,25 +11,34 @@
//
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1 in
-class T_Immext<dag ins> :
- EXTENDERInst<(outs), ins, "immext(#$imm)", []>,
- Requires<[HasV4T]>;
-
-def IMMEXT_b : T_Immext<(ins brtarget:$imm)>;
-def IMMEXT_c : T_Immext<(ins calltarget:$imm)>;
-def IMMEXT_g : T_Immext<(ins globaladdress:$imm)>;
-def IMMEXT_i : T_Immext<(ins u26_6Imm:$imm)>;
-
-// Fold (add (CONST32 tglobaladdr:$addr) <offset>) into a global address.
-def FoldGlobalAddr : ComplexPattern<i32, 1, "foldGlobalAddress", [], []>;
+def addrga: PatLeaf<(i32 AddrGA:$Addr)>;
+def addrgp: PatLeaf<(i32 AddrGP:$Addr)>;
+
+let hasSideEffects = 0 in
+class T_Immext<Operand ImmType>
+ : EXTENDERInst<(outs), (ins ImmType:$imm),
+ "immext(#$imm)", []> {
+ bits<32> imm;
+ let IClass = 0b0000;
+
+ let Inst{27-16} = imm{31-20};
+ let Inst{13-0} = imm{19-6};
+ }
-// Fold (add (CONST32_GP tglobaladdr:$addr) <offset>) into a global address.
-def FoldGlobalAddrGP : ComplexPattern<i32, 1, "foldGlobalAddressGP", [], []>;
+def A4_ext : T_Immext<u26_6Imm>;
+let isCodeGenOnly = 1 in {
+ let isBranch = 1 in
+ def A4_ext_b : T_Immext<brtarget>;
+ let isCall = 1 in
+ def A4_ext_c : T_Immext<calltarget>;
+ def A4_ext_g : T_Immext<globaladdress>;
+}
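+
+// Encoding note: the extender word carries imm{31-6} (imm{31-20} in
+// Inst{27-16} plus imm{19-6} in Inst{13-0}); the low six bits of the full
+// 32-bit constant come from the instruction being extended.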
-def NumUsesBelowThresCONST32 : PatFrag<(ops node:$addr),
- (HexagonCONST32 node:$addr), [{
- return hasNumUsesBelowThresGA(N->getOperand(0).getNode());
+def BITPOS32 : SDNodeXForm<imm, [{
+  // Return the bit position we will set [0-31], as an SDNode.
+ int32_t imm = N->getSExtValue();
+ return XformMskToBitPosU5Imm(imm);
}]>;
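+
+// Example, assuming XformMskToBitPosU5Imm converts a power-of-2 mask to its
+// bit index: an immediate of 0x10 becomes #4, suitable for instructions that
+// take a bit position rather than a mask.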
// Hexagon V4 Architecture spec defines 8 instruction classes:
@@ -95,63 +104,158 @@ def NumUsesBelowThresCONST32 : PatFrag<(ops node:$addr),
//===----------------------------------------------------------------------===//
// ALU32 +
//===----------------------------------------------------------------------===//
-// Generate frame index addresses.
-let neverHasSideEffects = 1, isReMaterializable = 1,
-isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in
-def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s32Imm:$offset),
- "$dst = add($src1, ##$offset)",
- []>,
- Requires<[HasV4T]>;
-
-// Rd=cmp.eq(Rs,#s8)
-let validSubTargets = HasV4SubT, isExtendable = 1, opExtendable = 2,
-isExtentSigned = 1, opExtentBits = 8 in
-def V4_A4_rcmpeqi : ALU32_ri<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, s8Ext:$s8),
- "$Rd = cmp.eq($Rs, #$s8)",
- [(set (i32 IntRegs:$Rd),
- (i32 (zext (i1 (seteq (i32 IntRegs:$Rs),
- s8ExtPred:$s8)))))]>,
- Requires<[HasV4T]>;
-
-// Preserve the TSTBIT generation
-def : Pat <(i32 (zext (i1 (setne (i32 (and (i32 (shl 1, (i32 IntRegs:$src2))),
- (i32 IntRegs:$src1))), 0)))),
- (i32 (MUX_ii (i1 (TSTBIT_rr (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
- 1, 0))>;
-
-// Interfered with tstbit generation, above pattern preserves, see : tstbit.ll
-// Rd=cmp.ne(Rs,#s8)
-let validSubTargets = HasV4SubT, isExtendable = 1, opExtendable = 2,
-isExtentSigned = 1, opExtentBits = 8 in
-def V4_A4_rcmpneqi : ALU32_ri<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, s8Ext:$s8),
- "$Rd = !cmp.eq($Rs, #$s8)",
- [(set (i32 IntRegs:$Rd),
- (i32 (zext (i1 (setne (i32 IntRegs:$Rs),
- s8ExtPred:$s8)))))]>,
- Requires<[HasV4T]>;
-
-// Rd=cmp.eq(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def V4_A4_rcmpeq : ALU32_ri<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, IntRegs:$Rt),
- "$Rd = cmp.eq($Rs, $Rt)",
- [(set (i32 IntRegs:$Rd),
- (i32 (zext (i1 (seteq (i32 IntRegs:$Rs),
- IntRegs:$Rt)))))]>,
- Requires<[HasV4T]>;
-
-// Rd=cmp.ne(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def V4_A4_rcmpneq : ALU32_ri<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, IntRegs:$Rt),
- "$Rd = !cmp.eq($Rs, $Rt)",
- [(set (i32 IntRegs:$Rd),
- (i32 (zext (i1 (setne (i32 IntRegs:$Rs),
- IntRegs:$Rt)))))]>,
- Requires<[HasV4T]>;
+
+class T_ALU32_3op_not<string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ bit OpsRev>
+ : T_ALU32_3op<mnemonic, MajOp, MinOp, OpsRev, 0> {
+ let AsmString = "$Rd = "#mnemonic#"($Rs, ~$Rt)";
+}
+
+let BaseOpcode = "andn_rr", CextOpcode = "andn" in
+def A4_andn : T_ALU32_3op_not<"and", 0b001, 0b100, 1>;
+let BaseOpcode = "orn_rr", CextOpcode = "orn" in
+def A4_orn : T_ALU32_3op_not<"or", 0b001, 0b101, 1>;
+
+let CextOpcode = "rcmp.eq" in
+def A4_rcmpeq : T_ALU32_3op<"cmp.eq", 0b011, 0b010, 0, 1>;
+let CextOpcode = "!rcmp.eq" in
+def A4_rcmpneq : T_ALU32_3op<"!cmp.eq", 0b011, 0b011, 0, 1>;
+
+def C4_cmpneq : T_ALU32_3op_cmp<"!cmp.eq", 0b00, 1, 1>;
+def C4_cmplte : T_ALU32_3op_cmp<"!cmp.gt", 0b10, 1, 0>;
+def C4_cmplteu : T_ALU32_3op_cmp<"!cmp.gtu", 0b11, 1, 0>;
+
+// Pats for instruction selection.
+
+// A class to embed the usual comparison patfrags within a zext to i32.
+// The seteq/setne frags use "lhs" and "rhs" as operands, so use the same
+// names, or else the frag's "body" won't match the operands.
+class CmpInReg<PatFrag Op>
+ : PatFrag<(ops node:$lhs, node:$rhs),(i32 (zext (i1 Op.Fragment)))>;
+
+def: T_cmp32_rr_pat<A4_rcmpeq, CmpInReg<seteq>, i32>;
+def: T_cmp32_rr_pat<A4_rcmpneq, CmpInReg<setne>, i32>;
+
+def: T_cmp32_rr_pat<C4_cmpneq, setne, i1>;
+
+class T_CMP_rrbh<string mnemonic, bits<3> MinOp, bit IsComm>
+ : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Pd = "#mnemonic#"($Rs, $Rt)", [], "", S_3op_tc_2early_SLOT23>,
+ ImmRegRel {
+ let InputType = "reg";
+ let CextOpcode = mnemonic;
+ let isCompare = 1;
+ let isCommutable = IsComm;
+ let hasSideEffects = 0;
+
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
+ let Inst{27-21} = 0b0111110;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{1-0} = Pd;
+}
+
+def A4_cmpbeq : T_CMP_rrbh<"cmpb.eq", 0b110, 1>;
+def A4_cmpbgt : T_CMP_rrbh<"cmpb.gt", 0b010, 0>;
+def A4_cmpbgtu : T_CMP_rrbh<"cmpb.gtu", 0b111, 0>;
+def A4_cmpheq : T_CMP_rrbh<"cmph.eq", 0b011, 1>;
+def A4_cmphgt : T_CMP_rrbh<"cmph.gt", 0b100, 0>;
+def A4_cmphgtu : T_CMP_rrbh<"cmph.gtu", 0b101, 0>;
+
+let AddedComplexity = 100 in {
+ def: Pat<(i1 (seteq (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+ 255), 0)),
+ (A4_cmpbeq IntRegs:$Rs, IntRegs:$Rt)>;
+ def: Pat<(i1 (setne (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+ 255), 0)),
+ (C2_not (A4_cmpbeq IntRegs:$Rs, IntRegs:$Rt))>;
+ def: Pat<(i1 (seteq (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+ 65535), 0)),
+ (A4_cmpheq IntRegs:$Rs, IntRegs:$Rt)>;
+ def: Pat<(i1 (setne (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+ 65535), 0)),
+ (C2_not (A4_cmpheq IntRegs:$Rs, IntRegs:$Rt))>;
+}
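+
+// Why these patterns are sound: ((Rs ^ Rt) & 255) == 0 holds exactly when
+// the low bytes of Rs and Rt are equal, which is what A4_cmpbeq computes;
+// the 65535 variants do the same for halfwords via A4_cmpheq.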
+
+class T_CMP_ribh<string mnemonic, bits<2> MajOp, bit IsHalf, bit IsComm,
+ Operand ImmType, bit IsImmExt, bit IsImmSigned, int ImmBits>
+ : ALU64Inst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, ImmType:$Imm),
+ "$Pd = "#mnemonic#"($Rs, #$Imm)", [], "", ALU64_tc_2early_SLOT23>,
+ ImmRegRel {
+ let InputType = "imm";
+ let CextOpcode = mnemonic;
+ let isCompare = 1;
+ let isCommutable = IsComm;
+ let hasSideEffects = 0;
+ let isExtendable = IsImmExt;
+ let opExtendable = !if (IsImmExt, 2, 0);
+ let isExtentSigned = IsImmSigned;
+ let opExtentBits = ImmBits;
+
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<8> Imm;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b1101;
+ let Inst{22-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-5} = Imm;
+ let Inst{4} = 0b0;
+ let Inst{3} = IsHalf;
+ let Inst{1-0} = Pd;
+}
+
+def A4_cmpbeqi : T_CMP_ribh<"cmpb.eq", 0b00, 0, 1, u8Imm, 0, 0, 8>;
+def A4_cmpbgti : T_CMP_ribh<"cmpb.gt", 0b01, 0, 0, s8Imm, 0, 1, 8>;
+def A4_cmpbgtui : T_CMP_ribh<"cmpb.gtu", 0b10, 0, 0, u7Ext, 1, 0, 7>;
+def A4_cmpheqi : T_CMP_ribh<"cmph.eq", 0b00, 1, 1, s8Ext, 1, 1, 8>;
+def A4_cmphgti : T_CMP_ribh<"cmph.gt", 0b01, 1, 0, s8Ext, 1, 1, 8>;
+def A4_cmphgtui : T_CMP_ribh<"cmph.gtu", 0b10, 1, 0, u7Ext, 1, 0, 7>;
+
+class T_RCMP_EQ_ri<string mnemonic, bit IsNeg>
+ : ALU32_ri<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s8Ext:$s8),
+ "$Rd = "#mnemonic#"($Rs, #$s8)", [], "", ALU32_2op_tc_1_SLOT0123>,
+ ImmRegRel {
+ let InputType = "imm";
+ let CextOpcode = !if (IsNeg, "!rcmp.eq", "rcmp.eq");
+ let isExtendable = 1;
+ let opExtendable = 2;
+ let isExtentSigned = 1;
+ let opExtentBits = 8;
+ let hasNewValue = 1;
+
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<8> s8;
+
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b0011;
+ let Inst{22} = 0b1;
+ let Inst{21} = IsNeg;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b1;
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rd;
+}
+
+def A4_rcmpeqi : T_RCMP_EQ_ri<"cmp.eq", 0>;
+def A4_rcmpneqi : T_RCMP_EQ_ri<"!cmp.eq", 1>;
+
+def: Pat<(i32 (zext (i1 (seteq (i32 IntRegs:$Rs), s8ExtPred:$s8)))),
+ (A4_rcmpeqi IntRegs:$Rs, s8ExtPred:$s8)>;
+def: Pat<(i32 (zext (i1 (setne (i32 IntRegs:$Rs), s8ExtPred:$s8)))),
+ (A4_rcmpneqi IntRegs:$Rs, s8ExtPred:$s8)>;
+
+// Preserve the S2_tstbit_r generation
+def: Pat<(i32 (zext (i1 (setne (i32 (and (i32 (shl 1, (i32 IntRegs:$src2))),
+ (i32 IntRegs:$src1))), 0)))),
+ (C2_muxii (S2_tstbit_r IntRegs:$src1, IntRegs:$src2), 1, 0)>;
//===----------------------------------------------------------------------===//
// ALU32 -
@@ -162,24 +266,31 @@ def V4_A4_rcmpneq : ALU32_ri<(outs IntRegs:$Rd),
// ALU32/PERM +
//===----------------------------------------------------------------------===//
-// Combine
-// Rdd=combine(Rs, #s8)
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8,
- neverHasSideEffects = 1, validSubTargets = HasV4SubT in
-def COMBINE_rI_V4 : ALU32_ri<(outs DoubleRegs:$dst),
- (ins IntRegs:$src1, s8Ext:$src2),
- "$dst = combine($src1, #$src2)",
- []>,
- Requires<[HasV4T]>;
-
-// Rdd=combine(#s8, Rs)
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 8,
- neverHasSideEffects = 1, validSubTargets = HasV4SubT in
-def COMBINE_Ir_V4 : ALU32_ir<(outs DoubleRegs:$dst),
- (ins s8Ext:$src1, IntRegs:$src2),
- "$dst = combine(#$src1, $src2)",
- []>,
- Requires<[HasV4T]>;
+// Combine a word and an immediate into a register pair.
+let hasSideEffects = 0, isExtentSigned = 1, isExtendable = 1,
+ opExtentBits = 8 in
+class T_Combine1 <bits<2> MajOp, dag ins, string AsmStr>
+ : ALU32Inst <(outs DoubleRegs:$Rdd), ins, AsmStr> {
+ bits<5> Rdd;
+ bits<5> Rs;
+ bits<8> s8;
+
+ let IClass = 0b0111;
+ let Inst{27-24} = 0b0011;
+ let Inst{22-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b1;
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rdd;
+ }
+
+let opExtendable = 2 in
+def A4_combineri : T_Combine1<0b00, (ins IntRegs:$Rs, s8Ext:$s8),
+ "$Rdd = combine($Rs, #$s8)">;
+
+let opExtendable = 1 in
+def A4_combineir : T_Combine1<0b01, (ins s8Ext:$s8, IntRegs:$Rs),
+ "$Rdd = combine(#$s8, $Rs)">;
def HexagonWrapperCombineRI_V4 :
SDNode<"HexagonISD::WrapperCombineRI_V4", SDTHexagonI64I32I32>;
@@ -187,274 +298,355 @@ def HexagonWrapperCombineIR_V4 :
SDNode<"HexagonISD::WrapperCombineIR_V4", SDTHexagonI64I32I32>;
def : Pat <(HexagonWrapperCombineRI_V4 IntRegs:$r, s8ExtPred:$i),
- (COMBINE_rI_V4 IntRegs:$r, s8ExtPred:$i)>,
- Requires<[HasV4T]>;
+ (A4_combineri IntRegs:$r, s8ExtPred:$i)>;
def : Pat <(HexagonWrapperCombineIR_V4 s8ExtPred:$i, IntRegs:$r),
- (COMBINE_Ir_V4 s8ExtPred:$i, IntRegs:$r)>,
- Requires<[HasV4T]>;
+ (A4_combineir s8ExtPred:$i, IntRegs:$r)>;
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 6,
- neverHasSideEffects = 1, validSubTargets = HasV4SubT in
-def COMBINE_iI_V4 : ALU32_ii<(outs DoubleRegs:$dst),
- (ins s8Imm:$src1, u6Ext:$src2),
- "$dst = combine(#$src1, #$src2)",
- []>,
- Requires<[HasV4T]>;
+// A4_combineii: Set two small immediates.
+let hasSideEffects = 0, isExtendable = 1, opExtentBits = 6, opExtendable = 2 in
+def A4_combineii: ALU32Inst<(outs DoubleRegs:$Rdd), (ins s8Imm:$s8, u6Ext:$U6),
+ "$Rdd = combine(#$s8, #$U6)"> {
+ bits<5> Rdd;
+ bits<8> s8;
+ bits<6> U6;
+
+ let IClass = 0b0111;
+ let Inst{27-23} = 0b11001;
+ let Inst{20-16} = U6{5-1};
+ let Inst{13} = U6{0};
+ let Inst{12-5} = s8;
+ let Inst{4-0} = Rdd;
+ }
+
+// The complexity of the combine with two immediates should be greater than
+// the complexity of a combine involving a register.
+let AddedComplexity = 75 in
+def: Pat<(HexagonCOMBINE s8ImmPred:$s8, u6ExtPred:$u6),
+ (A4_combineii imm:$s8, imm:$u6)>;
//===----------------------------------------------------------------------===//
-// ALU32/PERM +
+// ALU32/PERM -
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// LD +
//===----------------------------------------------------------------------===//
+
+def Zext64: OutPatFrag<(ops node:$Rs),
+ (i64 (A4_combineir 0, (i32 $Rs)))>;
+def Sext64: OutPatFrag<(ops node:$Rs),
+ (i64 (A2_sxtw (i32 $Rs)))>;
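+
+// Zext64/Sext64 are OutPatFrags: they expand in the output (already-selected)
+// pattern, so they may wrap machine instructions such as A4_combineir and
+// A2_sxtw and be reused as the ValueMod argument of Loadxm_pat below.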
+
+// Patterns to generate indexed loads with different forms of the address:
+// - frameindex,
+// - base + offset,
+// - base (without offset).
+multiclass Loadxm_pat<PatFrag Load, ValueType VT, PatFrag ValueMod,
+ PatLeaf ImmPred, InstHexagon MI> {
+ def: Pat<(VT (Load AddrFI:$fi)),
+ (VT (ValueMod (MI AddrFI:$fi, 0)))>;
+ def: Pat<(VT (Load (add IntRegs:$Rs, ImmPred:$Off))),
+ (VT (ValueMod (MI IntRegs:$Rs, imm:$Off)))>;
+ def: Pat<(VT (Load (i32 IntRegs:$Rs))),
+ (VT (ValueMod (MI IntRegs:$Rs, 0)))>;
+}
+
+defm: Loadxm_pat<extloadi1, i64, Zext64, s11_0ExtPred, L2_loadrub_io>;
+defm: Loadxm_pat<extloadi8, i64, Zext64, s11_0ExtPred, L2_loadrub_io>;
+defm: Loadxm_pat<extloadi16, i64, Zext64, s11_1ExtPred, L2_loadruh_io>;
+defm: Loadxm_pat<zextloadi1, i64, Zext64, s11_0ExtPred, L2_loadrub_io>;
+defm: Loadxm_pat<zextloadi8, i64, Zext64, s11_0ExtPred, L2_loadrub_io>;
+defm: Loadxm_pat<zextloadi16, i64, Zext64, s11_1ExtPred, L2_loadruh_io>;
+defm: Loadxm_pat<sextloadi8, i64, Sext64, s11_0ExtPred, L2_loadrb_io>;
+defm: Loadxm_pat<sextloadi16, i64, Sext64, s11_1ExtPred, L2_loadrh_io>;
+
+// Map Rdd = anyext(Rs) -> Rdd = combine(#0, Rs).
+def: Pat<(i64 (anyext (i32 IntRegs:$src1))), (Zext64 IntRegs:$src1)>;
+
//===----------------------------------------------------------------------===//
// Template class for load instructions with Absolute set addressing mode.
//===----------------------------------------------------------------------===//
-let isExtended = 1, opExtendable = 2, neverHasSideEffects = 1,
-validSubTargets = HasV4SubT, addrMode = AbsoluteSet in
-class T_LD_abs_set<string mnemonic, RegisterClass RC>:
- LDInst2<(outs RC:$dst1, IntRegs:$dst2),
- (ins u0AlwaysExt:$addr),
- "$dst1 = "#mnemonic#"($dst2=##$addr)",
- []>,
- Requires<[HasV4T]>;
+let isExtended = 1, opExtendable = 2, opExtentBits = 6, addrMode = AbsoluteSet,
+ hasSideEffects = 0 in
+class T_LD_abs_set<string mnemonic, RegisterClass RC, bits<4>MajOp>:
+ LDInst<(outs RC:$dst1, IntRegs:$dst2),
+ (ins u6Ext:$addr),
+ "$dst1 = "#mnemonic#"($dst2 = #$addr)",
+ []> {
+ bits<7> name;
+ bits<5> dst1;
+ bits<5> dst2;
+ bits<6> addr;
+
+ let IClass = 0b1001;
+ let Inst{27-25} = 0b101;
+ let Inst{24-21} = MajOp;
+ let Inst{13-12} = 0b01;
+ let Inst{4-0} = dst1;
+ let Inst{20-16} = dst2;
+ let Inst{11-8} = addr{5-2};
+ let Inst{6-5} = addr{1-0};
+}
+
+let accessSize = ByteAccess, hasNewValue = 1 in {
+ def L4_loadrb_ap : T_LD_abs_set <"memb", IntRegs, 0b1000>;
+ def L4_loadrub_ap : T_LD_abs_set <"memub", IntRegs, 0b1001>;
+}
-def LDrid_abs_set_V4 : T_LD_abs_set <"memd", DoubleRegs>;
-def LDrib_abs_set_V4 : T_LD_abs_set <"memb", IntRegs>;
-def LDriub_abs_set_V4 : T_LD_abs_set <"memub", IntRegs>;
-def LDrih_abs_set_V4 : T_LD_abs_set <"memh", IntRegs>;
-def LDriw_abs_set_V4 : T_LD_abs_set <"memw", IntRegs>;
-def LDriuh_abs_set_V4 : T_LD_abs_set <"memuh", IntRegs>;
+let accessSize = HalfWordAccess, hasNewValue = 1 in {
+ def L4_loadrh_ap : T_LD_abs_set <"memh", IntRegs, 0b1010>;
+ def L4_loadruh_ap : T_LD_abs_set <"memuh", IntRegs, 0b1011>;
+ def L4_loadbsw2_ap : T_LD_abs_set <"membh", IntRegs, 0b0001>;
+ def L4_loadbzw2_ap : T_LD_abs_set <"memubh", IntRegs, 0b0011>;
+}
+let accessSize = WordAccess, hasNewValue = 1 in
+ def L4_loadri_ap : T_LD_abs_set <"memw", IntRegs, 0b1100>;
-// multiclass for load instructions with base + register offset
-// addressing mode
-multiclass ld_idxd_shl_pbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : LDInst2<(outs RC:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#"$dst = "#mnemonic#"($src2+$src3<<#$offset)",
- []>, Requires<[HasV4T]>;
+let accessSize = WordAccess in {
+ def L4_loadbzw4_ap : T_LD_abs_set <"memubh", DoubleRegs, 0b0101>;
+ def L4_loadbsw4_ap : T_LD_abs_set <"membh", DoubleRegs, 0b0111>;
}
-multiclass ld_idxd_shl_pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ld_idxd_shl_pbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ld_idxd_shl_pbase<mnemonic, RC, PredNot, 1>;
+let accessSize = DoubleWordAccess in
+def L4_loadrd_ap : T_LD_abs_set <"memd", DoubleRegs, 0b1110>;
+
+let accessSize = ByteAccess in
+ def L4_loadalignb_ap : T_LD_abs_set <"memb_fifo", DoubleRegs, 0b0100>;
+
+let accessSize = HalfWordAccess in
+def L4_loadalignh_ap : T_LD_abs_set <"memh_fifo", DoubleRegs, 0b0010>;
+
+// Load - Indirect with long offset
+let InputType = "imm", addrMode = BaseLongOffset, isExtended = 1,
+opExtentBits = 6, opExtendable = 3 in
+class T_LoadAbsReg <string mnemonic, string CextOp, RegisterClass RC,
+ bits<4> MajOp>
+ : LDInst <(outs RC:$dst), (ins IntRegs:$src1, u2Imm:$src2, u6Ext:$src3),
+ "$dst = "#mnemonic#"($src1<<#$src2 + #$src3)",
+ [] >, ImmRegShl {
+ bits<5> dst;
+ bits<5> src1;
+ bits<2> src2;
+ bits<6> src3;
+ let CextOpcode = CextOp;
+ let hasNewValue = !if (!eq(!cast<string>(RC), "DoubleRegs"), 0, 1);
+
+ let IClass = 0b1001;
+ let Inst{27-25} = 0b110;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2{1};
+ let Inst{12} = 0b1;
+ let Inst{11-8} = src3{5-2};
+ let Inst{7} = src2{0};
+ let Inst{6-5} = src3{1-0};
+ let Inst{4-0} = dst;
}
+
+let accessSize = ByteAccess in {
+ def L4_loadrb_ur : T_LoadAbsReg<"memb", "LDrib", IntRegs, 0b1000>;
+ def L4_loadrub_ur : T_LoadAbsReg<"memub", "LDriub", IntRegs, 0b1001>;
+ def L4_loadalignb_ur : T_LoadAbsReg<"memb_fifo", "LDrib_fifo",
+ DoubleRegs, 0b0100>;
}
-let neverHasSideEffects = 1 in
-multiclass ld_idxd_shl<string mnemonic, string CextOp, RegisterClass RC> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl in {
- let isPredicable = 1 in
- def NAME#_V4 : LDInst2<(outs RC:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
- "$dst = "#mnemonic#"($src1+$src2<<#$offset)",
- []>, Requires<[HasV4T]>;
-
- let isPredicated = 1 in {
- defm Pt_V4 : ld_idxd_shl_pred<mnemonic, RC, 0 >;
- defm NotPt_V4 : ld_idxd_shl_pred<mnemonic, RC, 1>;
- }
- }
+let accessSize = HalfWordAccess in {
+ def L4_loadrh_ur : T_LoadAbsReg<"memh", "LDrih", IntRegs, 0b1010>;
+ def L4_loadruh_ur : T_LoadAbsReg<"memuh", "LDriuh", IntRegs, 0b1011>;
+ def L4_loadbsw2_ur : T_LoadAbsReg<"membh", "LDribh2", IntRegs, 0b0001>;
+ def L4_loadbzw2_ur : T_LoadAbsReg<"memubh", "LDriubh2", IntRegs, 0b0011>;
+ def L4_loadalignh_ur : T_LoadAbsReg<"memh_fifo", "LDrih_fifo",
+ DoubleRegs, 0b0010>;
}
-let addrMode = BaseRegOffset in {
- let accessSize = ByteAccess in {
- defm LDrib_indexed_shl: ld_idxd_shl<"memb", "LDrib", IntRegs>,
- AddrModeRel;
- defm LDriub_indexed_shl: ld_idxd_shl<"memub", "LDriub", IntRegs>,
- AddrModeRel;
- }
- let accessSize = HalfWordAccess in {
- defm LDrih_indexed_shl: ld_idxd_shl<"memh", "LDrih", IntRegs>, AddrModeRel;
- defm LDriuh_indexed_shl: ld_idxd_shl<"memuh", "LDriuh", IntRegs>,
- AddrModeRel;
- }
- let accessSize = WordAccess in
- defm LDriw_indexed_shl: ld_idxd_shl<"memw", "LDriw", IntRegs>, AddrModeRel;
+let accessSize = WordAccess in {
+ def L4_loadri_ur : T_LoadAbsReg<"memw", "LDriw", IntRegs, 0b1100>;
+ def L4_loadbsw4_ur : T_LoadAbsReg<"membh", "LDribh4", DoubleRegs, 0b0111>;
+ def L4_loadbzw4_ur : T_LoadAbsReg<"memubh", "LDriubh4", DoubleRegs, 0b0101>;
+}
+
+let accessSize = DoubleWordAccess in
+def L4_loadrd_ur : T_LoadAbsReg<"memd", "LDrid", DoubleRegs, 0b1110>;
+
- let accessSize = DoubleWordAccess in
- defm LDrid_indexed_shl: ld_idxd_shl<"memd", "LDrid", DoubleRegs>,
- AddrModeRel;
+multiclass T_LoadAbsReg_Pat <PatFrag ldOp, InstHexagon MI, ValueType VT = i32> {
+ def : Pat <(VT (ldOp (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3)))),
+ (MI IntRegs:$src1, u2ImmPred:$src2, tglobaladdr:$src3)>;
+
+ def : Pat <(VT (ldOp (add IntRegs:$src1,
+ (HexagonCONST32 tglobaladdr:$src2)))),
+ (MI IntRegs:$src1, 0, tglobaladdr:$src2)>;
}
-// 'def pats' for load instructions with base + register offset and non-zero
-// immediate value. Immediate value is used to left-shift the second
-// register operand.
-let AddedComplexity = 40 in {
-def : Pat <(i32 (sextloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDrib_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (zextloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDriub_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (extloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDriub_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (sextloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDrih_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (zextloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDriuh_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (extloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDriuh_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i32 (load (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDriw_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
-
-def : Pat <(i64 (load (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$offset)))),
- (LDrid_indexed_shl_V4 IntRegs:$src1,
- IntRegs:$src2, u2ImmPred:$offset)>,
- Requires<[HasV4T]>;
+let AddedComplexity = 60 in {
+defm : T_LoadAbsReg_Pat <sextloadi8, L4_loadrb_ur>;
+defm : T_LoadAbsReg_Pat <zextloadi8, L4_loadrub_ur>;
+defm : T_LoadAbsReg_Pat <extloadi8, L4_loadrub_ur>;
+
+defm : T_LoadAbsReg_Pat <sextloadi16, L4_loadrh_ur>;
+defm : T_LoadAbsReg_Pat <zextloadi16, L4_loadruh_ur>;
+defm : T_LoadAbsReg_Pat <extloadi16, L4_loadruh_ur>;
+
+defm : T_LoadAbsReg_Pat <load, L4_loadri_ur>;
+defm : T_LoadAbsReg_Pat <load, L4_loadrd_ur, i64>;
}
+//===----------------------------------------------------------------------===//
+// Template classes for the non-predicated load instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+class T_load_rr <string mnemonic, RegisterClass RC, bits<3> MajOp>:
+ LDInst<(outs RC:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$u2),
+ "$dst = "#mnemonic#"($src1 + $src2<<#$u2)",
+ [], "", V4LDST_tc_ld_SLOT01>, ImmRegShl, AddrModeRel {
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
+ bits<2> u2;
-// 'def pats' for load instruction base + register offset and
-// zero immediate value.
-let AddedComplexity = 10 in {
-def : Pat <(i64 (load (add IntRegs:$src1, IntRegs:$src2))),
- (LDrid_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ let IClass = 0b0011;
-def : Pat <(i32 (sextloadi8 (add IntRegs:$src1, IntRegs:$src2))),
- (LDrib_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ let Inst{27-24} = 0b1010;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{12-8} = src2;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{4-0} = dst;
+ }
-def : Pat <(i32 (zextloadi8 (add IntRegs:$src1, IntRegs:$src2))),
- (LDriub_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// Template classes for the predicated load instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+let isPredicated = 1 in
+class T_pload_rr <string mnemonic, RegisterClass RC, bits<3> MajOp,
+ bit isNot, bit isPredNew>:
+ LDInst <(outs RC:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$u2),
+ !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#"$dst = "#mnemonic#"($src2+$src3<<#$u2)",
+ [], "", V4LDST_tc_ld_SLOT01>, AddrModeRel {
+ bits<5> dst;
+ bits<2> src1;
+ bits<5> src2;
+ bits<5> src3;
+ bits<2> u2;
+
+ let isPredicatedFalse = isNot;
+ let isPredicatedNew = isPredNew;
-def : Pat <(i32 (extloadi8 (add IntRegs:$src1, IntRegs:$src2))),
- (LDriub_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ let IClass = 0b0011;
-def : Pat <(i32 (sextloadi16 (add IntRegs:$src1, IntRegs:$src2))),
- (LDrih_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ let Inst{27-26} = 0b00;
+ let Inst{25} = isPredNew;
+ let Inst{24} = isNot;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{12-8} = src3;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{6-5} = src1;
+ let Inst{4-0} = dst;
+ }
-def : Pat <(i32 (zextloadi16 (add IntRegs:$src1, IntRegs:$src2))),
- (LDriuh_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// multiclass for load instructions with base + register offset
+// addressing mode
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, addrMode = BaseRegOffset in
+multiclass ld_idxd_shl <string mnemonic, string CextOp, RegisterClass RC,
+ bits<3> MajOp > {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl,
+ InputType = "reg" in {
+ let isPredicable = 1 in
+ def L4_#NAME#_rr : T_load_rr <mnemonic, RC, MajOp>;
-def : Pat <(i32 (extloadi16 (add IntRegs:$src1, IntRegs:$src2))),
- (LDriuh_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ // Predicated
+ def L4_p#NAME#t_rr : T_pload_rr <mnemonic, RC, MajOp, 0, 0>;
+ def L4_p#NAME#f_rr : T_pload_rr <mnemonic, RC, MajOp, 1, 0>;
-def : Pat <(i32 (load (add IntRegs:$src1, IntRegs:$src2))),
- (LDriw_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2, 0)>,
- Requires<[HasV4T]>;
+ // Predicated new
+ def L4_p#NAME#tnew_rr : T_pload_rr <mnemonic, RC, MajOp, 0, 1>;
+ def L4_p#NAME#fnew_rr : T_pload_rr <mnemonic, RC, MajOp, 1, 1>;
+ }
}
-// zext i1->i64
-def : Pat <(i64 (zext (i1 PredRegs:$src1))),
- (i64 (COMBINE_Ir_V4 0, (MUX_ii (i1 PredRegs:$src1), 1, 0)))>,
- Requires<[HasV4T]>;
+let hasNewValue = 1, accessSize = ByteAccess in {
+ defm loadrb : ld_idxd_shl<"memb", "LDrib", IntRegs, 0b000>;
+ defm loadrub : ld_idxd_shl<"memub", "LDriub", IntRegs, 0b001>;
+}
-// zext i32->i64
-def : Pat <(i64 (zext (i32 IntRegs:$src1))),
- (i64 (COMBINE_Ir_V4 0, (i32 IntRegs:$src1)))>,
- Requires<[HasV4T]>;
-// zext i8->i64
-def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDriub ADDRriS11_0:$src1)))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1),
- s11_0ExtPred:$offset))),
- (i64 (COMBINE_Ir_V4 0, (LDriub_indexed IntRegs:$src1,
- s11_0ExtPred:$offset)))>,
- Requires<[HasV4T]>;
+let hasNewValue = 1, accessSize = HalfWordAccess in {
+ defm loadrh : ld_idxd_shl<"memh", "LDrih", IntRegs, 0b010>;
+ defm loadruh : ld_idxd_shl<"memuh", "LDriuh", IntRegs, 0b011>;
+}
+
+let hasNewValue = 1, accessSize = WordAccess in
+defm loadri : ld_idxd_shl<"memw", "LDriw", IntRegs, 0b100>;
+
+let accessSize = DoubleWordAccess in
+defm loadrd : ld_idxd_shl<"memd", "LDrid", DoubleRegs, 0b110>;
+
+// 'def pats' for load instructions with base + register offset and a non-zero
+// immediate value. The immediate is the left-shift amount applied to the
+// second register operand.
+class Loadxs_pat<PatFrag Load, ValueType VT, InstHexagon MI>
+ : Pat<(VT (Load (add (i32 IntRegs:$Rs),
+ (i32 (shl (i32 IntRegs:$Rt), u2ImmPred:$u2))))),
+ (VT (MI IntRegs:$Rs, IntRegs:$Rt, imm:$u2))>;
+
+let AddedComplexity = 40 in {
+ def: Loadxs_pat<extloadi8, i32, L4_loadrub_rr>;
+ def: Loadxs_pat<zextloadi8, i32, L4_loadrub_rr>;
+ def: Loadxs_pat<sextloadi8, i32, L4_loadrb_rr>;
+ def: Loadxs_pat<extloadi16, i32, L4_loadruh_rr>;
+ def: Loadxs_pat<zextloadi16, i32, L4_loadruh_rr>;
+ def: Loadxs_pat<sextloadi16, i32, L4_loadrh_rr>;
+ def: Loadxs_pat<load, i32, L4_loadri_rr>;
+ def: Loadxs_pat<load, i64, L4_loadrd_rr>;
+}
+
+// 'def pats' for load instructions with base + register offset and a zero
+// immediate value (no shift).
+class Loadxs_simple_pat<PatFrag Load, ValueType VT, InstHexagon MI>
+ : Pat<(VT (Load (add (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)))),
+ (VT (MI IntRegs:$Rs, IntRegs:$Rt, 0))>;
+
+let AddedComplexity = 20 in {
+ def: Loadxs_simple_pat<extloadi8, i32, L4_loadrub_rr>;
+ def: Loadxs_simple_pat<zextloadi8, i32, L4_loadrub_rr>;
+ def: Loadxs_simple_pat<sextloadi8, i32, L4_loadrb_rr>;
+ def: Loadxs_simple_pat<extloadi16, i32, L4_loadruh_rr>;
+ def: Loadxs_simple_pat<zextloadi16, i32, L4_loadruh_rr>;
+ def: Loadxs_simple_pat<sextloadi16, i32, L4_loadrh_rr>;
+ def: Loadxs_simple_pat<load, i32, L4_loadri_rr>;
+ def: Loadxs_simple_pat<load, i64, L4_loadrd_rr>;
+}
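+
+// Note on the AddedComplexity values: the shifted form (40) outranks the
+// simple register-register form (20), so an address that still contains a
+// shl is matched by the shifted-index load rather than being split apart.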
// zext i1->i64
-def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDriub ADDRriS11_0:$src1)))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1),
- s11_0ExtPred:$offset))),
- (i64 (COMBINE_Ir_V4 0, (LDriub_indexed IntRegs:$src1,
- s11_0ExtPred:$offset)))>,
- Requires<[HasV4T]>;
-
-// zext i16->i64
-def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDriuh ADDRriS11_1:$src1)))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 20 in
-def: Pat <(i64 (zextloadi16 (add (i32 IntRegs:$src1),
- s11_1ExtPred:$offset))),
- (i64 (COMBINE_Ir_V4 0, (LDriuh_indexed IntRegs:$src1,
- s11_1ExtPred:$offset)))>,
- Requires<[HasV4T]>;
-
-// anyext i16->i64
-def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDrih ADDRriS11_2:$src1)))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 20 in
-def: Pat <(i64 (extloadi16 (add (i32 IntRegs:$src1),
- s11_1ExtPred:$offset))),
- (i64 (COMBINE_Ir_V4 0, (LDrih_indexed IntRegs:$src1,
- s11_1ExtPred:$offset)))>,
- Requires<[HasV4T]>;
+def: Pat<(i64 (zext (i1 PredRegs:$src1))),
+ (Zext64 (C2_muxii PredRegs:$src1, 1, 0))>;
+
+// zext i32->i64
+def: Pat<(i64 (zext (i32 IntRegs:$src1))),
+ (Zext64 IntRegs:$src1)>;
// zext i32->i64
def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDriw ADDRriS11_2:$src1)))>,
- Requires<[HasV4T]>;
+ (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>;
let AddedComplexity = 100 in
def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
- (i64 (COMBINE_Ir_V4 0, (LDriw_indexed IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[HasV4T]>;
+ (i64 (A4_combineir 0, (L2_loadri_io IntRegs:$src1,
+ s11_2ExtPred:$offset)))>;
// anyext i32->i64
def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)),
- (i64 (COMBINE_Ir_V4 0, (LDriw ADDRriS11_2:$src1)))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 100 in
-def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
- (i64 (COMBINE_Ir_V4 0, (LDriw_indexed IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[HasV4T]>;
-
-
+ (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>;
//===----------------------------------------------------------------------===//
// LD -
@@ -467,194 +659,357 @@ def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
//===----------------------------------------------------------------------===//
// Template class for store instructions with Absolute set addressing mode.
//===----------------------------------------------------------------------===//
-let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT,
-addrMode = AbsoluteSet in
-class T_ST_abs_set<string mnemonic, RegisterClass RC>:
- STInst2<(outs IntRegs:$dst1),
- (ins RC:$src1, u0AlwaysExt:$src2),
- mnemonic#"($dst1=##$src2) = $src1",
- []>,
- Requires<[HasV4T]>;
+let isExtended = 1, opExtendable = 1, opExtentBits = 6,
+ addrMode = AbsoluteSet, isNVStorable = 1 in
+class T_ST_absset <string mnemonic, string BaseOp, RegisterClass RC,
+ bits<3> MajOp, MemAccessSize AccessSz, bit isHalf = 0>
+ : STInst<(outs IntRegs:$dst),
+ (ins u6Ext:$addr, RC:$src),
+ mnemonic#"($dst = #$addr) = $src"#!if(isHalf, ".h","")>, NewValueRel {
+ bits<5> dst;
+ bits<6> addr;
+ bits<5> src;
+ let accessSize = AccessSz;
+ let BaseOpcode = BaseOp#"_AbsSet";
+
+ let IClass = 0b1010;
+
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = dst;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = src;
+ let Inst{7} = 0b1;
+ let Inst{5-0} = addr;
+ }
-def STrid_abs_set_V4 : T_ST_abs_set <"memd", DoubleRegs>;
-def STrib_abs_set_V4 : T_ST_abs_set <"memb", IntRegs>;
-def STrih_abs_set_V4 : T_ST_abs_set <"memh", IntRegs>;
-def STriw_abs_set_V4 : T_ST_abs_set <"memw", IntRegs>;
+def S4_storerb_ap : T_ST_absset <"memb", "STrib", IntRegs, 0b000, ByteAccess>;
+def S4_storerh_ap : T_ST_absset <"memh", "STrih", IntRegs, 0b010,
+ HalfWordAccess>;
+def S4_storeri_ap : T_ST_absset <"memw", "STriw", IntRegs, 0b100, WordAccess>;
-//===----------------------------------------------------------------------===//
-// multiclass for store instructions with base + register offset addressing
-// mode
-//===----------------------------------------------------------------------===//
-multiclass ST_Idxd_shl_Pbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : STInst2<(outs),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
- RC:$src5),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2+$src3<<#$src4) = $src5",
- []>,
- Requires<[HasV4T]>;
+let isNVStorable = 0 in {
+ def S4_storerf_ap : T_ST_absset <"memh", "STrif", IntRegs,
+ 0b011, HalfWordAccess, 1>;
+ def S4_storerd_ap : T_ST_absset <"memd", "STrid", DoubleRegs,
+ 0b110, DoubleWordAccess>;
}
-multiclass ST_Idxd_shl_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Idxd_shl_Pbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Idxd_shl_Pbase<mnemonic, RC, PredNot, 1>;
+let opExtendable = 1, isNewValue = 1, isNVStore = 1, opNewValue = 2,
+isExtended = 1, opExtentBits = 6 in
+class T_ST_absset_nv <string mnemonic, string BaseOp, bits<2> MajOp,
+ MemAccessSize AccessSz >
+ : NVInst <(outs IntRegs:$dst),
+ (ins u6Ext:$addr, IntRegs:$src),
+ mnemonic#"($dst = #$addr) = $src.new">, NewValueRel {
+ bits<5> dst;
+ bits<6> addr;
+ bits<3> src;
+ let accessSize = AccessSz;
+ let BaseOpcode = BaseOp#"_AbsSet";
+
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = dst;
+ let Inst{13-11} = 0b000;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src;
+ let Inst{7} = 0b1;
+ let Inst{5-0} = addr;
}
+
+let mayStore = 1, addrMode = AbsoluteSet in {
+ def S4_storerbnew_ap : T_ST_absset_nv <"memb", "STrib", 0b00, ByteAccess>;
+ def S4_storerhnew_ap : T_ST_absset_nv <"memh", "STrih", 0b01, HalfWordAccess>;
+ def S4_storerinew_ap : T_ST_absset_nv <"memw", "STriw", 0b10, WordAccess>;
}
-let isNVStorable = 1 in
-multiclass ST_Idxd_shl<string mnemonic, string CextOp, RegisterClass RC> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl in {
- let isPredicable = 1 in
- def NAME#_V4 : STInst2<(outs),
- (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, RC:$src4),
- mnemonic#"($src1+$src2<<#$src3) = $src4",
- []>,
- Requires<[HasV4T]>;
-
- let isPredicated = 1 in {
- defm Pt_V4 : ST_Idxd_shl_Pred<mnemonic, RC, 0 >;
- defm NotPt_V4 : ST_Idxd_shl_Pred<mnemonic, RC, 1>;
- }
- }
+let isExtended = 1, opExtendable = 2, opExtentBits = 6, InputType = "imm",
+addrMode = BaseLongOffset, AddedComplexity = 40 in
+class T_StoreAbsReg <string mnemonic, string CextOp, RegisterClass RC,
+ bits<3> MajOp, MemAccessSize AccessSz, bit isHalf = 0>
+ : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Ext:$src3, RC:$src4),
+ mnemonic#"($src1<<#$src2 + #$src3) = $src4"#!if(isHalf, ".h",""),
+ []>, ImmRegShl, NewValueRel {
+
+ bits<5> src1;
+ bits<2> src2;
+ bits<6> src3;
+ bits<5> src4;
+
+ let accessSize = AccessSz;
+ let CextOpcode = CextOp;
+ let BaseOpcode = CextOp#"_shl";
+ let IClass = 0b1010;
+
+ let Inst{27-24} = 0b1101;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2{1};
+ let Inst{12-8} = src4;
+ let Inst{7} = 0b1;
+ let Inst{6} = src2{0};
+ let Inst{5-0} = src3;
}
-// multiclass for new-value store instructions with base + register offset
-// addressing mode.
-multiclass ST_Idxd_shl_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
- RC:$src5),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2+$src3<<#$src4) = $src5.new",
- []>,
- Requires<[HasV4T]>;
+def S4_storerb_ur : T_StoreAbsReg <"memb", "STrib", IntRegs, 0b000, ByteAccess>;
+def S4_storerh_ur : T_StoreAbsReg <"memh", "STrih", IntRegs, 0b010,
+ HalfWordAccess>;
+def S4_storerf_ur : T_StoreAbsReg <"memh", "STrif", IntRegs, 0b011,
+ HalfWordAccess, 1>;
+def S4_storeri_ur : T_StoreAbsReg <"memw", "STriw", IntRegs, 0b100, WordAccess>;
+def S4_storerd_ur : T_StoreAbsReg <"memd", "STrid", DoubleRegs, 0b110,
+ DoubleWordAccess>;
+
+let AddedComplexity = 40 in
+multiclass T_StoreAbsReg_Pats <InstHexagon MI, RegisterClass RC, ValueType VT,
+ PatFrag stOp> {
+ def : Pat<(stOp (VT RC:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u0AlwaysExtPred:$src3)),
+ (MI IntRegs:$src1, u2ImmPred:$src2, u0AlwaysExtPred:$src3, RC:$src4)>;
+
+ def : Pat<(stOp (VT RC:$src4),
+ (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3))),
+ (MI IntRegs:$src1, u2ImmPred:$src2, tglobaladdr:$src3, RC:$src4)>;
+
+ def : Pat<(stOp (VT RC:$src4),
+ (add IntRegs:$src1, (HexagonCONST32 tglobaladdr:$src3))),
+ (MI IntRegs:$src1, 0, tglobaladdr:$src3, RC:$src4)>;
}
-multiclass ST_Idxd_shl_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Idxd_shl_Pbase_nv<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Idxd_shl_Pbase_nv<mnemonic, RC, PredNot, 1>;
+defm : T_StoreAbsReg_Pats <S4_storerd_ur, DoubleRegs, i64, store>;
+defm : T_StoreAbsReg_Pats <S4_storeri_ur, IntRegs, i32, store>;
+defm : T_StoreAbsReg_Pats <S4_storerb_ur, IntRegs, i32, truncstorei8>;
+defm : T_StoreAbsReg_Pats <S4_storerh_ur, IntRegs, i32, truncstorei16>;
+
+let mayStore = 1, isNVStore = 1, isExtended = 1, addrMode = BaseLongOffset,
+ opExtentBits = 6, isNewValue = 1, opNewValue = 3, opExtendable = 2 in
+class T_StoreAbsRegNV <string mnemonic, string CextOp, bits<2> MajOp,
+ MemAccessSize AccessSz>
+ : NVInst <(outs ),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Ext:$src3, IntRegs:$src4),
+ mnemonic#"($src1<<#$src2 + #$src3) = $src4.new">, NewValueRel {
+ bits<5> src1;
+ bits<2> src2;
+ bits<6> src3;
+ bits<3> src4;
+
+ let CextOpcode = CextOp;
+ let BaseOpcode = CextOp#"_shl";
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1101101;
+ let Inst{12-11} = 0b00;
+ let Inst{7} = 0b1;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2{1};
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src4;
+ let Inst{6} = src2{0};
+ let Inst{5-0} = src3;
}
-}
-let mayStore = 1, isNVStore = 1 in
-multiclass ST_Idxd_shl_nv<string mnemonic, string CextOp, RegisterClass RC> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl in {
- let isPredicable = 1 in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, RC:$src4),
- mnemonic#"($src1+$src2<<#$src3) = $src4.new",
- []>,
- Requires<[HasV4T]>;
-
- let isPredicated = 1 in {
- defm Pt : ST_Idxd_shl_Pred_nv<mnemonic, RC, 0 >;
- defm NotPt : ST_Idxd_shl_Pred_nv<mnemonic, RC, 1>;
- }
+def S4_storerbnew_ur : T_StoreAbsRegNV <"memb", "STrib", 0b00, ByteAccess>;
+def S4_storerhnew_ur : T_StoreAbsRegNV <"memh", "STrih", 0b01, HalfWordAccess>;
+def S4_storerinew_ur : T_StoreAbsRegNV <"memw", "STriw", 0b10, WordAccess>;
+
+//===----------------------------------------------------------------------===//
+// Template classes for the non-predicated store instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+let isPredicable = 1 in
+class T_store_rr <string mnemonic, RegisterClass RC, bits<3> MajOp, bit isH>
+ : STInst < (outs ), (ins IntRegs:$Rs, IntRegs:$Ru, u2Imm:$u2, RC:$Rt),
+ mnemonic#"($Rs + $Ru<<#$u2) = $Rt"#!if(isH, ".h",""),
+ [],"",V4LDST_tc_st_SLOT01>, ImmRegShl, AddrModeRel {
+
+ bits<5> Rs;
+ bits<5> Ru;
+ bits<2> u2;
+ bits<5> Rt;
+
+ let IClass = 0b0011;
+
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Ru;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{4-0} = Rt;
}
-}
-let addrMode = BaseRegOffset, neverHasSideEffects = 1,
-validSubTargets = HasV4SubT in {
- let accessSize = ByteAccess in
- defm STrib_indexed_shl: ST_Idxd_shl<"memb", "STrib", IntRegs>,
- ST_Idxd_shl_nv<"memb", "STrib", IntRegs>, AddrModeRel;
+//===----------------------------------------------------------------------===//
+// Template classes for the predicated store instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+let isPredicated = 1 in
+class T_pstore_rr <string mnemonic, RegisterClass RC, bits<3> MajOp,
+ bit isNot, bit isPredNew, bit isH>
+ : STInst <(outs),
+ (ins PredRegs:$Pv, IntRegs:$Rs, IntRegs:$Ru, u2Imm:$u2, RC:$Rt),
+
+ !if(isNot, "if (!$Pv", "if ($Pv")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($Rs+$Ru<<#$u2) = $Rt"#!if(isH, ".h",""),
+ [], "", V4LDST_tc_st_SLOT01> , AddrModeRel{
+ bits<2> Pv;
+ bits<5> Rs;
+ bits<5> Ru;
+ bits<2> u2;
+ bits<5> Rt;
+
+ let isPredicatedFalse = isNot;
+ let isPredicatedNew = isPredNew;
- let accessSize = HalfWordAccess in
- defm STrih_indexed_shl: ST_Idxd_shl<"memh", "STrih", IntRegs>,
- ST_Idxd_shl_nv<"memh", "STrih", IntRegs>, AddrModeRel;
+ let IClass = 0b0011;
- let accessSize = WordAccess in
- defm STriw_indexed_shl: ST_Idxd_shl<"memw", "STriw", IntRegs>,
- ST_Idxd_shl_nv<"memw", "STriw", IntRegs>, AddrModeRel;
+ let Inst{27-26} = 0b01;
+ let Inst{25} = isPredNew;
+ let Inst{24} = isNot;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Ru;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{6-5} = Pv;
+ let Inst{4-0} = Rt;
+ }
- let isNVStorable = 0, accessSize = DoubleWordAccess in
- defm STrid_indexed_shl: ST_Idxd_shl<"memd", "STrid", DoubleRegs>, AddrModeRel;
-}
+//===----------------------------------------------------------------------===//
+// Template classes for the new-value store instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+let isPredicable = 1, isNewValue = 1, opNewValue = 3 in
+class T_store_new_rr <string mnemonic, bits<2> MajOp> :
+ NVInst < (outs ), (ins IntRegs:$Rs, IntRegs:$Ru, u2Imm:$u2, IntRegs:$Nt),
+ mnemonic#"($Rs + $Ru<<#$u2) = $Nt.new",
+ [],"",V4LDST_tc_st_SLOT0>, ImmRegShl, AddrModeRel {
-let Predicates = [HasV4T], AddedComplexity = 10 in {
-def : Pat<(truncstorei8 (i32 IntRegs:$src4),
- (add IntRegs:$src1, (shl IntRegs:$src2,
- u2ImmPred:$src3))),
- (STrib_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2,
- u2ImmPred:$src3, IntRegs:$src4)>;
+ bits<5> Rs;
+ bits<5> Ru;
+ bits<2> u2;
+ bits<3> Nt;
-def : Pat<(truncstorei16 (i32 IntRegs:$src4),
- (add IntRegs:$src1, (shl IntRegs:$src2,
- u2ImmPred:$src3))),
- (STrih_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2,
- u2ImmPred:$src3, IntRegs:$src4)>;
+ let IClass = 0b0011;
-def : Pat<(store (i32 IntRegs:$src4),
- (add IntRegs:$src1, (shl IntRegs:$src2, u2ImmPred:$src3))),
- (STriw_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2,
- u2ImmPred:$src3, IntRegs:$src4)>;
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Ru;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{4-3} = MajOp;
+ let Inst{2-0} = Nt;
+ }
-def : Pat<(store (i64 DoubleRegs:$src4),
- (add IntRegs:$src1, (shl IntRegs:$src2, u2ImmPred:$src3))),
- (STrid_indexed_shl_V4 IntRegs:$src1, IntRegs:$src2,
- u2ImmPred:$src3, DoubleRegs:$src4)>;
-}
+//===----------------------------------------------------------------------===//
+// Template classes for the predicated new-value store instructions with
+// base + register offset addressing mode
+//===----------------------------------------------------------------------===//
+let isPredicated = 1, isNewValue = 1, opNewValue = 4 in
+class T_pstore_new_rr <string mnemonic, bits<2> MajOp, bit isNot, bit isPredNew>
+ : NVInst<(outs),
+ (ins PredRegs:$Pv, IntRegs:$Rs, IntRegs:$Ru, u2Imm:$u2, IntRegs:$Nt),
+ !if(isNot, "if (!$Pv", "if ($Pv")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($Rs+$Ru<<#$u2) = $Nt.new",
+ [], "", V4LDST_tc_st_SLOT0>, AddrModeRel {
+ bits<2> Pv;
+ bits<5> Rs;
+ bits<5> Ru;
+ bits<2> u2;
+ bits<3> Nt;
+
+ let isPredicatedFalse = isNot;
+ let isPredicatedNew = isPredNew;
-let isExtended = 1, opExtendable = 2 in
-class T_ST_LongOff <string mnemonic, PatFrag stOp, RegisterClass RC, ValueType VT> :
- STInst<(outs),
- (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, RC:$src4),
- mnemonic#"($src1<<#$src2+##$src3) = $src4",
- [(stOp (VT RC:$src4),
- (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
- u0AlwaysExtPred:$src3))]>,
- Requires<[HasV4T]>;
+ let IClass = 0b0011;
+ let Inst{27-26} = 0b01;
+ let Inst{25} = isPredNew;
+ let Inst{24} = isNot;
+ let Inst{23-21} = 0b101;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Ru;
+ let Inst{13} = u2{1};
+ let Inst{7} = u2{0};
+ let Inst{6-5} = Pv;
+ let Inst{4-3} = MajOp;
+ let Inst{2-0} = Nt;
+ }
-let isExtended = 1, opExtendable = 2, mayStore = 1, isNVStore = 1 in
-class T_ST_LongOff_nv <string mnemonic> :
- NVInst_V4<(outs),
- (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4),
- mnemonic#"($src1<<#$src2+##$src3) = $src4.new",
- []>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// multiclass for store instructions with base + register offset addressing
+// mode
+//===----------------------------------------------------------------------===//
+let isNVStorable = 1 in
+multiclass ST_Idxd_shl<string mnemonic, string CextOp, RegisterClass RC,
+ bits<3> MajOp, bit isH = 0> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl in {
+ def S4_#NAME#_rr : T_store_rr <mnemonic, RC, MajOp, isH>;
-multiclass ST_LongOff <string mnemonic, string BaseOp, PatFrag stOp> {
- let BaseOpcode = BaseOp#"_shl" in {
- let isNVStorable = 1 in
- def NAME#_V4 : T_ST_LongOff<mnemonic, stOp, IntRegs, i32>;
+ // Predicated
+ def S4_p#NAME#t_rr : T_pstore_rr <mnemonic, RC, MajOp, 0, 0, isH>;
+ def S4_p#NAME#f_rr : T_pstore_rr <mnemonic, RC, MajOp, 1, 0, isH>;
- def NAME#_nv_V4 : T_ST_LongOff_nv<mnemonic>;
+ // Predicated new
+ def S4_p#NAME#tnew_rr : T_pstore_rr <mnemonic, RC, MajOp, 0, 1, isH>;
+ def S4_p#NAME#fnew_rr : T_pstore_rr <mnemonic, RC, MajOp, 1, 1, isH>;
}
}
-let AddedComplexity = 10, validSubTargets = HasV4SubT in {
- def STrid_shl_V4 : T_ST_LongOff<"memd", store, DoubleRegs, i64>;
- defm STrib_shl : ST_LongOff <"memb", "STrib", truncstorei8>, NewValueRel;
- defm STrih_shl : ST_LongOff <"memh", "Strih", truncstorei16>, NewValueRel;
- defm STriw_shl : ST_LongOff <"memw", "STriw", store>, NewValueRel;
+//===----------------------------------------------------------------------===//
+// multiclass for new-value store instructions with base + register offset
+// addressing mode.
+//===----------------------------------------------------------------------===//
+let mayStore = 1, isNVStore = 1 in
+multiclass ST_Idxd_shl_nv <string mnemonic, string CextOp, RegisterClass RC,
+ bits<2> MajOp> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed_shl in {
+ def S4_#NAME#new_rr : T_store_new_rr<mnemonic, MajOp>;
+
+ // Predicated
+ def S4_p#NAME#newt_rr : T_pstore_new_rr <mnemonic, MajOp, 0, 0>;
+ def S4_p#NAME#newf_rr : T_pstore_new_rr <mnemonic, MajOp, 1, 0>;
+
+ // Predicated new
+ def S4_p#NAME#newtnew_rr : T_pstore_new_rr <mnemonic, MajOp, 0, 1>;
+ def S4_p#NAME#newfnew_rr : T_pstore_new_rr <mnemonic, MajOp, 1, 1>;
+ }
}
-let AddedComplexity = 40 in
-multiclass T_ST_LOff_Pats <InstHexagon I, RegisterClass RC, ValueType VT,
- PatFrag stOp> {
- def : Pat<(stOp (VT RC:$src4),
- (add (shl IntRegs:$src1, u2ImmPred:$src2),
- (NumUsesBelowThresCONST32 tglobaladdr:$src3))),
- (I IntRegs:$src1, u2ImmPred:$src2, tglobaladdr:$src3, RC:$src4)>;
+let addrMode = BaseRegOffset, InputType = "reg", hasSideEffects = 0 in {
+ let accessSize = ByteAccess in
+ defm storerb: ST_Idxd_shl<"memb", "STrib", IntRegs, 0b000>,
+ ST_Idxd_shl_nv<"memb", "STrib", IntRegs, 0b00>;
- def : Pat<(stOp (VT RC:$src4),
- (add IntRegs:$src1,
- (NumUsesBelowThresCONST32 tglobaladdr:$src3))),
- (I IntRegs:$src1, 0, tglobaladdr:$src3, RC:$src4)>;
+ let accessSize = HalfWordAccess in
+ defm storerh: ST_Idxd_shl<"memh", "STrih", IntRegs, 0b010>,
+ ST_Idxd_shl_nv<"memh", "STrih", IntRegs, 0b01>;
+
+ let accessSize = WordAccess in
+ defm storeri: ST_Idxd_shl<"memw", "STriw", IntRegs, 0b100>,
+ ST_Idxd_shl_nv<"memw", "STriw", IntRegs, 0b10>;
+
+ let isNVStorable = 0, accessSize = DoubleWordAccess in
+ defm storerd: ST_Idxd_shl<"memd", "STrid", DoubleRegs, 0b110>;
+
+ let isNVStorable = 0, accessSize = HalfWordAccess in
+ defm storerf: ST_Idxd_shl<"memh", "STrif", IntRegs, 0b011, 1>;
}
-defm : T_ST_LOff_Pats<STrid_shl_V4, DoubleRegs, i64, store>;
-defm : T_ST_LOff_Pats<STriw_shl_V4, IntRegs, i32, store>;
-defm : T_ST_LOff_Pats<STrib_shl_V4, IntRegs, i32, truncstorei8>;
-defm : T_ST_LOff_Pats<STrih_shl_V4, IntRegs, i32, truncstorei16>;
+class Storexs_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
+ : Pat<(Store Value:$Ru, (add (i32 IntRegs:$Rs),
+ (i32 (shl (i32 IntRegs:$Rt), u2ImmPred:$u2)))),
+ (MI IntRegs:$Rs, IntRegs:$Rt, imm:$u2, Value:$Ru)>;
+
+let AddedComplexity = 40 in {
+ def: Storexs_pat<truncstorei8, I32, S4_storerb_rr>;
+ def: Storexs_pat<truncstorei16, I32, S4_storerh_rr>;
+ def: Storexs_pat<store, I32, S4_storeri_rr>;
+ def: Storexs_pat<store, I64, S4_storerd_rr>;
+}
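+
+// For illustration (assuming the usual C-to-DAG lowering): an indexed store
+// such as ((int *)p)[i] = v gives the DAG
+//   (store v, (add p, (shl i, #2)))
+// which the patterns above select to S4_storeri_rr, i.e.
+//   memw(r0+r1<<#2) = r2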
// memd(Rx++#s4:3)=Rtt
// memd(Rx++#s4:3:circ(Mu))=Rtt
@@ -668,75 +1023,151 @@ defm : T_ST_LOff_Pats<STrih_shl_V4, IntRegs, i32, truncstorei16>;
// TODO: needs to be implemented.
//===----------------------------------------------------------------------===//
+// Template class
+//===----------------------------------------------------------------------===//
+let isPredicable = 1, isExtendable = 1, isExtentSigned = 1, opExtentBits = 8,
+ opExtendable = 2 in
+class T_StoreImm <string mnemonic, Operand OffsetOp, bits<2> MajOp >
+ : STInst <(outs ), (ins IntRegs:$Rs, OffsetOp:$offset, s8Ext:$S8),
+ mnemonic#"($Rs+#$offset)=#$S8",
+ [], "", V4LDST_tc_st_SLOT01>,
+ ImmRegRel, PredNewRel {
+ bits<5> Rs;
+ bits<8> S8;
+ bits<8> offset;
+ bits<6> offsetBits;
+
+ string OffsetOpStr = !cast<string>(OffsetOp);
+ let offsetBits = !if (!eq(OffsetOpStr, "u6_2Imm"), offset{7-2},
+ !if (!eq(OffsetOpStr, "u6_1Imm"), offset{6-1},
+ /* u6_0Imm */ offset{5-0}));
+
+ let IClass = 0b0011;
+
+ let Inst{27-25} = 0b110;
+ let Inst{22-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{12-7} = offsetBits;
+ let Inst{13} = S8{7};
+ let Inst{6-0} = S8{6-0};
+ }
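+// Worked example (illustrative): the halfword form uses u6_1Imm, so the
+// byte offset in memh(r1+#10) = #-1 is encoded as offsetBits =
+// offset{6-1} = 5; the implied alignment bit is not stored.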
+
+let isPredicated = 1, isExtendable = 1, isExtentSigned = 1, opExtentBits = 6,
+ opExtendable = 3 in
+class T_StoreImm_pred <string mnemonic, Operand OffsetOp, bits<2> MajOp,
+ bit isPredNot, bit isPredNew >
+ : STInst <(outs ),
+ (ins PredRegs:$Pv, IntRegs:$Rs, OffsetOp:$offset, s6Ext:$S6),
+ !if(isPredNot, "if (!$Pv", "if ($Pv")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($Rs+#$offset)=#$S6",
+ [], "", V4LDST_tc_st_SLOT01>,
+ ImmRegRel, PredNewRel {
+ bits<2> Pv;
+ bits<5> Rs;
+ bits<6> S6;
+ bits<8> offset;
+ bits<6> offsetBits;
+
+ string OffsetOpStr = !cast<string>(OffsetOp);
+ let offsetBits = !if (!eq(OffsetOpStr, "u6_2Imm"), offset{7-2},
+ !if (!eq(OffsetOpStr, "u6_1Imm"), offset{6-1},
+ /* u6_0Imm */ offset{5-0}));
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isPredNot;
+
+ let IClass = 0b0011;
+
+ let Inst{27-25} = 0b100;
+ let Inst{24} = isPredNew;
+ let Inst{23} = isPredNot;
+ let Inst{22-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{13} = S6{5};
+ let Inst{12-7} = offsetBits;
+ let Inst{6-5} = Pv;
+ let Inst{4-0} = S6{4-0};
+ }
+
+
+//===----------------------------------------------------------------------===//
// multiclass for store instructions with base + immediate offset
// addressing mode and immediate stored value.
 // mem[bhw](Rs+#u6:[012])=#S8
 // if ([!]Pv[.new]) mem[bhw](Rs+#u6:[012])=#S6
//===----------------------------------------------------------------------===//
-multiclass ST_Imm_Pbase<string mnemonic, Operand OffsetOp, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : STInst2<(outs),
- (ins PredRegs:$src1, IntRegs:$src2, OffsetOp:$src3, s6Ext:$src4),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2+#$src3) = #$src4",
- []>,
- Requires<[HasV4T]>;
-}
-multiclass ST_Imm_Pred<string mnemonic, Operand OffsetOp, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Imm_Pbase<mnemonic, OffsetOp, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Imm_Pbase<mnemonic, OffsetOp, PredNot, 1>;
- }
+multiclass ST_Imm_Pred <string mnemonic, Operand OffsetOp, bits<2> MajOp,
+ bit PredNot> {
+ def _io : T_StoreImm_pred <mnemonic, OffsetOp, MajOp, PredNot, 0>;
+  // Predicated new
+ def new_io : T_StoreImm_pred <mnemonic, OffsetOp, MajOp, PredNot, 1>;
}
-let isExtendable = 1, isExtentSigned = 1, neverHasSideEffects = 1 in
-multiclass ST_Imm<string mnemonic, string CextOp, Operand OffsetOp> {
+multiclass ST_Imm <string mnemonic, string CextOp, Operand OffsetOp,
+ bits<2> MajOp> {
let CextOpcode = CextOp, BaseOpcode = CextOp#_imm in {
- let opExtendable = 2, opExtentBits = 8, isPredicable = 1 in
- def NAME#_V4 : STInst2<(outs),
- (ins IntRegs:$src1, OffsetOp:$src2, s8Ext:$src3),
- mnemonic#"($src1+#$src2) = #$src3",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 3, opExtentBits = 6, isPredicated = 1 in {
- defm Pt_V4 : ST_Imm_Pred<mnemonic, OffsetOp, 0>;
- defm NotPt_V4 : ST_Imm_Pred<mnemonic, OffsetOp, 1 >;
- }
+ def _io : T_StoreImm <mnemonic, OffsetOp, MajOp>;
+
+ defm t : ST_Imm_Pred <mnemonic, OffsetOp, MajOp, 0>;
+ defm f : ST_Imm_Pred <mnemonic, OffsetOp, MajOp, 1>;
}
}
-let addrMode = BaseImmOffset, InputType = "imm",
-validSubTargets = HasV4SubT in {
+let hasSideEffects = 0, addrMode = BaseImmOffset,
+ InputType = "imm" in {
let accessSize = ByteAccess in
- defm STrib_imm : ST_Imm<"memb", "STrib", u6_0Imm>, ImmRegRel, PredNewRel;
+ defm S4_storeirb : ST_Imm<"memb", "STrib", u6_0Imm, 0b00>;
let accessSize = HalfWordAccess in
- defm STrih_imm : ST_Imm<"memh", "STrih", u6_1Imm>, ImmRegRel, PredNewRel;
+ defm S4_storeirh : ST_Imm<"memh", "STrih", u6_1Imm, 0b01>;
let accessSize = WordAccess in
- defm STriw_imm : ST_Imm<"memw", "STriw", u6_2Imm>, ImmRegRel, PredNewRel;
+ defm S4_storeiri : ST_Imm<"memw", "STriw", u6_2Imm, 0b10>;
}
-let Predicates = [HasV4T], AddedComplexity = 10 in {
-def: Pat<(truncstorei8 s8ExtPred:$src3, (add IntRegs:$src1, u6_0ImmPred:$src2)),
- (STrib_imm_V4 IntRegs:$src1, u6_0ImmPred:$src2, s8ExtPred:$src3)>;
+def IMM_BYTE : SDNodeXForm<imm, [{
+  // A value such as -1 is represented as 255 in the i32 constant node;
+  // assigning it to a byte restores the desired signed value.
+ int8_t imm = N->getSExtValue();
+ return CurDAG->getTargetConstant(imm, MVT::i32);
+}]>;
-def: Pat<(truncstorei16 s8ExtPred:$src3, (add IntRegs:$src1,
- u6_1ImmPred:$src2)),
- (STrih_imm_V4 IntRegs:$src1, u6_1ImmPred:$src2, s8ExtPred:$src3)>;
+def IMM_HALF : SDNodeXForm<imm, [{
+  // A value such as -1 is represented as 65535 in the i32 constant node;
+  // assigning it to a short restores the desired signed value.
+ int16_t imm = N->getSExtValue();
+ return CurDAG->getTargetConstant(imm, MVT::i32);
+}]>;
-def: Pat<(store s8ExtPred:$src3, (add IntRegs:$src1, u6_2ImmPred:$src2)),
- (STriw_imm_V4 IntRegs:$src1, u6_2ImmPred:$src2, s8ExtPred:$src3)>;
+def IMM_WORD : SDNodeXForm<imm, [{
+  // A value such as -1 could be represented as 4294967295. The DAG does not
+  // currently produce that form, but an optimization might convert -1 into
+  // a large positive number; assigning it to a 32-bit word restores the
+  // desired signed value.
+ int32_t imm = N->getSExtValue();
+ return CurDAG->getTargetConstant(imm, MVT::i32);
+}]>;
+
+def ToImmByte : OutPatFrag<(ops node:$R), (IMM_BYTE $R)>;
+def ToImmHalf : OutPatFrag<(ops node:$R), (IMM_HALF $R)>;
+def ToImmWord : OutPatFrag<(ops node:$R), (IMM_WORD $R)>;
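+// For illustration (assuming a byte store of a masked value): if the i32
+// constant node carries 255, IMM_BYTE's int8_t assignment recovers -1, so
+// the selected store prints its immediate as #-1, matching the signed s8
+// operand.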
+
+let AddedComplexity = 40 in {
+  // Frameindex patterns are deliberately not used for these stores, because
+  // the offset is not extendable. This could cause problems when frame
+  // indices are eliminated, since the offset relative to R29/R30 may not
+  // fit in the u6 field.
+ def: Storexm_add_pat<truncstorei8, s8ExtPred, u6_0ImmPred, ToImmByte,
+ S4_storeirb_io>;
+ def: Storexm_add_pat<truncstorei16, s8ExtPred, u6_1ImmPred, ToImmHalf,
+ S4_storeirh_io>;
+ def: Storexm_add_pat<store, s8ExtPred, u6_2ImmPred, ToImmWord,
+ S4_storeiri_io>;
}
-let AddedComplexity = 6 in
-def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (STrib_imm_V4 IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
+def: Storexm_simple_pat<truncstorei8, s8ExtPred, ToImmByte, S4_storeirb_io>;
+def: Storexm_simple_pat<truncstorei16, s8ExtPred, ToImmHalf, S4_storeirh_io>;
+def: Storexm_simple_pat<store, s8ExtPred, ToImmWord, S4_storeiri_io>;
// memb(Rx++#s4:0:circ(Mu))=Rt
// memb(Rx++I:circ(Mu))=Rt
@@ -744,16 +1175,10 @@ def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)),
// memb(Rx++Mu:brev)=Rt
// memb(gp+#u16:0)=Rt
-
// Store halfword.
// TODO: needs to be implemented
// memh(Re=#U6)=Rt.H
// memh(Rs+#s11:1)=Rt.H
-let AddedComplexity = 6 in
-def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (STrih_imm_V4 IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
-
// memh(Rs+Ru<<#u2)=Rt.H
// TODO: needs to be implemented.
@@ -770,7 +1195,6 @@ def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)),
// if ([!]Pv[.new]) memh(#u6)=Rt.H
// if ([!]Pv[.new]) memh(#u6)=Rt
-
// if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt.H
// TODO: needs to be implemented.
@@ -780,20 +1204,6 @@ def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)),
// Store word.
// memw(Re=#U6)=Rt
// TODO: Needs to be implemented.
-
-// Store predicate:
-let neverHasSideEffects = 1 in
-def STriw_pred_V4 : STInst2<(outs),
- (ins MEMri:$addr, PredRegs:$src1),
- "Error; should not emit",
- []>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 6 in
-def : Pat <(store s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (STriw_imm_V4 IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
-
// memw(Rx++#s4:2)=Rt
// memw(Rx++#s4:2:circ(Mu))=Rt
// memw(Rx++I:circ(Mu))=Rt
@@ -809,175 +1219,285 @@ def : Pat <(store s8ExtPred:$src2, (i32 IntRegs:$src1)),
// NV/ST +
//===----------------------------------------------------------------------===//
-// multiclass for new-value store instructions with base + immediate offset.
-//
-multiclass ST_Idxd_Pbase_nv<string mnemonic, RegisterClass RC,
- Operand predImmOp, bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2+#$src3) = $src4.new",
- []>,
- Requires<[HasV4T]>;
-}
+let opNewValue = 2, opExtendable = 1, isExtentSigned = 1, isPredicable = 1 in
+class T_store_io_nv <string mnemonic, RegisterClass RC,
+ Operand ImmOp, bits<2>MajOp>
+ : NVInst_V4 <(outs),
+ (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
+ mnemonic#"($src1+#$src2) = $src3.new",
+ [],"",ST_tc_st_SLOT0> {
+ bits<5> src1;
+ bits<13> src2; // Actual address offset
+ bits<3> src3;
+ bits<11> offsetBits; // Represents offset encoding
+
+ let opExtentBits = !if (!eq(mnemonic, "memb"), 11,
+ !if (!eq(mnemonic, "memh"), 12,
+ !if (!eq(mnemonic, "memw"), 13, 0)));
-multiclass ST_Idxd_Pred_nv<string mnemonic, RegisterClass RC, Operand predImmOp,
- bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Idxd_Pbase_nv<mnemonic, RC, predImmOp, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Idxd_Pbase_nv<mnemonic, RC, predImmOp, PredNot, 1>;
+ let opExtentAlign = !if (!eq(mnemonic, "memb"), 0,
+ !if (!eq(mnemonic, "memh"), 1,
+ !if (!eq(mnemonic, "memw"), 2, 0)));
+
+ let offsetBits = !if (!eq(mnemonic, "memb"), src2{10-0},
+ !if (!eq(mnemonic, "memh"), src2{11-1},
+ !if (!eq(mnemonic, "memw"), src2{12-2}, 0)));
+
+ let IClass = 0b1010;
+
+ let Inst{27} = 0b0;
+ let Inst{26-25} = offsetBits{10-9};
+ let Inst{24-21} = 0b1101;
+ let Inst{20-16} = src1;
+ let Inst{13} = offsetBits{8};
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src3;
+ let Inst{7-0} = offsetBits{7-0};
}
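+
+// Worked example (illustrative): for the word form, src2 is a word-aligned
+// byte offset, so memw(r1+#20) = r2.new encodes offsetBits =
+// src2{12-2} = 5; opExtentBits = 13 covers the 11 encoded bits plus the
+// two implied alignment bits.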
-}
-let mayStore = 1, isNVStore = 1, neverHasSideEffects = 1, isExtendable = 1 in
+let opExtendable = 2, opNewValue = 3, isPredicated = 1 in
+class T_pstore_io_nv <string mnemonic, RegisterClass RC, Operand predImmOp,
+ bits<2>MajOp, bit PredNot, bit isPredNew>
+ : NVInst_V4 <(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC:$src4),
+ !if(PredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($src2+#$src3) = $src4.new",
+ [],"",V2LDST_tc_st_SLOT0> {
+ bits<2> src1;
+ bits<5> src2;
+ bits<9> src3;
+ bits<3> src4;
+ bits<6> offsetBits; // Represents offset encoding
+
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = PredNot;
+ let opExtentBits = !if (!eq(mnemonic, "memb"), 6,
+ !if (!eq(mnemonic, "memh"), 7,
+ !if (!eq(mnemonic, "memw"), 8, 0)));
+
+ let opExtentAlign = !if (!eq(mnemonic, "memb"), 0,
+ !if (!eq(mnemonic, "memh"), 1,
+ !if (!eq(mnemonic, "memw"), 2, 0)));
+
+ let offsetBits = !if (!eq(mnemonic, "memb"), src3{5-0},
+ !if (!eq(mnemonic, "memh"), src3{6-1},
+ !if (!eq(mnemonic, "memw"), src3{7-2}, 0)));
+
+ let IClass = 0b0100;
+
+ let Inst{27} = 0b0;
+ let Inst{26} = PredNot;
+ let Inst{25} = isPredNew;
+ let Inst{24-21} = 0b0101;
+ let Inst{20-16} = src2;
+ let Inst{13} = offsetBits{5};
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src4;
+ let Inst{7-3} = offsetBits{4-0};
+ let Inst{2} = 0b0;
+ let Inst{1-0} = src1;
+ }
+
+// multiclass for new-value store instructions with base + immediate offset.
+//
+let mayStore = 1, isNVStore = 1, isNewValue = 1, hasSideEffects = 0,
+ isExtendable = 1 in
multiclass ST_Idxd_nv<string mnemonic, string CextOp, RegisterClass RC,
- Operand ImmOp, Operand predImmOp, bits<5> ImmBits,
- bits<5> PredImmBits> {
+ Operand ImmOp, Operand predImmOp, bits<2> MajOp> {
let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
- let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1 in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
- mnemonic#"($src1+#$src2) = $src3.new",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits,
- isPredicated = 1 in {
- defm Pt : ST_Idxd_Pred_nv<mnemonic, RC, predImmOp, 0>;
- defm NotPt : ST_Idxd_Pred_nv<mnemonic, RC, predImmOp, 1>;
- }
+ def S2_#NAME#new_io : T_store_io_nv <mnemonic, RC, ImmOp, MajOp>;
+ // Predicated
+ def S2_p#NAME#newt_io :T_pstore_io_nv <mnemonic, RC, predImmOp, MajOp, 0, 0>;
+ def S2_p#NAME#newf_io :T_pstore_io_nv <mnemonic, RC, predImmOp, MajOp, 1, 0>;
+ // Predicated new
+ def S4_p#NAME#newtnew_io :T_pstore_io_nv <mnemonic, RC, predImmOp,
+ MajOp, 0, 1>;
+ def S4_p#NAME#newfnew_io :T_pstore_io_nv <mnemonic, RC, predImmOp,
+ MajOp, 1, 1>;
}
}
-let addrMode = BaseImmOffset, validSubTargets = HasV4SubT in {
+let addrMode = BaseImmOffset, InputType = "imm" in {
let accessSize = ByteAccess in
- defm STrib_indexed: ST_Idxd_nv<"memb", "STrib", IntRegs, s11_0Ext,
- u6_0Ext, 11, 6>, AddrModeRel;
+ defm storerb: ST_Idxd_nv<"memb", "STrib", IntRegs, s11_0Ext,
+ u6_0Ext, 0b00>, AddrModeRel;
- let accessSize = HalfWordAccess in
- defm STrih_indexed: ST_Idxd_nv<"memh", "STrih", IntRegs, s11_1Ext,
- u6_1Ext, 12, 7>, AddrModeRel;
+ let accessSize = HalfWordAccess, opExtentAlign = 1 in
+ defm storerh: ST_Idxd_nv<"memh", "STrih", IntRegs, s11_1Ext,
+ u6_1Ext, 0b01>, AddrModeRel;
- let accessSize = WordAccess in
- defm STriw_indexed: ST_Idxd_nv<"memw", "STriw", IntRegs, s11_2Ext,
- u6_2Ext, 13, 8>, AddrModeRel;
+ let accessSize = WordAccess, opExtentAlign = 2 in
+ defm storeri: ST_Idxd_nv<"memw", "STriw", IntRegs, s11_2Ext,
+ u6_2Ext, 0b10>, AddrModeRel;
}
-// multiclass for new-value store instructions with base + immediate offset.
-// and MEMri operand.
-multiclass ST_MEMri_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, MEMri:$addr, RC: $src2),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($addr) = $src2.new",
- []>,
- Requires<[HasV4T]>;
-}
-
-multiclass ST_MEMri_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_MEMri_Pbase_nv<mnemonic, RC, PredNot, 0>;
-
- // Predicate new
- defm _cdn#NAME : ST_MEMri_Pbase_nv<mnemonic, RC, PredNot, 1>;
- }
-}
-
-let mayStore = 1, isNVStore = 1, isExtendable = 1, neverHasSideEffects = 1 in
-multiclass ST_MEMri_nv<string mnemonic, string CextOp, RegisterClass RC,
- bits<5> ImmBits, bits<5> PredImmBits> {
-
- let CextOpcode = CextOp, BaseOpcode = CextOp in {
- let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
- isPredicable = 1 in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins MEMri:$addr, RC:$src),
- mnemonic#"($addr) = $src.new",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits,
- neverHasSideEffects = 1, isPredicated = 1 in {
- defm Pt : ST_MEMri_Pred_nv<mnemonic, RC, 0>;
- defm NotPt : ST_MEMri_Pred_nv<mnemonic, RC, 1>;
- }
+//===----------------------------------------------------------------------===//
+// Post increment loads with register offset.
+//===----------------------------------------------------------------------===//
+
+let hasNewValue = 1 in
+def L2_loadbsw2_pr : T_load_pr <"membh", IntRegs, 0b0001, HalfWordAccess>;
+
+def L2_loadbsw4_pr : T_load_pr <"membh", DoubleRegs, 0b0111, WordAccess>;
+
+let hasSideEffects = 0, addrMode = PostInc in
+class T_loadalign_pr <string mnemonic, bits<4> MajOp, MemAccessSize AccessSz>
+ : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$_dst_),
+ (ins DoubleRegs:$src1, IntRegs:$src2, ModRegs:$src3),
+ "$dst = "#mnemonic#"($src2++$src3)", [],
+ "$src1 = $dst, $src2 = $_dst_"> {
+ bits<5> dst;
+ bits<5> src2;
+ bits<1> src3;
+
+ let accessSize = AccessSz;
+ let IClass = 0b1001;
+
+ let Inst{27-25} = 0b110;
+ let Inst{24-21} = MajOp;
+ let Inst{20-16} = src2;
+ let Inst{13} = src3;
+ let Inst{12} = 0b0;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = dst;
}
-}
-let addrMode = BaseImmOffset, isMEMri = "true", validSubTargets = HasV4SubT,
-mayStore = 1 in {
- let accessSize = ByteAccess in
- defm STrib: ST_MEMri_nv<"memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
+def L2_loadalignb_pr : T_loadalign_pr <"memb_fifo", 0b0100, ByteAccess>;
+def L2_loadalignh_pr : T_loadalign_pr <"memh_fifo", 0b0010, HalfWordAccess>;
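+
+// For illustration: L2_loadalignb_pr assembles roughly as
+//   r1:0 = memb_fifo(r2++m0)
+// shifting the loaded byte into the r1:0 pair while post-incrementing r2
+// by the modifier register m0.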
- let accessSize = HalfWordAccess in
- defm STrih: ST_MEMri_nv<"memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated post increment .new stores
+// mem[bhw](Rx++#s4:[012])=Nt.new
+//===----------------------------------------------------------------------===//
+let isPredicable = 1, hasSideEffects = 0, addrMode = PostInc, isNVStore = 1,
+ isNewValue = 1, opNewValue = 3 in
+class T_StorePI_nv <string mnemonic, Operand ImmOp, bits<2> MajOp >
+ : NVInstPI_V4 <(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, ImmOp:$offset, IntRegs:$src2),
+ mnemonic#"($src1++#$offset) = $src2.new",
+ [], "$src1 = $_dst_">,
+ AddrModeRel {
+ bits<5> src1;
+ bits<3> src2;
+ bits<7> offset;
+ bits<4> offsetBits;
- let accessSize = WordAccess in
- defm STriw: ST_MEMri_nv<"memw", "STriw", IntRegs, 13, 8>, AddrModeRel;
-}
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0}));
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = src1;
+ let Inst{13} = 0b0;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src2;
+ let Inst{7} = 0b0;
+ let Inst{6-3} = offsetBits;
+ let Inst{1} = 0b0;
+ }
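+
+// Worked example (illustrative): memw(r2++#8) = r3.new uses s4_2Imm, so
+// offsetBits = offset{5-2} = 2, and the "$src1 = $_dst_" constraint writes
+// r2+8 back to the base register.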
//===----------------------------------------------------------------------===//
-// Post increment store
-// mem[bhwd](Rx++#s4:[0123])=Nt.new
+// Template class for predicated post increment .new stores
+// if([!]Pv[.new]) mem[bhw](Rx++#s4:[012])=Nt.new
//===----------------------------------------------------------------------===//
+let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc, isNVStore = 1,
+ isNewValue = 1, opNewValue = 4 in
+class T_StorePI_nv_pred <string mnemonic, Operand ImmOp,
+ bits<2> MajOp, bit isPredNot, bit isPredNew >
+ : NVInstPI_V4 <(outs IntRegs:$_dst_),
+ (ins PredRegs:$src1, IntRegs:$src2,
+ ImmOp:$offset, IntRegs:$src3),
+ !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#mnemonic#"($src2++#$offset) = $src3.new",
+ [], "$src2 = $_dst_">,
+ AddrModeRel {
+ bits<2> src1;
+ bits<5> src2;
+ bits<3> src3;
+ bits<7> offset;
+ bits<4> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2},
+ !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1},
+ /* s4_0Imm */ offset{3-0}));
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isPredNot;
+
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = src2;
+ let Inst{13} = 0b1;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src3;
+ let Inst{7} = isPredNew;
+ let Inst{6-3} = offsetBits;
+ let Inst{2} = isPredNot;
+ let Inst{1-0} = src1;
+ }
+
+multiclass ST_PostInc_Pred_nv<string mnemonic, Operand ImmOp,
+ bits<2> MajOp, bit PredNot> {
+ def _pi : T_StorePI_nv_pred <mnemonic, ImmOp, MajOp, PredNot, 0>;
-multiclass ST_PostInc_Pbase_nv<string mnemonic, RegisterClass RC, Operand ImmOp,
- bit isNot, bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"($src2++#$offset) = $src3.new",
- [],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
+  // Predicated new
+ def new_pi : T_StorePI_nv_pred <mnemonic, ImmOp, MajOp, PredNot, 1>;
}
-multiclass ST_PostInc_Pred_nv<string mnemonic, RegisterClass RC,
- Operand ImmOp, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_PostInc_Pbase_nv<mnemonic, RC, ImmOp, PredNot, 0>;
- // Predicate new
- let Predicates = [HasV4T], validSubTargets = HasV4SubT in
- defm _cdn#NAME : ST_PostInc_Pbase_nv<mnemonic, RC, ImmOp, PredNot, 1>;
+multiclass ST_PostInc_nv<string mnemonic, string BaseOp, Operand ImmOp,
+ bits<2> MajOp> {
+ let BaseOpcode = "POST_"#BaseOp in {
+ def S2_#NAME#_pi : T_StorePI_nv <mnemonic, ImmOp, MajOp>;
+
+ // Predicated
+ defm S2_p#NAME#t : ST_PostInc_Pred_nv <mnemonic, ImmOp, MajOp, 0>;
+ defm S2_p#NAME#f : ST_PostInc_Pred_nv <mnemonic, ImmOp, MajOp, 1>;
}
}
-let hasCtrlDep = 1, isNVStore = 1, neverHasSideEffects = 1 in
-multiclass ST_PostInc_nv<string mnemonic, string BaseOp, RegisterClass RC,
- Operand ImmOp> {
+let accessSize = ByteAccess in
+defm storerbnew: ST_PostInc_nv <"memb", "STrib", s4_0Imm, 0b00>;
- let BaseOpcode = "POST_"#BaseOp in {
- let isPredicable = 1 in
- def NAME#_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
- (ins IntRegs:$src1, ImmOp:$offset, RC:$src2),
- mnemonic#"($src1++#$offset) = $src2.new",
- [],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
- let isPredicated = 1 in {
- defm Pt : ST_PostInc_Pred_nv<mnemonic, RC, ImmOp, 0 >;
- defm NotPt : ST_PostInc_Pred_nv<mnemonic, RC, ImmOp, 1 >;
- }
+let accessSize = HalfWordAccess in
+defm storerhnew: ST_PostInc_nv <"memh", "STrih", s4_1Imm, 0b01>;
+
+let accessSize = WordAccess in
+defm storerinew: ST_PostInc_nv <"memw", "STriw", s4_2Imm, 0b10>;
+
+//===----------------------------------------------------------------------===//
+// Template class for post increment .new stores with register offset
+//===----------------------------------------------------------------------===//
+let isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3 in
+class T_StorePI_RegNV <string mnemonic, bits<2> MajOp, MemAccessSize AccessSz>
+ : NVInstPI_V4 <(outs IntRegs:$_dst_),
+ (ins IntRegs:$src1, ModRegs:$src2, IntRegs:$src3),
+                 mnemonic#"($src1++$src2) = $src3.new",
+ [], "$src1 = $_dst_"> {
+ bits<5> src1;
+ bits<1> src2;
+ bits<3> src3;
+ let accessSize = AccessSz;
+
+ let IClass = 0b1010;
+
+ let Inst{27-21} = 0b1101101;
+ let Inst{20-16} = src1;
+ let Inst{13} = src2;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src3;
+ let Inst{7} = 0b0;
}
-}
-let addrMode = PostInc, validSubTargets = HasV4SubT in {
-defm POST_STbri: ST_PostInc_nv <"memb", "STrib", IntRegs, s4_0Imm>, AddrModeRel;
-defm POST_SThri: ST_PostInc_nv <"memh", "STrih", IntRegs, s4_1Imm>, AddrModeRel;
-defm POST_STwri: ST_PostInc_nv <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
-}
+def S2_storerbnew_pr : T_StorePI_RegNV<"memb", 0b00, ByteAccess>;
+def S2_storerhnew_pr : T_StorePI_RegNV<"memh", 0b01, HalfWordAccess>;
+def S2_storerinew_pr : T_StorePI_RegNV<"memw", 0b10, WordAccess>;
// memb(Rx++#s4:0:circ(Mu))=Nt.new
// memb(Rx++I:circ(Mu))=Nt.new
-// memb(Rx++Mu)=Nt.new
// memb(Rx++Mu:brev)=Nt.new
// memh(Rx++#s4:1:circ(Mu))=Nt.new
// memh(Rx++I:circ(Mu))=Nt.new
@@ -1002,7 +1522,8 @@ defm POST_STwri: ST_PostInc_nv <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
// operands.
//===----------------------------------------------------------------------===//
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11 in
+let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11,
+ opExtentAlign = 2 in
class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
bit isNegCond, bit isTak>
: NVInst_V4<(outs),
@@ -1010,8 +1531,7 @@ class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
"if ("#!if(isNegCond, "!","")#mnemonic#
"($src1"#!if(!eq(NvOpNum, 0),".new, ",", ")#
"$src2"#!if(!eq(NvOpNum, 1),".new))","))")#" jump:"
- #!if(isTak, "t","nt")#" $offset",
- []>, Requires<[HasV4T]> {
+ #!if(isTak, "t","nt")#" $offset", []> {
bits<5> src1;
bits<5> src2;
@@ -1020,14 +1540,14 @@ class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
bits<11> offset;
let isTaken = isTak;
- let isBrTaken = !if(isTaken, "true", "false");
let isPredicatedFalse = isNegCond;
+ let opNewValue{0} = NvOpNum;
let Ns = !if(!eq(NvOpNum, 0), src1{2-0}, src2{2-0});
let RegOp = !if(!eq(NvOpNum, 0), src2, src1);
let IClass = 0b0010;
- let Inst{26} = 0b0;
+ let Inst{27-26} = 0b00;
let Inst{25-23} = majOp;
let Inst{22} = isNegCond;
let Inst{18-16} = Ns;
@@ -1041,9 +1561,9 @@ class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
multiclass NVJrr_cond<string mnemonic, bits<3> majOp, bit NvOpNum,
bit isNegCond> {
// Branch not taken:
- def _nt_V4: NVJrr_template<mnemonic, majOp, NvOpNum, isNegCond, 0>;
+ def _nt: NVJrr_template<mnemonic, majOp, NvOpNum, isNegCond, 0>;
// Branch taken:
- def _t_V4: NVJrr_template<mnemonic, majOp, NvOpNum, isNegCond, 1>;
+ def _t : NVJrr_template<mnemonic, majOp, NvOpNum, isNegCond, 1>;
}
// NvOpNum = 0 -> First Operand is a new-value Register
@@ -1052,8 +1572,8 @@ multiclass NVJrr_cond<string mnemonic, bits<3> majOp, bit NvOpNum,
multiclass NVJrr_base<string mnemonic, string BaseOp, bits<3> majOp,
bit NvOpNum> {
let BaseOpcode = BaseOp#_NVJ in {
- defm _t_Jumpnv : NVJrr_cond<mnemonic, majOp, NvOpNum, 0>; // True cond
- defm _f_Jumpnv : NVJrr_cond<mnemonic, majOp, NvOpNum, 1>; // False cond
+ defm _t_jumpnv : NVJrr_cond<mnemonic, majOp, NvOpNum, 0>; // True cond
+ defm _f_jumpnv : NVJrr_cond<mnemonic, majOp, NvOpNum, 1>; // False cond
}
}
@@ -1064,12 +1584,12 @@ multiclass NVJrr_base<string mnemonic, string BaseOp, bits<3> majOp,
// if ([!]cmp.gtu(Rt,Ns.new)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
- Defs = [PC], neverHasSideEffects = 1, validSubTargets = HasV4SubT in {
- defm CMPEQrr : NVJrr_base<"cmp.eq", "CMPEQ", 0b000, 0>, PredRel;
- defm CMPGTrr : NVJrr_base<"cmp.gt", "CMPGT", 0b001, 0>, PredRel;
- defm CMPGTUrr : NVJrr_base<"cmp.gtu", "CMPGTU", 0b010, 0>, PredRel;
- defm CMPLTrr : NVJrr_base<"cmp.gt", "CMPLT", 0b011, 1>, PredRel;
- defm CMPLTUrr : NVJrr_base<"cmp.gtu", "CMPLTU", 0b100, 1>, PredRel;
+ Defs = [PC], hasSideEffects = 0 in {
+ defm J4_cmpeq : NVJrr_base<"cmp.eq", "CMPEQ", 0b000, 0>, PredRel;
+ defm J4_cmpgt : NVJrr_base<"cmp.gt", "CMPGT", 0b001, 0>, PredRel;
+ defm J4_cmpgtu : NVJrr_base<"cmp.gtu", "CMPGTU", 0b010, 0>, PredRel;
+ defm J4_cmplt : NVJrr_base<"cmp.gt", "CMPLT", 0b011, 1>, PredRel;
+ defm J4_cmpltu : NVJrr_base<"cmp.gtu", "CMPLTU", 0b100, 1>, PredRel;
}
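+
+// For illustration (names follow the defm expansion above): J4_cmpeq
+// expands to defs such as J4_cmpeq_t_jumpnv_t, which assembles roughly as
+//   if (cmp.eq(r1.new, r2)) jump:t target
+// with r1 defined earlier in the same packet.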
//===----------------------------------------------------------------------===//
@@ -1077,18 +1597,18 @@ let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
// with a register and an unsigned immediate (U5) operand.
//===----------------------------------------------------------------------===//
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11 in
+let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11,
+ opExtentAlign = 2 in
class NVJri_template<string mnemonic, bits<3> majOp, bit isNegCond,
bit isTak>
: NVInst_V4<(outs),
(ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic#"($src1.new, #$src2)) jump:"
- #!if(isTak, "t","nt")#" $offset",
- []>, Requires<[HasV4T]> {
+ #!if(isTak, "t","nt")#" $offset", []> {
let isTaken = isTak;
let isPredicatedFalse = isNegCond;
- let isBrTaken = !if(isTaken, "true", "false");
bits<3> src1;
bits<5> src2;
@@ -1107,15 +1627,15 @@ class NVJri_template<string mnemonic, bits<3> majOp, bit isNegCond,
multiclass NVJri_cond<string mnemonic, bits<3> majOp, bit isNegCond> {
// Branch not taken:
- def _nt_V4: NVJri_template<mnemonic, majOp, isNegCond, 0>;
+ def _nt: NVJri_template<mnemonic, majOp, isNegCond, 0>;
// Branch taken:
- def _t_V4: NVJri_template<mnemonic, majOp, isNegCond, 1>;
+ def _t : NVJri_template<mnemonic, majOp, isNegCond, 1>;
}
multiclass NVJri_base<string mnemonic, string BaseOp, bits<3> majOp> {
let BaseOpcode = BaseOp#_NVJri in {
- defm _t_Jumpnv : NVJri_cond<mnemonic, majOp, 0>; // True Cond
- defm _f_Jumpnv : NVJri_cond<mnemonic, majOp, 1>; // False cond
+ defm _t_jumpnv : NVJri_cond<mnemonic, majOp, 0>; // True Cond
+ defm _f_jumpnv : NVJri_cond<mnemonic, majOp, 1>; // False cond
}
}
@@ -1124,10 +1644,10 @@ multiclass NVJri_base<string mnemonic, string BaseOp, bits<3> majOp> {
// if ([!]cmp.gtu(Ns.new,#U5)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
- Defs = [PC], neverHasSideEffects = 1, validSubTargets = HasV4SubT in {
- defm CMPEQri : NVJri_base<"cmp.eq", "CMPEQ", 0b000>, PredRel;
- defm CMPGTri : NVJri_base<"cmp.gt", "CMPGT", 0b001>, PredRel;
- defm CMPGTUri : NVJri_base<"cmp.gtu", "CMPGTU", 0b010>, PredRel;
+ Defs = [PC], hasSideEffects = 0 in {
+ defm J4_cmpeqi : NVJri_base<"cmp.eq", "CMPEQ", 0b000>, PredRel;
+ defm J4_cmpgti : NVJri_base<"cmp.gt", "CMPGT", 0b001>, PredRel;
+ defm J4_cmpgtui : NVJri_base<"cmp.gtu", "CMPGTU", 0b010>, PredRel;
}
//===----------------------------------------------------------------------===//
@@ -1135,19 +1655,19 @@ let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
// with a register and a hardcoded 0/-1 immediate value.
//===----------------------------------------------------------------------===//
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 11 in
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 11,
+ opExtentAlign = 2 in
class NVJ_ConstImm_template<string mnemonic, bits<3> majOp, string ImmVal,
bit isNegCond, bit isTak>
: NVInst_V4<(outs),
(ins IntRegs:$src1, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic
#"($src1.new, #"#ImmVal#")) jump:"
- #!if(isTak, "t","nt")#" $offset",
- []>, Requires<[HasV4T]> {
+ #!if(isTak, "t","nt")#" $offset", []> {
let isTaken = isTak;
let isPredicatedFalse = isNegCond;
- let isBrTaken = !if(isTaken, "true", "false");
bits<3> src1;
bits<11> offset;
@@ -1164,16 +1684,16 @@ class NVJ_ConstImm_template<string mnemonic, bits<3> majOp, string ImmVal,
multiclass NVJ_ConstImm_cond<string mnemonic, bits<3> majOp, string ImmVal,
bit isNegCond> {
// Branch not taken:
- def _nt_V4: NVJ_ConstImm_template<mnemonic, majOp, ImmVal, isNegCond, 0>;
+ def _nt: NVJ_ConstImm_template<mnemonic, majOp, ImmVal, isNegCond, 0>;
// Branch taken:
- def _t_V4: NVJ_ConstImm_template<mnemonic, majOp, ImmVal, isNegCond, 1>;
+ def _t : NVJ_ConstImm_template<mnemonic, majOp, ImmVal, isNegCond, 1>;
}
multiclass NVJ_ConstImm_base<string mnemonic, string BaseOp, bits<3> majOp,
string ImmVal> {
let BaseOpcode = BaseOp#_NVJ_ConstImm in {
- defm _t_Jumpnv : NVJ_ConstImm_cond<mnemonic, majOp, ImmVal, 0>; // True cond
- defm _f_Jumpnv : NVJ_ConstImm_cond<mnemonic, majOp, ImmVal, 1>; // False Cond
+ defm _t_jumpnv : NVJ_ConstImm_cond<mnemonic, majOp, ImmVal, 0>; // True
+ defm _f_jumpnv : NVJ_ConstImm_cond<mnemonic, majOp, ImmVal, 1>; // False
}
}
@@ -1182,51 +1702,194 @@ multiclass NVJ_ConstImm_base<string mnemonic, string BaseOp, bits<3> majOp,
// if ([!]cmp.gt(Ns.new,#-1)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator=1,
- Defs = [PC], neverHasSideEffects = 1 in {
- defm TSTBIT0 : NVJ_ConstImm_base<"tstbit", "TSTBIT", 0b011, "0">, PredRel;
- defm CMPEQn1 : NVJ_ConstImm_base<"cmp.eq", "CMPEQ", 0b100, "-1">, PredRel;
- defm CMPGTn1 : NVJ_ConstImm_base<"cmp.gt", "CMPGT", 0b101, "-1">, PredRel;
+ Defs = [PC], hasSideEffects = 0 in {
+ defm J4_tstbit0 : NVJ_ConstImm_base<"tstbit", "TSTBIT", 0b011, "0">, PredRel;
+ defm J4_cmpeqn1 : NVJ_ConstImm_base<"cmp.eq", "CMPEQ", 0b100, "-1">, PredRel;
+ defm J4_cmpgtn1 : NVJ_ConstImm_base<"cmp.gt", "CMPGT", 0b101, "-1">, PredRel;
+}
+
+// J4_hintjumpr: Hint that an indirect jump to the address in Rs is likely.
+let isBranch = 1, isIndirectBranch = 1, hasSideEffects = 0 in
+def J4_hintjumpr: JRInst <
+ (outs),
+ (ins IntRegs:$Rs),
+ "hintjr($Rs)"> {
+ bits<5> Rs;
+ let IClass = 0b0101;
+ let Inst{27-21} = 0b0010101;
+ let Inst{20-16} = Rs;
+ }
+
+//===----------------------------------------------------------------------===//
+// NV/J -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// CR +
+//===----------------------------------------------------------------------===//
+
+// PC-relative add
+let hasNewValue = 1, isExtendable = 1, opExtendable = 1,
+ isExtentSigned = 0, opExtentBits = 6, hasSideEffects = 0, Uses = [PC] in
+def C4_addipc : CRInst <(outs IntRegs:$Rd), (ins u6Ext:$u6),
+ "$Rd = add(pc, #$u6)", [], "", CR_tc_2_SLOT3 > {
+ bits<5> Rd;
+ bits<6> u6;
+
+ let IClass = 0b0110;
+ let Inst{27-16} = 0b101001001001;
+ let Inst{12-7} = u6;
+ let Inst{4-0} = Rd;
+ }
+
+
+
+let hasSideEffects = 0 in
+class T_LOGICAL_3OP<string MnOp1, string MnOp2, bits<2> OpBits, bit IsNeg>
+ : CRInst<(outs PredRegs:$Pd),
+ (ins PredRegs:$Ps, PredRegs:$Pt, PredRegs:$Pu),
+ "$Pd = " # MnOp1 # "($Ps, " # MnOp2 # "($Pt, " #
+ !if (IsNeg,"!","") # "$Pu))",
+ [], "", CR_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<2> Ps;
+ bits<2> Pt;
+ bits<2> Pu;
+
+ let IClass = 0b0110;
+ let Inst{27-24} = 0b1011;
+ let Inst{23} = IsNeg;
+ let Inst{22-21} = OpBits;
+ let Inst{20} = 0b1;
+ let Inst{17-16} = Ps;
+ let Inst{13} = 0b0;
+ let Inst{9-8} = Pt;
+ let Inst{7-6} = Pu;
+ let Inst{1-0} = Pd;
}
+def C4_and_and : T_LOGICAL_3OP<"and", "and", 0b00, 0>;
+def C4_and_or : T_LOGICAL_3OP<"and", "or", 0b01, 0>;
+def C4_or_and : T_LOGICAL_3OP<"or", "and", 0b10, 0>;
+def C4_or_or : T_LOGICAL_3OP<"or", "or", 0b11, 0>;
+def C4_and_andn : T_LOGICAL_3OP<"and", "and", 0b00, 1>;
+def C4_and_orn : T_LOGICAL_3OP<"and", "or", 0b01, 1>;
+def C4_or_andn : T_LOGICAL_3OP<"or", "and", 0b10, 1>;
+def C4_or_orn : T_LOGICAL_3OP<"or", "or", 0b11, 1>;
+
+// op(Ps, op(Pt, Pu))
+class LogLog_pat<SDNode Op1, SDNode Op2, InstHexagon MI>
+ : Pat<(i1 (Op1 I1:$Ps, (Op2 I1:$Pt, I1:$Pu))),
+ (MI I1:$Ps, I1:$Pt, I1:$Pu)>;
+
+// op(Ps, op(Pt, ~Pu))
+class LogLogNot_pat<SDNode Op1, SDNode Op2, InstHexagon MI>
+ : Pat<(i1 (Op1 I1:$Ps, (Op2 I1:$Pt, (not I1:$Pu)))),
+ (MI I1:$Ps, I1:$Pt, I1:$Pu)>;
+
+def: LogLog_pat<and, and, C4_and_and>;
+def: LogLog_pat<and, or, C4_and_or>;
+def: LogLog_pat<or, and, C4_or_and>;
+def: LogLog_pat<or, or, C4_or_or>;
+
+def: LogLogNot_pat<and, and, C4_and_andn>;
+def: LogLogNot_pat<and, or, C4_and_orn>;
+def: LogLogNot_pat<or, and, C4_or_andn>;
+def: LogLogNot_pat<or, or, C4_or_orn>;
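+
+// For illustration (assuming i1 values live in predicate registers): a
+// condition such as a || (b && !c) becomes (or $Ps, (and $Pt, (not $Pu)))
+// and selects to C4_or_andn, i.e. p0 = or(p1, and(p2, !p3)).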
+
+//===----------------------------------------------------------------------===//
+// CR -
+//===----------------------------------------------------------------------===//
+
//===----------------------------------------------------------------------===//
// XTYPE/ALU +
//===----------------------------------------------------------------------===//
+// Logical with-not instructions.
+def A4_andnp : T_ALU64_logical<"and", 0b001, 1, 0, 1>;
+def A4_ornp : T_ALU64_logical<"or", 0b011, 1, 0, 1>;
+
+def: Pat<(i64 (and (i64 DoubleRegs:$Rs), (i64 (not (i64 DoubleRegs:$Rt))))),
+ (A4_andnp DoubleRegs:$Rs, DoubleRegs:$Rt)>;
+def: Pat<(i64 (or (i64 DoubleRegs:$Rs), (i64 (not (i64 DoubleRegs:$Rt))))),
+ (A4_ornp DoubleRegs:$Rs, DoubleRegs:$Rt)>;
+
+let hasNewValue = 1, hasSideEffects = 0 in
+def S4_parity: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0101111;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{4-0} = Rd;
+}
+
// Add and accumulate.
// Rd=add(Rs,add(Ru,#s6))
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 6,
-validSubTargets = HasV4SubT in
-def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, s6Ext:$src3),
- "$dst = add($src1, add($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
- s6_16ExtPred:$src3)))]>,
- Requires<[HasV4T]>;
-
-// Rd=add(Rs,sub(#s6,Ru))
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 6,
-validSubTargets = HasV4SubT in
-def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s6Ext:$src2, IntRegs:$src3),
- "$dst = add($src1, sub(#$src2, $src3))",
- [(set (i32 IntRegs:$dst),
- (add (i32 IntRegs:$src1), (sub s6_10ExtPred:$src2,
- (i32 IntRegs:$src3))))]>,
- Requires<[HasV4T]>;
-
-// Generates the same instruction as ADDr_SUBri_V4 but matches different
-// pattern.
-// Rd=add(Rs,sub(#s6,Ru))
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 6,
-validSubTargets = HasV4SubT in
-def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s6Ext:$src2, IntRegs:$src3),
- "$dst = add($src1, sub(#$src2, $src3))",
- [(set (i32 IntRegs:$dst),
- (sub (add (i32 IntRegs:$src1), s6_10ExtPred:$src2),
- (i32 IntRegs:$src3)))]>,
- Requires<[HasV4T]>;
+let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 6,
+ opExtendable = 3 in
+def S4_addaddi : ALU64Inst <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Ru, s6Ext:$s6),
+ "$Rd = add($Rs, add($Ru, #$s6))" ,
+ [(set (i32 IntRegs:$Rd), (add (i32 IntRegs:$Rs),
+ (add (i32 IntRegs:$Ru), s6_16ExtPred:$s6)))],
+ "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Ru;
+ bits<6> s6;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b10110;
+ let Inst{22-21} = s6{5-4};
+ let Inst{20-16} = Rs;
+ let Inst{13} = s6{3};
+ let Inst{12-8} = Rd;
+ let Inst{7-5} = s6{2-0};
+ let Inst{4-0} = Ru;
+ }
+
+let isExtentSigned = 1, hasSideEffects = 0, hasNewValue = 1, isExtendable = 1,
+ opExtentBits = 6, opExtendable = 2 in
+def S4_subaddi: ALU64Inst <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, s6Ext:$s6, IntRegs:$Ru),
+ "$Rd = add($Rs, sub(#$s6, $Ru))",
+ [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<6> s6;
+ bits<5> Ru;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b10111;
+ let Inst{22-21} = s6{5-4};
+ let Inst{20-16} = Rs;
+ let Inst{13} = s6{3};
+ let Inst{12-8} = Rd;
+ let Inst{7-5} = s6{2-0};
+ let Inst{4-0} = Ru;
+ }
+
+// Rd=add(Rs,sub(#s6,Ru))
+def: Pat<(add (i32 IntRegs:$src1), (sub s6_10ExtPred:$src2,
+ (i32 IntRegs:$src3))),
+ (S4_subaddi IntRegs:$src1, s6_10ExtPred:$src2, IntRegs:$src3)>;
+
+// Rd=sub(add(Rs,#s6),Ru)
+def: Pat<(sub (add (i32 IntRegs:$src1), s6_10ExtPred:$src2),
+ (i32 IntRegs:$src3)),
+ (S4_subaddi IntRegs:$src1, s6_10ExtPred:$src2, IntRegs:$src3)>;
+
+// Rd=add(sub(Rs,Ru),#s6)
+def: Pat<(add (sub (i32 IntRegs:$src1), (i32 IntRegs:$src3)),
+ (s6_10ExtPred:$src2)),
+ (S4_subaddi IntRegs:$src1, s6_10ExtPred:$src2, IntRegs:$src3)>;
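+
+// All three shapes above compute Rs - Ru + #s6; e.g. x + (4 - y),
+// (x + 4) - y and (x - y) + 4 each select to S4_subaddi(x, #4, y)
+// (illustrative values).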
// Add or subtract doublewords with carry.
@@ -1235,213 +1898,316 @@ def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
//TODO:
// Rdd=sub(Rss,Rtt,Px):carry
+// Extract bitfield
+// Rdd=extract(Rss,#u6,#U6)
+// Rdd=extract(Rss,Rtt)
+// Rd=extract(Rs,Rtt)
+// Rd=extract(Rs,#u5,#U5)
-// Logical doublewords.
-// Rdd=and(Rtt,~Rss)
-let validSubTargets = HasV4SubT in
-def ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2),
- "$dst = and($src1, ~$src2)",
- [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
- (not (i64 DoubleRegs:$src2))))]>,
- Requires<[HasV4T]>;
-
-// Rdd=or(Rtt,~Rss)
-let validSubTargets = HasV4SubT in
-def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2),
- "$dst = or($src1, ~$src2)",
- [(set (i64 DoubleRegs:$dst),
- (or (i64 DoubleRegs:$src1), (not (i64 DoubleRegs:$src2))))]>,
- Requires<[HasV4T]>;
-
-
-// Logical-logical doublewords.
-// Rxx^=xor(Rss,Rtt)
-let validSubTargets = HasV4SubT in
-def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
- "$dst ^= xor($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2),
- (i64 DoubleRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
+def S4_extractp_rp : T_S3op_64 < "extract", 0b11, 0b100, 0>;
+def S4_extractp : T_S2op_extract <"extract", 0b1010, DoubleRegs, u6Imm>;
+let hasNewValue = 1 in {
+ def S4_extract_rp : T_S3op_extract<"extract", 0b01>;
+ def S4_extract : T_S2op_extract <"extract", 0b1101, IntRegs, u5Imm>;
+}
+
+// Complex add/sub halfwords/words
+let Defs = [USR_OVF] in {
+ def S4_vxaddsubh : T_S3op_64 < "vxaddsubh", 0b01, 0b100, 0, 1>;
+ def S4_vxaddsubw : T_S3op_64 < "vxaddsubw", 0b01, 0b000, 0, 1>;
+ def S4_vxsubaddh : T_S3op_64 < "vxsubaddh", 0b01, 0b110, 0, 1>;
+ def S4_vxsubaddw : T_S3op_64 < "vxsubaddw", 0b01, 0b010, 0, 1>;
+}
+
+let Defs = [USR_OVF] in {
+ def S4_vxaddsubhr : T_S3op_64 < "vxaddsubh", 0b11, 0b000, 0, 1, 1, 1>;
+ def S4_vxsubaddhr : T_S3op_64 < "vxsubaddh", 0b11, 0b010, 0, 1, 1, 1>;
+}
+
+let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF] in {
+ def M4_mac_up_s1_sat: T_MType_acc_rr<"+= mpy", 0b011, 0b000, 0, [], 0, 1, 1>;
+ def M4_nac_up_s1_sat: T_MType_acc_rr<"-= mpy", 0b011, 0b001, 0, [], 0, 1, 1>;
+}
+
+// Logical xor with xor accumulation.
+// Rxx^=xor(Rss,Rtt)
+let hasSideEffects = 0 in
+def M4_xor_xacc
+ : SInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Rxx ^= xor($Rss, $Rtt)",
+ [(set (i64 DoubleRegs:$Rxx),
+ (xor (i64 DoubleRegs:$dst2), (xor (i64 DoubleRegs:$Rss),
+ (i64 DoubleRegs:$Rtt))))],
+ "$dst2 = $Rxx", S_3op_tc_1_SLOT23> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-22} = 0b101010;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ let Inst{7-5} = 0b000;
+ let Inst{4-0} = Rxx;
+ }
+
+// Rotate and reduce bytes
+// Rdd=vrcrotate(Rss,Rt,#u2)
+let hasSideEffects = 0 in
+def S4_vrcrotate
+ : SInst <(outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2),
+ "$Rdd = vrcrotate($Rss, $Rt, #$u2)",
+ [], "", S_3op_tc_3x_SLOT23> {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rt;
+ bits<2> u2;
+
+ let IClass = 0b1100;
+
+ let Inst{27-22} = 0b001111;
+ let Inst{20-16} = Rss;
+ let Inst{13} = u2{1};
+ let Inst{12-8} = Rt;
+ let Inst{7-6} = 0b11;
+ let Inst{5} = u2{0};
+ let Inst{4-0} = Rdd;
+ }
+
+// Rotate and reduce bytes with accumulation
+// Rxx+=vrcrotate(Rss,Rt,#u2)
+let hasSideEffects = 0 in
+def S4_vrcrotate_acc
+ : SInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2),
+ "$Rxx += vrcrotate($Rss, $Rt, #$u2)", [],
+ "$dst2 = $Rxx", S_3op_tc_3x_SLOT23> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rt;
+ bits<2> u2;
+
+ let IClass = 0b1100;
+
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = Rss;
+ let Inst{13} = u2{1};
+ let Inst{12-8} = Rt;
+ let Inst{5} = u2{0};
+ let Inst{4-0} = Rxx;
+ }
+
+// Vector reduce conditional negate halfwords
+let hasSideEffects = 0 in
+def S2_vrcnegh
+ : SInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt),
+ "$Rxx += vrcnegh($Rss, $Rt)", [],
+ "$dst2 = $Rxx", S_3op_tc_3x_SLOT23> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-21} = 0b1011001;
+ let Inst{20-16} = Rss;
+ let Inst{13} = 0b1;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = 0b111;
+ let Inst{4-0} = Rxx;
+ }
+
+// Split bitfield
+def A4_bitspliti : T_S2op_2_di <"bitsplit", 0b110, 0b100>;
+
+// Arithmetic/Convergent round
+def A4_cround_ri : T_S2op_2_ii <"cround", 0b111, 0b000>;
+
+def A4_round_ri : T_S2op_2_ii <"round", 0b111, 0b100>;
+
+let Defs = [USR_OVF] in
+def A4_round_ri_sat : T_S2op_2_ii <"round", 0b111, 0b110, 1>;
// Logical-logical words.
-// Rx=or(Ru,and(Rx,#s10))
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 10,
-validSubTargets = HasV4SubT in
-def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, s10Ext:$src3),
- "$dst = or($src1, and($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- s10ExtPred:$src3)))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
+// Compound or-and -- Rx=or(Ru,and(Rx,#s10))
+let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 10,
+ opExtendable = 3 in
+def S4_or_andix:
+ ALU64Inst<(outs IntRegs:$Rx),
+ (ins IntRegs:$Ru, IntRegs:$_src_, s10Ext:$s10),
+ "$Rx = or($Ru, and($_src_, #$s10))" ,
+ [(set (i32 IntRegs:$Rx),
+ (or (i32 IntRegs:$Ru), (and (i32 IntRegs:$_src_), s10ExtPred:$s10)))] ,
+ "$_src_ = $Rx", ALU64_tc_2_SLOT23> {
+ bits<5> Rx;
+ bits<5> Ru;
+ bits<10> s10;
+
+ let IClass = 0b1101;
+
+ let Inst{27-22} = 0b101001;
+ let Inst{20-16} = Rx;
+ let Inst{21} = s10{9};
+ let Inst{13-5} = s10{8-0};
+ let Inst{4-0} = Ru;
+ }
+
+// Miscellaneous ALU64 instructions.
+//
+let hasNewValue = 1, hasSideEffects = 0 in
+def A4_modwrapu: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = modwrap($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0011111;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = 0b111;
+ let Inst{4-0} = Rd;
+}
+
+let hasSideEffects = 0 in
+def A4_bitsplit: ALU64Inst<(outs DoubleRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = bitsplit($Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b0100;
+ let Inst{21} = 0b1;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{4-0} = Rd;
+}
+
+let hasSideEffects = 0 in
+def dep_S2_packhl: ALU64Inst<(outs DoubleRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = packhl($Rs, $Rt):deprecated", [], "", ALU64_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b0100;
+ let Inst{21} = 0b0;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{4-0} = Rd;
+}
+
+let hasNewValue = 1, hasSideEffects = 0 in
+def dep_A2_addsat: ALU64Inst<(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = add($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0101100;
+ let Inst{20-16} = Rs;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b0;
+ let Inst{4-0} = Rd;
+}
+
+let hasNewValue = 1, hasSideEffects = 0 in
+def dep_A2_subsat: ALU64Inst<(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = sub($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0101100;
+ let Inst{20-16} = Rt;
+ let Inst{12-8} = Rs;
+ let Inst{7} = 0b1;
+ let Inst{4-0} = Rd;
+}
+
+// Rx[&|]=xor(Rs,Rt)
+def M4_or_xor : T_MType_acc_rr < "|= xor", 0b110, 0b001, 0>;
+def M4_and_xor : T_MType_acc_rr < "&= xor", 0b010, 0b010, 0>;
+
+// Rx[&|^]=or(Rs,Rt)
+def M4_xor_or : T_MType_acc_rr < "^= or", 0b110, 0b011, 0>;
+
+let CextOpcode = "ORr_ORr" in
+def M4_or_or : T_MType_acc_rr < "|= or", 0b110, 0b000, 0>;
+def M4_and_or : T_MType_acc_rr < "&= or", 0b010, 0b001, 0>;
// Rx[&|^]=and(Rs,Rt)
-// Rx&=and(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst &= and($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx|=and(Rs,Rt)
-let validSubTargets = HasV4SubT, CextOpcode = "ORr_ANDr", InputType = "reg" in
-def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst |= and($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>, ImmRegRel;
-
-// Rx^=and(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst ^= and($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
+def M4_xor_and : T_MType_acc_rr < "^= and", 0b110, 0b010, 0>;
+
+let CextOpcode = "ORr_ANDr" in
+def M4_or_and : T_MType_acc_rr < "|= and", 0b010, 0b011, 0>;
+def M4_and_and : T_MType_acc_rr < "&= and", 0b010, 0b000, 0>;
// Rx[&|^]=and(Rs,~Rt)
-// Rx&=and(Rs,~Rt)
-let validSubTargets = HasV4SubT in
-def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst &= and($src2, ~$src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (not (i32 IntRegs:$src3)))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx|=and(Rs,~Rt)
-let validSubTargets = HasV4SubT in
-def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst |= and($src2, ~$src3)",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (not (i32 IntRegs:$src3)))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx^=and(Rs,~Rt)
-let validSubTargets = HasV4SubT in
-def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst ^= and($src2, ~$src3)",
- [(set (i32 IntRegs:$dst),
- (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- (not (i32 IntRegs:$src3)))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
+def M4_xor_andn : T_MType_acc_rr < "^= and", 0b001, 0b010, 0, [], 1>;
+def M4_or_andn : T_MType_acc_rr < "|= and", 0b001, 0b000, 0, [], 1>;
+def M4_and_andn : T_MType_acc_rr < "&= and", 0b001, 0b001, 0, [], 1>;
+
+def: T_MType_acc_pat2 <M4_or_xor, xor, or>;
+def: T_MType_acc_pat2 <M4_and_xor, xor, and>;
+def: T_MType_acc_pat2 <M4_or_and, and, or>;
+def: T_MType_acc_pat2 <M4_and_and, and, and>;
+def: T_MType_acc_pat2 <M4_xor_and, and, xor>;
+def: T_MType_acc_pat2 <M4_or_or, or, or>;
+def: T_MType_acc_pat2 <M4_and_or, or, and>;
+def: T_MType_acc_pat2 <M4_xor_or, or, xor>;
+
+class T_MType_acc_pat3 <InstHexagon MI, SDNode firstOp, SDNode secOp>
+ : Pat <(i32 (secOp IntRegs:$src1, (firstOp IntRegs:$src2,
+ (not IntRegs:$src3)))),
+ (i32 (MI IntRegs:$src1, IntRegs:$src2, IntRegs:$src3))>;
+
+def: T_MType_acc_pat3 <M4_or_andn, and, or>;
+def: T_MType_acc_pat3 <M4_and_andn, and, and>;
+def: T_MType_acc_pat3 <M4_xor_andn, and, xor>;
+
+// Compound or-or and or-and
+let isExtentSigned = 1, InputType = "imm", hasNewValue = 1, isExtendable = 1,
+ opExtentBits = 10, opExtendable = 3 in
+class T_CompOR <string mnemonic, bits<2> MajOp, SDNode OpNode>
+ : MInst_acc <(outs IntRegs:$Rx),
+ (ins IntRegs:$src1, IntRegs:$Rs, s10Ext:$s10),
+ "$Rx |= "#mnemonic#"($Rs, #$s10)",
+ [(set (i32 IntRegs:$Rx), (or (i32 IntRegs:$src1),
+ (OpNode (i32 IntRegs:$Rs), s10ExtPred:$s10)))],
+ "$src1 = $Rx", ALU64_tc_2_SLOT23>, ImmRegRel {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<10> s10;
+
+ let IClass = 0b1101;
+
+ let Inst{27-24} = 0b1010;
+ let Inst{23-22} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{21} = s10{9};
+ let Inst{13-5} = s10{8-0};
+ let Inst{4-0} = Rx;
+ }
-// Rx[&|^]=or(Rs,Rt)
-// Rx&=or(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst &= or($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx|=or(Rs,Rt)
-let validSubTargets = HasV4SubT, CextOpcode = "ORr_ORr", InputType = "reg" in
-def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst |= or($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>, ImmRegRel;
-
-// Rx^=or(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst ^= or($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx[&|^]=xor(Rs,Rt)
-// Rx&=xor(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst &= xor($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx|=xor(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst |= xor($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx^=xor(Rs,Rt)
-let validSubTargets = HasV4SubT in
-def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
- "$dst ^= xor($src2, $src3)",
- [(set (i32 IntRegs:$dst),
- (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx|=and(Rs,#s10)
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 10,
-validSubTargets = HasV4SubT, CextOpcode = "ORr_ANDr", InputType = "imm" in
-def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, s10Ext:$src3),
- "$dst |= and($src2, #$src3)",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- s10ExtPred:$src3)))],
- "$src1 = $dst">,
- Requires<[HasV4T]>, ImmRegRel;
-
-// Rx|=or(Rs,#s10)
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 10,
-validSubTargets = HasV4SubT, CextOpcode = "ORr_ORr", InputType = "imm" in
-def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs: $src2, s10Ext:$src3),
- "$dst |= or($src2, #$src3)",
- [(set (i32 IntRegs:$dst),
- (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
- s10ExtPred:$src3)))],
- "$src1 = $dst">,
- Requires<[HasV4T]>, ImmRegRel;
+let CextOpcode = "ORr_ANDr" in
+def S4_or_andi : T_CompOR <"and", 0b00, and>;
+let CextOpcode = "ORr_ORr" in
+def S4_or_ori : T_CompOR <"or", 0b10, or>;
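+
+// Illustrative sketch: with the two defs above, (or rx, (and rs, 100))
+// selects S4_or_andi and prints as "rx |= and(rs, #100)", while
+// (or rx, (or rs, 100)) becomes "rx |= or(rs, #100)"; immediates outside
+// the signed 10-bit range go through the constant extender.
+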
// Modulo wrap
// Rd=modwrap(Rs,Rt)
@@ -1480,269 +2246,483 @@ def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
// XTYPE/ALU -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// XTYPE/BIT +
+//===----------------------------------------------------------------------===//
+
+// Bit reverse
+def S2_brevp : T_S2op_3 <"brev", 0b11, 0b110>;
+
+// Bit count
+def S2_ct0p : T_COUNT_LEADING_64<"ct0", 0b111, 0b010>;
+def S2_ct1p : T_COUNT_LEADING_64<"ct1", 0b111, 0b100>;
+def S4_clbpnorm : T_COUNT_LEADING_64<"normamt", 0b011, 0b000>;
+
+def: Pat<(i32 (trunc (cttz (i64 DoubleRegs:$Rss)))),
+ (S2_ct0p (i64 DoubleRegs:$Rss))>;
+def: Pat<(i32 (trunc (cttz (not (i64 DoubleRegs:$Rss))))),
+ (S2_ct1p (i64 DoubleRegs:$Rss))>;
+
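+// cttz is count-trailing-zeros, which is exactly what ct0p computes on the
+// 64-bit source; applying it to the complemented value counts trailing
+// ones, hence the ct1p mapping above.
+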
+let hasSideEffects = 0, hasNewValue = 1 in
+def S4_clbaddi : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6Imm:$s6),
+ "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> {
+ bits<5> Rs;
+ bits<5> Rd;
+ bits<6> s6;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b1100;
+ let Inst{23-21} = 0b001;
+ let Inst{20-16} = Rs;
+ let Inst{13-8} = s6;
+ let Inst{7-5} = 0b000;
+ let Inst{4-0} = Rd;
+}
+
+let hasSideEffects = 0, hasNewValue = 1 in
+def S4_clbpaddi : SInst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, s6Imm:$s6),
+ "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> {
+ bits<5> Rs;
+ bits<5> Rd;
+ bits<6> s6;
+ let IClass = 0b1000;
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = 0b011;
+ let Inst{20-16} = Rs;
+ let Inst{13-8} = s6;
+ let Inst{7-5} = 0b010;
+ let Inst{4-0} = Rd;
+}
+
+
+// Bit test/set/clear
+def S4_ntstbit_i : T_TEST_BIT_IMM<"!tstbit", 0b001>;
+def S4_ntstbit_r : T_TEST_BIT_REG<"!tstbit", 1>;
+
+let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm.
+ def: Pat<(i1 (seteq (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)),
+ (S4_ntstbit_i (i32 IntRegs:$Rs), u5ImmPred:$u5)>;
+ def: Pat<(i1 (seteq (and (shl 1, (i32 IntRegs:$Rt)), (i32 IntRegs:$Rs)), 0)),
+ (S4_ntstbit_r (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))>;
+}
+
+// Add extra complexity to prefer these instructions over bitsset/bitsclr.
+// The reason is that tstbit/ntstbit can be folded into a compound instruction:
+// if ([!]tstbit(...)) jump ...
+let AddedComplexity = 100 in
+def: Pat<(i1 (setne (and (i32 IntRegs:$Rs), (i32 Set5ImmPred:$u5)), (i32 0))),
+ (S2_tstbit_i (i32 IntRegs:$Rs), (BITPOS32 Set5ImmPred:$u5))>;
+
+let AddedComplexity = 100 in
+def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 Set5ImmPred:$u5)), (i32 0))),
+ (S4_ntstbit_i (i32 IntRegs:$Rs), (BITPOS32 Set5ImmPred:$u5))>;
+
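+// Worked example (sketch): for the power-of-two mask 0x20, Set5ImmPred
+// matches and BITPOS32 yields 5, so "(x & 0x20) == 0" becomes
+// "p0 = !tstbit(r0, #5)", which a later branch can then fold into the
+// compound form "if (!tstbit(r0, #5)) jump".
+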
+def C4_nbitsset : T_TEST_BITS_REG<"!bitsset", 0b01, 1>;
+def C4_nbitsclr : T_TEST_BITS_REG<"!bitsclr", 0b10, 1>;
+def C4_nbitsclri : T_TEST_BITS_IMM<"!bitsclr", 0b10, 1>;
+
+// Do not increase complexity of these patterns. In the DAG, "cmp i8" may be
+// represented as a compare against "value & 0xFF", which is an exact match
+// for cmpb (same for cmph). The patterns below do not contain any additional
+// complexity that would make them preferable, and if they were actually used
+// instead of cmpb/cmph, they would result in a compare against a register
+// loaded with the byte/half mask (i.e. 0xFF or 0xFFFF).
+def: Pat<(i1 (setne (and I32:$Rs, u6ImmPred:$u6), 0)),
+ (C4_nbitsclri I32:$Rs, u6ImmPred:$u6)>;
+def: Pat<(i1 (setne (and I32:$Rs, I32:$Rt), 0)),
+ (C4_nbitsclr I32:$Rs, I32:$Rt)>;
+def: Pat<(i1 (setne (and I32:$Rs, I32:$Rt), I32:$Rt)),
+ (C4_nbitsset I32:$Rs, I32:$Rt)>;
+
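+// Reading the three patterns above: "(x & m) != 0" selects
+// "p0 = !bitsclr(x, m)" (not every masked bit is clear), and
+// "(x & m) != m" selects "p0 = !bitsset(x, m)" (not every masked bit
+// is set).
+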
+//===----------------------------------------------------------------------===//
+// XTYPE/BIT -
+//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// XTYPE/MPY +
//===----------------------------------------------------------------------===//
-// Multiply and user lower result.
-// Rd=add(#u6,mpyi(Rs,#U6))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 6,
-validSubTargets = HasV4SubT in
-def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst),
- (ins u6Ext:$src1, IntRegs:$src2, u6Imm:$src3),
- "$dst = add(#$src1, mpyi($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3),
- u6ExtPred:$src1))]>,
- Requires<[HasV4T]>;
+// Rd=add(#u6,mpyi(Rs,#U6)) -- Multiply by immed and add immed.
+
+let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1 in
+def M4_mpyri_addi : MInst<(outs IntRegs:$Rd),
+ (ins u6Ext:$u6, IntRegs:$Rs, u6Imm:$U6),
+ "$Rd = add(#$u6, mpyi($Rs, #$U6))" ,
+ [(set (i32 IntRegs:$Rd),
+ (add (mul (i32 IntRegs:$Rs), u6ImmPred:$U6),
+ u6ExtPred:$u6))] ,"",ALU64_tc_3x_SLOT23> {
+ bits<5> Rd;
+ bits<6> u6;
+ bits<5> Rs;
+ bits<6> U6;
+
+ let IClass = 0b1101;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23} = U6{5};
+ let Inst{22-21} = u6{5-4};
+ let Inst{20-16} = Rs;
+ let Inst{13} = u6{3};
+ let Inst{12-8} = Rd;
+ let Inst{7-5} = u6{2-0};
+ let Inst{4-0} = U6{4-0};
+ }
+
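+// Usage sketch (hypothetical values): computing r3*8 + 16, e.g. a scaled
+// index plus a fixed offset, folds into the single instruction
+//   r2 = add(#16, mpyi(r3, #8))
+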
+// Rd=add(#u6,mpyi(Rs,Rt))
+let CextOpcode = "ADD_MPY", InputType = "imm", hasNewValue = 1,
+ isExtendable = 1, opExtentBits = 6, opExtendable = 1 in
+def M4_mpyrr_addi : MInst <(outs IntRegs:$Rd),
+ (ins u6Ext:$u6, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = add(#$u6, mpyi($Rs, $Rt))" ,
+ [(set (i32 IntRegs:$Rd),
+ (add (mul (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), u6ExtPred:$u6))],
+ "", ALU64_tc_3x_SLOT23>, ImmRegRel {
+ bits<5> Rd;
+ bits<6> u6;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b01110;
+ let Inst{22-21} = u6{5-4};
+ let Inst{20-16} = Rs;
+ let Inst{13} = u6{3};
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = u6{2-0};
+ let Inst{4-0} = Rd;
+ }
+
+let hasNewValue = 1 in
+class T_AddMpy <bit MajOp, PatLeaf ImmPred, dag ins>
+ : ALU64Inst <(outs IntRegs:$dst), ins,
+ "$dst = add($src1, mpyi("#!if(MajOp,"$src3, #$src2))",
+ "#$src2, $src3))"),
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3), ImmPred:$src2)))],
+ "", ALU64_tc_3x_SLOT23> {
+ bits<5> dst;
+ bits<5> src1;
+ bits<8> src2;
+ bits<5> src3;
+
+ let IClass = 0b1101;
+
+ bits<6> ImmValue = !if(MajOp, src2{5-0}, src2{7-2});
+
+ let Inst{27-24} = 0b1111;
+ let Inst{23} = MajOp;
+ let Inst{22-21} = ImmValue{5-4};
+ let Inst{20-16} = src3;
+ let Inst{13} = ImmValue{3};
+ let Inst{12-8} = dst;
+ let Inst{7-5} = ImmValue{2-0};
+ let Inst{4-0} = src1;
+ }
+
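+// Note on the class above: MajOp also swaps the printed operand order via
+// the !if string concatenation. In the MajOp=0 (u6_2) form the immediate
+// is a multiple of 4, so only src2{7-2} is encoded; the MajOp=1 form
+// encodes src2{5-0} directly.
+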
+def M4_mpyri_addr_u2 : T_AddMpy<0b0, u6_2ImmPred,
+ (ins IntRegs:$src1, u6_2Imm:$src2, IntRegs:$src3)>;
+
+let isExtendable = 1, opExtentBits = 6, opExtendable = 3,
+ CextOpcode = "ADD_MPY", InputType = "imm" in
+def M4_mpyri_addr : T_AddMpy<0b1, u6ExtPred,
+ (ins IntRegs:$src1, IntRegs:$src3, u6Ext:$src2)>, ImmRegRel;
+
+// Rx=add(Ru,mpyi(Rx,Rs))
+let CextOpcode = "ADD_MPY", InputType = "reg", hasNewValue = 1 in
+def M4_mpyrr_addr: MInst_acc <(outs IntRegs:$Rx),
+ (ins IntRegs:$Ru, IntRegs:$_src_, IntRegs:$Rs),
+ "$Rx = add($Ru, mpyi($_src_, $Rs))",
+ [(set (i32 IntRegs:$Rx), (add (i32 IntRegs:$Ru),
+ (mul (i32 IntRegs:$_src_), (i32 IntRegs:$Rs))))],
+ "$_src_ = $Rx", M_tc_3x_SLOT23>, ImmRegRel {
+ bits<5> Rx;
+ bits<5> Ru;
+ bits<5> Rs;
+
+ let IClass = 0b1110;
+
+ let Inst{27-21} = 0b0011000;
+ let Inst{12-8} = Rx;
+ let Inst{4-0} = Ru;
+ let Inst{20-16} = Rs;
+ }
// Rd=add(##,mpyi(Rs,#U6))
def : Pat <(add (mul (i32 IntRegs:$src2), u6ImmPred:$src3),
(HexagonCONST32 tglobaladdr:$src1)),
- (i32 (ADDi_MPYri_V4 tglobaladdr:$src1, IntRegs:$src2,
+ (i32 (M4_mpyri_addi tglobaladdr:$src1, IntRegs:$src2,
u6ImmPred:$src3))>;
-// Rd=add(#u6,mpyi(Rs,Rt))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 6,
-validSubTargets = HasV4SubT, InputType = "imm", CextOpcode = "ADD_MPY" in
-def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst),
- (ins u6Ext:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst = add(#$src1, mpyi($src2, $src3))",
- [(set (i32 IntRegs:$dst),
- (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
- u6ExtPred:$src1))]>,
- Requires<[HasV4T]>, ImmRegRel;
-
// Rd=add(##,mpyi(Rs,Rt))
def : Pat <(add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
(HexagonCONST32 tglobaladdr:$src1)),
- (i32 (ADDi_MPYrr_V4 tglobaladdr:$src1, IntRegs:$src2,
+ (i32 (M4_mpyrr_addi tglobaladdr:$src1, IntRegs:$src2,
IntRegs:$src3))>;
-// Rd=add(Ru,mpyi(#u6:2,Rs))
-let validSubTargets = HasV4SubT in
-def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3),
- "$dst = add($src1, mpyi(#$src2, $src3))",
- [(set (i32 IntRegs:$dst),
- (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3),
- u6_2ImmPred:$src2)))]>,
- Requires<[HasV4T]>;
-
-// Rd=add(Ru,mpyi(Rs,#u6))
-let isExtendable = 1, opExtendable = 3, isExtentSigned = 0, opExtentBits = 6,
-validSubTargets = HasV4SubT, InputType = "imm", CextOpcode = "ADD_MPY" in
-def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, u6Ext:$src3),
- "$dst = add($src1, mpyi($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
- u6ExtPred:$src3)))]>,
- Requires<[HasV4T]>, ImmRegRel;
+// Vector reduce multiply word by signed half (32x16)
+//Rdd=vrmpyweh(Rss,Rtt)[:<<1]
+def M4_vrmpyeh_s0 : T_M2_vmpy<"vrmpyweh", 0b010, 0b100, 0, 0, 0>;
+def M4_vrmpyeh_s1 : T_M2_vmpy<"vrmpyweh", 0b110, 0b100, 1, 0, 0>;
-// Rx=add(Ru,mpyi(Rx,Rs))
-let validSubTargets = HasV4SubT, InputType = "reg", CextOpcode = "ADD_MPY" in
-def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst = add($src1, mpyi($src2, $src3))",
- [(set (i32 IntRegs:$dst),
- (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src2 = $dst">,
- Requires<[HasV4T]>, ImmRegRel;
+//Rdd=vrmpywoh(Rss,Rtt)[:<<1]
+def M4_vrmpyoh_s0 : T_M2_vmpy<"vrmpywoh", 0b001, 0b010, 0, 0, 0>;
+def M4_vrmpyoh_s1 : T_M2_vmpy<"vrmpywoh", 0b101, 0b010, 1, 0, 0>;
+//Rdd+=vrmpyweh(Rss,Rtt)[:<<1]
+def M4_vrmpyeh_acc_s0: T_M2_vmpy_acc<"vrmpyweh", 0b001, 0b110, 0, 0>;
+def M4_vrmpyeh_acc_s1: T_M2_vmpy_acc<"vrmpyweh", 0b101, 0b110, 1, 0>;
-// Polynomial multiply words
-// Rdd=pmpyw(Rs,Rt)
-// Rxx^=pmpyw(Rs,Rt)
+//Rdd+=vrmpywoh(Rss,Rtt)[:<<1]
+def M4_vrmpyoh_acc_s0: T_M2_vmpy_acc<"vrmpywoh", 0b011, 0b110, 0, 0>;
+def M4_vrmpyoh_acc_s1: T_M2_vmpy_acc<"vrmpywoh", 0b111, 0b110, 1, 0>;
-// Vector reduce multiply word by signed half (32x16)
-// Rdd=vrmpyweh(Rss,Rtt)[:<<1]
-// Rdd=vrmpywoh(Rss,Rtt)[:<<1]
-// Rxx+=vrmpyweh(Rss,Rtt)[:<<1]
-// Rxx+=vrmpywoh(Rss,Rtt)[:<<1]
-
-// Multiply and use upper result
-// Rd=mpy(Rs,Rt.H):<<1:sat
-// Rd=mpy(Rs,Rt.L):<<1:sat
-// Rd=mpy(Rs,Rt):<<1
-// Rd=mpy(Rs,Rt):<<1:sat
-// Rd=mpysu(Rs,Rt)
-// Rx+=mpy(Rs,Rt):<<1:sat
-// Rx-=mpy(Rs,Rt):<<1:sat
-
-// Vector multiply bytes
-// Rdd=vmpybsu(Rs,Rt)
-// Rdd=vmpybu(Rs,Rt)
-// Rxx+=vmpybsu(Rs,Rt)
-// Rxx+=vmpybu(Rs,Rt)
+// Vector multiply halfwords, signed by unsigned
+// Rdd=vmpyhsu(Rs,Rt)[:<<1]:sat
+def M2_vmpy2su_s0 : T_XTYPE_mpy64 < "vmpyhsu", 0b000, 0b111, 1, 0, 0>;
+def M2_vmpy2su_s1 : T_XTYPE_mpy64 < "vmpyhsu", 0b100, 0b111, 1, 1, 0>;
+
+// Rxx+=vmpyhsu(Rs,Rt)[:<<1]:sat
+def M2_vmac2su_s0 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b011, 0b101, 1, 0, 0>;
+def M2_vmac2su_s1 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b111, 0b101, 1, 1, 0>;
// Vector polynomial multiply halfwords
// Rdd=vpmpyh(Rs,Rt)
+def M4_vpmpyh : T_XTYPE_mpy64 < "vpmpyh", 0b110, 0b111, 0, 0, 0>;
+
// Rxx^=vpmpyh(Rs,Rt)
+def M4_vpmpyh_acc : T_XTYPE_mpy64_acc < "vpmpyh", "^", 0b101, 0b111, 0, 0, 0>;
+
+// Polynomial multiply words
+// Rdd=pmpyw(Rs,Rt)
+def M4_pmpyw : T_XTYPE_mpy64 < "pmpyw", 0b010, 0b111, 0, 0, 0>;
+
+// Rxx^=pmpyw(Rs,Rt)
+def M4_pmpyw_acc : T_XTYPE_mpy64_acc < "pmpyw", "^", 0b001, 0b111, 0, 0, 0>;
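+
+// pmpyw/vpmpyh are carry-less multiplies: partial products are combined
+// with xor rather than add, which is also why the accumulating forms use
+// "^=". This is the primitive commonly used for CRC and other GF(2)
+// polynomial arithmetic.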
//===----------------------------------------------------------------------===//
// XTYPE/MPY -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// ALU64/Vector compare
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Template class for vector compare
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0 in
+class T_vcmpImm <string Str, bits<2> cmpOp, bits<2> minOp, Operand ImmOprnd>
+ : ALU64_rr <(outs PredRegs:$Pd),
+ (ins DoubleRegs:$Rss, ImmOprnd:$Imm),
+ "$Pd = "#Str#"($Rss, #$Imm)",
+ [], "", ALU64_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rss;
+ bits<32> Imm;
+ bits<8> ImmBits;
+ let ImmBits{6-0} = Imm{6-0};
+ let ImmBits{7} = !if (!eq(cmpOp,0b10), 0b0, Imm{7}); // 0 for vcmp[bhw].gtu
+
+ let IClass = 0b1101;
+
+ let Inst{27-24} = 0b1100;
+ let Inst{22-21} = cmpOp;
+ let Inst{20-16} = Rss;
+ let Inst{12-5} = ImmBits;
+ let Inst{4-3} = minOp;
+ let Inst{1-0} = Pd;
+ }
+
+// Vector compare bytes
+def A4_vcmpbgt : T_vcmp <"vcmpb.gt", 0b1010>;
+def: T_vcmp_pat<A4_vcmpbgt, setgt, v8i8>;
+
+let AsmString = "$Pd = any8(vcmpb.eq($Rss, $Rtt))" in
+def A4_vcmpbeq_any : T_vcmp <"any8(vcmpb.eq", 0b1000>;
+
+def A4_vcmpbeqi : T_vcmpImm <"vcmpb.eq", 0b00, 0b00, u8Imm>;
+def A4_vcmpbgti : T_vcmpImm <"vcmpb.gt", 0b01, 0b00, s8Imm>;
+def A4_vcmpbgtui : T_vcmpImm <"vcmpb.gtu", 0b10, 0b00, u7Imm>;
+
+// Vector compare halfwords
+def A4_vcmpheqi : T_vcmpImm <"vcmph.eq", 0b00, 0b01, s8Imm>;
+def A4_vcmphgti : T_vcmpImm <"vcmph.gt", 0b01, 0b01, s8Imm>;
+def A4_vcmphgtui : T_vcmpImm <"vcmph.gtu", 0b10, 0b01, u7Imm>;
+
+// Vector compare words
+def A4_vcmpweqi : T_vcmpImm <"vcmpw.eq", 0b00, 0b10, s8Imm>;
+def A4_vcmpwgti : T_vcmpImm <"vcmpw.gt", 0b01, 0b10, s8Imm>;
+def A4_vcmpwgtui : T_vcmpImm <"vcmpw.gtu", 0b10, 0b10, u7Imm>;
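+
+// These compare Rss element-wise (8 bytes, 4 halfwords, or 2 words) against
+// the immediate, writing per-element results into the predicate Pd. The
+// .gtu variants take u7Imm because ImmBits{7} is forced to zero above.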
//===----------------------------------------------------------------------===//
// XTYPE/SHIFT +
//===----------------------------------------------------------------------===//
-
-// Shift by immediate and accumulate.
-// Rx=add(#u8,asl(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = add(#$src1, asl($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx=add(#u8,lsr(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = add(#$src1, lsr($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx=sub(#u8,asl(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = sub(#$src1, asl($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-// Rx=sub(#u8,lsr(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = sub(#$src1, lsr($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-
-//Shift by immediate and logical.
-//Rx=and(#u8,asl(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = and(#$src1, asl($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-//Rx=and(#u8,lsr(Rx,#U5))
+// Shift by immediate and accumulate/logical.
+// Rx=add(#u8,asl(Rx,#U5)) Rx=add(#u8,lsr(Rx,#U5))
+// Rx=sub(#u8,asl(Rx,#U5)) Rx=sub(#u8,lsr(Rx,#U5))
+// Rx=and(#u8,asl(Rx,#U5)) Rx=and(#u8,lsr(Rx,#U5))
+// Rx=or(#u8,asl(Rx,#U5)) Rx=or(#u8,lsr(Rx,#U5))
let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-validSubTargets = HasV4SubT in
-def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = and(#$src1, lsr($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (and (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-//Rx=or(#u8,asl(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-AddedComplexity = 30, validSubTargets = HasV4SubT in
-def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = or(#$src1, asl($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-//Rx=or(#u8,lsr(Rx,#U5))
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8,
-AddedComplexity = 30, validSubTargets = HasV4SubT in
-def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
- (ins u8Ext:$src1, IntRegs:$src2, u5Imm:$src3),
- "$dst = or(#$src1, lsr($src2, #$src3))",
- [(set (i32 IntRegs:$dst),
- (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
- u8ExtPred:$src1))],
- "$src2 = $dst">,
- Requires<[HasV4T]>;
-
-
-//Shift by register.
-//Rd=lsl(#s6,Rt)
-let validSubTargets = HasV4SubT in {
-def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2),
- "$dst = lsl(#$src1, $src2)",
- [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1,
- (i32 IntRegs:$src2)))]>,
- Requires<[HasV4T]>;
-
-
-//Shift by register and logical.
-//Rxx^=asl(Rss,Rt)
-def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
- "$dst ^= asl($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (xor (i64 DoubleRegs:$src1), (shl (i64 DoubleRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-//Rxx^=asr(Rss,Rt)
-def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
- "$dst ^= asr($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (xor (i64 DoubleRegs:$src1), (sra (i64 DoubleRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-//Rxx^=lsl(Rss,Rt)
-def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
- "$dst ^= lsl($src2, $src3)",
- [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
- (shl (i64 DoubleRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
-
-//Rxx^=lsr(Rss,Rt)
-def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
- "$dst ^= lsr($src2, $src3)",
- [(set (i64 DoubleRegs:$dst),
- (xor (i64 DoubleRegs:$src1), (srl (i64 DoubleRegs:$src2),
- (i32 IntRegs:$src3))))],
- "$src1 = $dst">,
- Requires<[HasV4T]>;
+ hasNewValue = 1, opNewValue = 0 in
+class T_S4_ShiftOperate<string MnOp, string MnSh, SDNode Op, SDNode Sh,
+ bit asl_lsr, bits<2> MajOp, InstrItinClass Itin>
+ : MInst_acc<(outs IntRegs:$Rd), (ins u8Ext:$u8, IntRegs:$Rx, u5Imm:$U5),
+ "$Rd = "#MnOp#"(#$u8, "#MnSh#"($Rx, #$U5))",
+ [(set (i32 IntRegs:$Rd),
+ (Op (Sh I32:$Rx, u5ImmPred:$U5), u8ExtPred:$u8))],
+ "$Rd = $Rx", Itin> {
+
+ bits<5> Rd;
+ bits<8> u8;
+ bits<5> Rx;
+ bits<5> U5;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = 0b1110;
+ let Inst{23-21} = u8{7-5};
+ let Inst{20-16} = Rd;
+ let Inst{13} = u8{4};
+ let Inst{12-8} = U5;
+ let Inst{7-5} = u8{3-1};
+ let Inst{4} = asl_lsr;
+ let Inst{3} = u8{0};
+ let Inst{2-1} = MajOp;
+}
+
+multiclass T_ShiftOperate<string mnemonic, SDNode Op, bits<2> MajOp,
+ InstrItinClass Itin> {
+ def _asl_ri : T_S4_ShiftOperate<mnemonic, "asl", Op, shl, 0, MajOp, Itin>;
+ def _lsr_ri : T_S4_ShiftOperate<mnemonic, "lsr", Op, srl, 1, MajOp, Itin>;
+}
+
+let AddedComplexity = 200 in {
+ defm S4_addi : T_ShiftOperate<"add", add, 0b10, ALU64_tc_2_SLOT23>;
+ defm S4_andi : T_ShiftOperate<"and", and, 0b00, ALU64_tc_2_SLOT23>;
}
+let AddedComplexity = 30 in
+defm S4_ori : T_ShiftOperate<"or", or, 0b01, ALU64_tc_1_SLOT23>;
+
+defm S4_subi : T_ShiftOperate<"sub", sub, 0b11, ALU64_tc_1_SLOT23>;
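+
+// Usage sketch: the expansion yields names like S4_ori_asl_ri, so
+// "r0 = or(#1, asl(r0, #3))" both shifts and sets a bit in one
+// instruction (note the "$Rd = $Rx" tie: the shifted register is updated
+// in place).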
+
+let AddedComplexity = 200 in {
+ def: Pat<(add addrga:$addr, (shl I32:$src2, u5ImmPred:$src3)),
+ (S4_addi_asl_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>;
+ def: Pat<(add addrga:$addr, (srl I32:$src2, u5ImmPred:$src3)),
+ (S4_addi_lsr_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>;
+ def: Pat<(sub addrga:$addr, (shl I32:$src2, u5ImmPred:$src3)),
+ (S4_subi_asl_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>;
+ def: Pat<(sub addrga:$addr, (srl I32:$src2, u5ImmPred:$src3)),
+ (S4_subi_lsr_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>;
+}
+
+// Vector conditional negate
+// Rdd=vcnegh(Rss,Rt)
+let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
+def S2_vcnegh : T_S3op_shiftVect < "vcnegh", 0b11, 0b01>;
+
+// Rd=[cround|round](Rs,Rt)
+let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23 in {
+ def A4_cround_rr : T_S3op_3 < "cround", IntRegs, 0b11, 0b00>;
+ def A4_round_rr : T_S3op_3 < "round", IntRegs, 0b11, 0b10>;
+}
+
+// Rd=round(Rs,Rt):sat
+let hasNewValue = 1, Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
+def A4_round_rr_sat : T_S3op_3 < "round", IntRegs, 0b11, 0b11, 1>;
+
+// Rd=[cmpyiwh|cmpyrwh](Rss,Rt):<<1:rnd:sat
+let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23 in {
+ def M4_cmpyi_wh : T_S3op_8<"cmpyiwh", 0b100, 1, 1, 1>;
+ def M4_cmpyr_wh : T_S3op_8<"cmpyrwh", 0b110, 1, 1, 1>;
+}
+
+// Rdd=[add|sub](Rss,Rtt,Px):carry
+let isPredicateLate = 1, hasSideEffects = 0 in
+class T_S3op_carry <string mnemonic, bits<3> MajOp>
+ : SInst < (outs DoubleRegs:$Rdd, PredRegs:$Px),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, PredRegs:$Pu),
+ "$Rdd = "#mnemonic#"($Rss, $Rtt, $Pu):carry",
+ [], "$Px = $Pu", S_3op_tc_1_SLOT23 > {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<5> Rtt;
+ bits<2> Pu;
+
+ let IClass = 0b1100;
+
+ let Inst{27-24} = 0b0010;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ let Inst{6-5} = Pu;
+ let Inst{4-0} = Rdd;
+ }
+
+def A4_addp_c : T_S3op_carry < "add", 0b110 >;
+def A4_subp_c : T_S3op_carry < "sub", 0b111 >;
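+
+// Carry flows through the predicate operand (note "$Px = $Pu"), so
+// multi-word arithmetic can chain: a 128-bit add is two A4_addp_c
+// instructions reusing the same predicate register (sketch, not from
+// this patch).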
+
+let Itinerary = S_3op_tc_3_SLOT23, hasSideEffects = 0 in
+class T_S3op_6 <string mnemonic, bits<3> MinOp, bit isUnsigned>
+ : SInst <(outs DoubleRegs:$Rxx),
+ (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Ru),
+ "$Rxx = "#mnemonic#"($Rss, $Ru)" ,
+ [] , "$dst2 = $Rxx"> {
+ bits<5> Rxx;
+ bits<5> Rss;
+ bits<5> Ru;
+
+ let IClass = 0b1100;
+
+ let Inst{27-21} = 0b1011001;
+ let Inst{20-16} = Rss;
+ let Inst{13} = isUnsigned;
+ let Inst{12-8} = Rxx;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Ru;
+ }
+
+// Vector reduce maximum halfwords
+// Rxx=vrmax[u]h(Rss,Ru)
+def A4_vrmaxh : T_S3op_6 < "vrmaxh", 0b001, 0>;
+def A4_vrmaxuh : T_S3op_6 < "vrmaxuh", 0b001, 1>;
+
+// Vector reduce maximum words
+// Rxx=vrmax[u]w(Rss,Ru)
+def A4_vrmaxw : T_S3op_6 < "vrmaxw", 0b010, 0>;
+def A4_vrmaxuw : T_S3op_6 < "vrmaxuw", 0b010, 1>;
+
+// Vector reduce minimum halfwords
+// Rxx=vrmin[u]h(Rss,Ru)
+def A4_vrminh : T_S3op_6 < "vrminh", 0b101, 0>;
+def A4_vrminuh : T_S3op_6 < "vrminuh", 0b101, 1>;
+
+// Vector reduce minimum words
+// Rxx=vrmin[u]w(Rss,Ru)
+def A4_vrminw : T_S3op_6 < "vrminw", 0b110, 0>;
+def A4_vrminuw : T_S3op_6 < "vrminuw", 0b110, 1>;
+
+// Shift an immediate left by register amount.
+let hasNewValue = 1, hasSideEffects = 0 in
+def S4_lsli: SInst <(outs IntRegs:$Rd), (ins s6Imm:$s6, IntRegs:$Rt),
+ "$Rd = lsl(#$s6, $Rt)" ,
+ [(set (i32 IntRegs:$Rd), (shl s6ImmPred:$s6,
+ (i32 IntRegs:$Rt)))],
+ "", S_3op_tc_1_SLOT23> {
+ bits<5> Rd;
+ bits<6> s6;
+ bits<5> Rt;
+
+ let IClass = 0b1100;
+
+ let Inst{27-22} = 0b011010;
+ let Inst{20-16} = s6{5-1};
+ let Inst{12-8} = Rt;
+ let Inst{7-6} = 0b11;
+ let Inst{4-0} = Rd;
+ let Inst{5} = s6{0};
+ }
+
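+// Typical use (sketch): building a one-bit mask at a variable position,
+// "r0 = lsl(#1, r1)" computes 1 << r1 without materializing the constant
+// in a register first.
+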
//===----------------------------------------------------------------------===//
// XTYPE/SHIFT -
//===----------------------------------------------------------------------===//
@@ -1830,7 +2810,7 @@ class MemOp_rr_base <string opc, bits<2> opcBits, Operand ImmOp,
(ins IntRegs:$base, ImmOp:$offset, IntRegs:$delta),
opc#"($base+#$offset)"#memOp#"$delta",
[]>,
- Requires<[HasV4T, UseMEMOP]> {
+ Requires<[UseMEMOP]> {
bits<5> base;
bits<5> delta;
@@ -1841,6 +2821,7 @@ class MemOp_rr_base <string opc, bits<2> opcBits, Operand ImmOp,
!if (!eq(opcBits, 0b01), offset{6-1},
!if (!eq(opcBits, 0b10), offset{7-2},0)));
+ let opExtentAlign = opcBits;
let IClass = 0b0011;
let Inst{27-24} = 0b1110;
let Inst{22-21} = opcBits;
@@ -1861,7 +2842,7 @@ class MemOp_ri_base <string opc, bits<2> opcBits, Operand ImmOp,
opc#"($base+#$offset)"#memOp#"#$delta"
#!if(memOpBits{1},")", ""), // clrbit, setbit - include ')'
[]>,
- Requires<[HasV4T, UseMEMOP]> {
+ Requires<[UseMEMOP]> {
bits<5> base;
bits<5> delta;
@@ -1872,6 +2853,7 @@ class MemOp_ri_base <string opc, bits<2> opcBits, Operand ImmOp,
!if (!eq(opcBits, 0b01), offset{6-1},
!if (!eq(opcBits, 0b10), offset{7-2},0)));
+ let opExtentAlign = opcBits;
let IClass = 0b0011;
let Inst{27-24} = 0b1111;
let Inst{22-21} = opcBits;
@@ -1884,36 +2866,35 @@ class MemOp_ri_base <string opc, bits<2> opcBits, Operand ImmOp,
// multiclass to define MemOp instructions with register operand.
multiclass MemOp_rr<string opc, bits<2> opcBits, Operand ImmOp> {
- def _ADD#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " += ", 0b00>; // add
- def _SUB#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " -= ", 0b01>; // sub
- def _AND#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " &= ", 0b10>; // and
- def _OR#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " |= ", 0b11>; // or
+ def L4_add#NAME : MemOp_rr_base <opc, opcBits, ImmOp, " += ", 0b00>; // add
+ def L4_sub#NAME : MemOp_rr_base <opc, opcBits, ImmOp, " -= ", 0b01>; // sub
+ def L4_and#NAME : MemOp_rr_base <opc, opcBits, ImmOp, " &= ", 0b10>; // and
+ def L4_or#NAME : MemOp_rr_base <opc, opcBits, ImmOp, " |= ", 0b11>; // or
}
// multiclass to define MemOp instructions with immediate Operand.
multiclass MemOp_ri<string opc, bits<2> opcBits, Operand ImmOp> {
- def _ADD#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " += ", 0b00 >;
- def _SUB#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " -= ", 0b01 >;
- def _CLRBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =clrbit(", 0b10>;
- def _SETBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =setbit(", 0b11>;
+ def L4_iadd#NAME : MemOp_ri_base <opc, opcBits, ImmOp, " += ", 0b00 >;
+ def L4_isub#NAME : MemOp_ri_base <opc, opcBits, ImmOp, " -= ", 0b01 >;
+ def L4_iand#NAME : MemOp_ri_base<opc, opcBits, ImmOp, " = clrbit(", 0b10>;
+ def L4_ior#NAME : MemOp_ri_base<opc, opcBits, ImmOp, " = setbit(", 0b11>;
}
multiclass MemOp_base <string opc, bits<2> opcBits, Operand ImmOp> {
- defm r : MemOp_rr <opc, opcBits, ImmOp>;
- defm i : MemOp_ri <opc, opcBits, ImmOp>;
+ defm _#NAME : MemOp_rr <opc, opcBits, ImmOp>;
+ defm _#NAME : MemOp_ri <opc, opcBits, ImmOp>;
}
// Define MemOp instructions.
-let isExtendable = 1, opExtendable = 1, isExtentSigned = 0,
-validSubTargets =HasV4SubT in {
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 0 in {
let opExtentBits = 6, accessSize = ByteAccess in
- defm MemOPb : MemOp_base <"memb", 0b00, u6_0Ext>;
+ defm memopb_io : MemOp_base <"memb", 0b00, u6_0Ext>;
let opExtentBits = 7, accessSize = HalfWordAccess in
- defm MemOPh : MemOp_base <"memh", 0b01, u6_1Ext>;
+ defm memoph_io : MemOp_base <"memh", 0b01, u6_1Ext>;
let opExtentBits = 8, accessSize = WordAccess in
- defm MemOPw : MemOp_base <"memw", 0b10, u6_2Ext>;
+ defm memopw_io : MemOp_base <"memw", 0b10, u6_2Ext>;
}
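+
+// After expansion the names compose as L4_<op>_memop<b|h|w>_io, e.g.
+// L4_add_memopw_io for "memw(Rs+#u6:2) += Rt" and L4_iadd_memopw_io for
+// the immediate form; the patterns below refer to them by these names.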
//===----------------------------------------------------------------------===//
@@ -1926,40 +2907,40 @@ validSubTargets =HasV4SubT in {
multiclass MemOpi_u5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred,
InstHexagon MI, SDNode OpNode> {
let AddedComplexity = 180 in
- def : Pat < (stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend),
- IntRegs:$addr),
- (MI IntRegs:$addr, #0, u5ImmPred:$addend )>;
+ def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend),
+ IntRegs:$addr),
+ (MI IntRegs:$addr, 0, u5ImmPred:$addend)>;
let AddedComplexity = 190 in
- def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, ExtPred:$offset)),
- u5ImmPred:$addend),
- (add IntRegs:$base, ExtPred:$offset)),
- (MI IntRegs:$base, ExtPred:$offset, u5ImmPred:$addend)>;
+ def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, ExtPred:$offset)),
+ u5ImmPred:$addend),
+ (add IntRegs:$base, ExtPred:$offset)),
+ (MI IntRegs:$base, ExtPred:$offset, u5ImmPred:$addend)>;
}
multiclass MemOpi_u5ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred,
InstHexagon addMI, InstHexagon subMI> {
- defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, addMI, add>;
- defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, subMI, sub>;
+ defm: MemOpi_u5Pats<ldOp, stOp, ExtPred, addMI, add>;
+ defm: MemOpi_u5Pats<ldOp, stOp, ExtPred, subMI, sub>;
}
multiclass MemOpi_u5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
// Half Word
- defm : MemOpi_u5ALUOp <ldOpHalf, truncstorei16, u6_1ExtPred,
- MemOPh_ADDi_V4, MemOPh_SUBi_V4>;
+ defm: MemOpi_u5ALUOp <ldOpHalf, truncstorei16, u6_1ExtPred,
+ L4_iadd_memoph_io, L4_isub_memoph_io>;
// Byte
- defm : MemOpi_u5ALUOp <ldOpByte, truncstorei8, u6ExtPred,
- MemOPb_ADDi_V4, MemOPb_SUBi_V4>;
+ defm: MemOpi_u5ALUOp <ldOpByte, truncstorei8, u6ExtPred,
+ L4_iadd_memopb_io, L4_isub_memopb_io>;
}
-let Predicates = [HasV4T, UseMEMOP] in {
- defm : MemOpi_u5ExtType<zextloadi8, zextloadi16>; // zero extend
- defm : MemOpi_u5ExtType<sextloadi8, sextloadi16>; // sign extend
- defm : MemOpi_u5ExtType<extloadi8, extloadi16>; // any extend
+let Predicates = [UseMEMOP] in {
+ defm: MemOpi_u5ExtType<zextloadi8, zextloadi16>; // zero extend
+ defm: MemOpi_u5ExtType<sextloadi8, sextloadi16>; // sign extend
+ defm: MemOpi_u5ExtType<extloadi8, extloadi16>; // any extend
// Word
- defm : MemOpi_u5ALUOp <load, store, u6_2ExtPred, MemOPw_ADDi_V4,
- MemOPw_SUBi_V4>;
+ defm: MemOpi_u5ALUOp <load, store, u6_2ExtPred, L4_iadd_memopw_io,
+ L4_isub_memopw_io>;
}
//===----------------------------------------------------------------------===//
@@ -1970,37 +2951,36 @@ let Predicates = [HasV4T, UseMEMOP] in {
//===----------------------------------------------------------------------===//
multiclass MemOpi_m5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
- PatLeaf immPred, ComplexPattern addrPred,
- SDNodeXForm xformFunc, InstHexagon MI> {
+ PatLeaf immPred, SDNodeXForm xformFunc,
+ InstHexagon MI> {
let AddedComplexity = 190 in
- def : Pat <(stOp (add (ldOp IntRegs:$addr), immPred:$subend),
- IntRegs:$addr),
- (MI IntRegs:$addr, #0, (xformFunc immPred:$subend) )>;
+ def: Pat<(stOp (add (ldOp IntRegs:$addr), immPred:$subend), IntRegs:$addr),
+ (MI IntRegs:$addr, 0, (xformFunc immPred:$subend))>;
let AddedComplexity = 195 in
- def : Pat<(stOp (add (ldOp (add IntRegs:$base, extPred:$offset)),
- immPred:$subend),
- (add IntRegs:$base, extPred:$offset)),
- (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$subend))>;
+ def: Pat<(stOp (add (ldOp (add IntRegs:$base, extPred:$offset)),
+ immPred:$subend),
+ (add IntRegs:$base, extPred:$offset)),
+ (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$subend))>;
}
multiclass MemOpi_m5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
// Half Word
- defm : MemOpi_m5Pats <ldOpHalf, truncstorei16, u6_1ExtPred, m5HImmPred,
- ADDRriU6_1, MEMOPIMM_HALF, MemOPh_SUBi_V4>;
+ defm: MemOpi_m5Pats <ldOpHalf, truncstorei16, u6_1ExtPred, m5HImmPred,
+ MEMOPIMM_HALF, L4_isub_memoph_io>;
// Byte
- defm : MemOpi_m5Pats <ldOpByte, truncstorei8, u6ExtPred, m5BImmPred,
- ADDRriU6_0, MEMOPIMM_BYTE, MemOPb_SUBi_V4>;
+ defm: MemOpi_m5Pats <ldOpByte, truncstorei8, u6ExtPred, m5BImmPred,
+ MEMOPIMM_BYTE, L4_isub_memopb_io>;
}
-let Predicates = [HasV4T, UseMEMOP] in {
- defm : MemOpi_m5ExtType<zextloadi8, zextloadi16>; // zero extend
- defm : MemOpi_m5ExtType<sextloadi8, sextloadi16>; // sign extend
- defm : MemOpi_m5ExtType<extloadi8, extloadi16>; // any extend
+let Predicates = [UseMEMOP] in {
+ defm: MemOpi_m5ExtType<zextloadi8, zextloadi16>; // zero extend
+ defm: MemOpi_m5ExtType<sextloadi8, sextloadi16>; // sign extend
+ defm: MemOpi_m5ExtType<extloadi8, extloadi16>; // any extend
// Word
- defm : MemOpi_m5Pats <load, store, u6_2ExtPred, m5ImmPred,
- ADDRriU6_2, MEMOPIMM, MemOPw_SUBi_V4>;
+ defm: MemOpi_m5Pats <load, store, u6_2ExtPred, m5ImmPred,
+ MEMOPIMM, L4_isub_memopw_io>;
}
//===----------------------------------------------------------------------===//
@@ -2010,52 +2990,50 @@ let Predicates = [HasV4T, UseMEMOP] in {
//===----------------------------------------------------------------------===//
multiclass MemOpi_bitPats <PatFrag ldOp, PatFrag stOp, PatLeaf immPred,
- PatLeaf extPred, ComplexPattern addrPred,
- SDNodeXForm xformFunc, InstHexagon MI, SDNode OpNode> {
+ PatLeaf extPred, SDNodeXForm xformFunc, InstHexagon MI,
+ SDNode OpNode> {
// mem[bhw](Rs+#u6:[012]) = [clrbit|setbit](#U5)
let AddedComplexity = 250 in
- def : Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
- immPred:$bitend),
- (add IntRegs:$base, extPred:$offset)),
- (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>;
+ def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
+ immPred:$bitend),
+ (add IntRegs:$base, extPred:$offset)),
+ (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>;
// mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
let AddedComplexity = 225 in
- def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)),
- immPred:$bitend),
- (addrPred (i32 IntRegs:$addr), extPred:$offset)),
- (MI IntRegs:$addr, extPred:$offset, (xformFunc immPred:$bitend))>;
+ def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), immPred:$bitend), IntRegs:$addr),
+ (MI IntRegs:$addr, 0, (xformFunc immPred:$bitend))>;
}
-multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
+multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf> {
// Byte - clrbit
- defm : MemOpi_bitPats<ldOpByte, truncstorei8, Clr3ImmPred, u6ExtPred,
- ADDRriU6_0, CLRMEMIMM_BYTE, MemOPb_CLRBITi_V4, and>;
+ defm: MemOpi_bitPats<ldOpByte, truncstorei8, Clr3ImmPred, u6ExtPred,
+ CLRMEMIMM_BYTE, L4_iand_memopb_io, and>;
// Byte - setbit
- defm : MemOpi_bitPats<ldOpByte, truncstorei8, Set3ImmPred, u6ExtPred,
- ADDRriU6_0, SETMEMIMM_BYTE, MemOPb_SETBITi_V4, or>;
+ defm: MemOpi_bitPats<ldOpByte, truncstorei8, Set3ImmPred, u6ExtPred,
+ SETMEMIMM_BYTE, L4_ior_memopb_io, or>;
// Half Word - clrbit
- defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Clr4ImmPred, u6_1ExtPred,
- ADDRriU6_1, CLRMEMIMM_SHORT, MemOPh_CLRBITi_V4, and>;
+ defm: MemOpi_bitPats<ldOpHalf, truncstorei16, Clr4ImmPred, u6_1ExtPred,
+ CLRMEMIMM_SHORT, L4_iand_memoph_io, and>;
// Half Word - setbit
- defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Set4ImmPred, u6_1ExtPred,
- ADDRriU6_1, SETMEMIMM_SHORT, MemOPh_SETBITi_V4, or>;
+ defm: MemOpi_bitPats<ldOpHalf, truncstorei16, Set4ImmPred, u6_1ExtPred,
+ SETMEMIMM_SHORT, L4_ior_memoph_io, or>;
}
-let Predicates = [HasV4T, UseMEMOP] in {
+let Predicates = [UseMEMOP] in {
// mem[bh](Rs+#0) = [clrbit|setbit](#U5)
// mem[bh](Rs+#u6:[01]) = [clrbit|setbit](#U5)
- defm : MemOpi_bitExtType<zextloadi8, zextloadi16>; // zero extend
- defm : MemOpi_bitExtType<sextloadi8, sextloadi16>; // sign extend
- defm : MemOpi_bitExtType<extloadi8, extloadi16>; // any extend
+ defm: MemOpi_bitExtType<zextloadi8, zextloadi16>; // zero extend
+ defm: MemOpi_bitExtType<sextloadi8, sextloadi16>; // sign extend
+ defm: MemOpi_bitExtType<extloadi8, extloadi16>; // any extend
// memw(Rs+#0) = [clrbit|setbit](#U5)
// memw(Rs+#u6:2) = [clrbit|setbit](#U5)
- defm : MemOpi_bitPats<load, store, Clr5ImmPred, u6_2ExtPred, ADDRriU6_2,
- CLRMEMIMM, MemOPw_CLRBITi_V4, and>;
- defm : MemOpi_bitPats<load, store, Set5ImmPred, u6_2ExtPred, ADDRriU6_2,
- SETMEMIMM, MemOPw_SETBITi_V4, or>;
+ defm: MemOpi_bitPats<load, store, Clr5ImmPred, u6_2ExtPred, CLRMEMIMM,
+ L4_iand_memopw_io, and>;
+ defm: MemOpi_bitPats<load, store, Set5ImmPred, u6_2ExtPred, SETMEMIMM,
+ L4_ior_memopw_io, or>;
}
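+
+// Example of what the bit memops buy (sketch): a read-modify-write such as
+// "flags |= 0x10" on a byte in memory becomes the single instruction
+//   memb(r0+#0) = setbit(#4)
+// instead of a separate load, or, and store.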
//===----------------------------------------------------------------------===//
@@ -2065,54 +3043,51 @@ let Predicates = [HasV4T, UseMEMOP] in {
// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
//===----------------------------------------------------------------------===//
-multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, ComplexPattern addrPred,
- PatLeaf extPred, InstHexagon MI, SDNode OpNode> {
+multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
+ InstHexagon MI, SDNode OpNode> {
let AddedComplexity = 141 in
// mem[bhw](Rs+#0) [+-&|]= Rt
- def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)),
- (i32 IntRegs:$addend)),
- (addrPred (i32 IntRegs:$addr), extPred:$offset)),
- (MI IntRegs:$addr, extPred:$offset, (i32 IntRegs:$addend) )>;
+ def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), (i32 IntRegs:$addend)),
+ IntRegs:$addr),
+ (MI IntRegs:$addr, 0, (i32 IntRegs:$addend))>;
// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
let AddedComplexity = 150 in
- def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
- (i32 IntRegs:$orend)),
- (add IntRegs:$base, extPred:$offset)),
- (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend) )>;
+ def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
+ (i32 IntRegs:$orend)),
+ (add IntRegs:$base, extPred:$offset)),
+ (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend))>;
}
-multiclass MemOPr_ALUOp<PatFrag ldOp, PatFrag stOp,
- ComplexPattern addrPred, PatLeaf extPred,
+multiclass MemOPr_ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
InstHexagon addMI, InstHexagon subMI,
- InstHexagon andMI, InstHexagon orMI > {
-
- defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, addMI, add>;
- defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, subMI, sub>;
- defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, andMI, and>;
- defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, orMI, or>;
+ InstHexagon andMI, InstHexagon orMI> {
+ defm: MemOpr_Pats <ldOp, stOp, extPred, addMI, add>;
+ defm: MemOpr_Pats <ldOp, stOp, extPred, subMI, sub>;
+ defm: MemOpr_Pats <ldOp, stOp, extPred, andMI, and>;
+ defm: MemOpr_Pats <ldOp, stOp, extPred, orMI, or>;
}
multiclass MemOPr_ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
// Half Word
- defm : MemOPr_ALUOp <ldOpHalf, truncstorei16, ADDRriU6_1, u6_1ExtPred,
- MemOPh_ADDr_V4, MemOPh_SUBr_V4,
- MemOPh_ANDr_V4, MemOPh_ORr_V4>;
+ defm: MemOPr_ALUOp <ldOpHalf, truncstorei16, u6_1ExtPred,
+ L4_add_memoph_io, L4_sub_memoph_io,
+ L4_and_memoph_io, L4_or_memoph_io>;
// Byte
- defm : MemOPr_ALUOp <ldOpByte, truncstorei8, ADDRriU6_0, u6ExtPred,
- MemOPb_ADDr_V4, MemOPb_SUBr_V4,
- MemOPb_ANDr_V4, MemOPb_ORr_V4>;
+ defm: MemOPr_ALUOp <ldOpByte, truncstorei8, u6ExtPred,
+ L4_add_memopb_io, L4_sub_memopb_io,
+ L4_and_memopb_io, L4_or_memopb_io>;
}
// Define 'def Pats' for MemOps with register addend.
-let Predicates = [HasV4T, UseMEMOP] in {
+let Predicates = [UseMEMOP] in {
// Byte, Half Word
- defm : MemOPr_ExtType<zextloadi8, zextloadi16>; // zero extend
- defm : MemOPr_ExtType<sextloadi8, sextloadi16>; // sign extend
- defm : MemOPr_ExtType<extloadi8, extloadi16>; // any extend
+ defm: MemOPr_ExtType<zextloadi8, zextloadi16>; // zero extend
+ defm: MemOPr_ExtType<sextloadi8, sextloadi16>; // sign extend
+ defm: MemOPr_ExtType<extloadi8, extloadi16>; // any extend
// Word
- defm : MemOPr_ALUOp <load, store, ADDRriU6_2, u6_2ExtPred, MemOPw_ADDr_V4,
- MemOPw_SUBr_V4, MemOPw_ANDr_V4, MemOPw_ORr_V4 >;
+ defm: MemOPr_ALUOp <load, store, u6_2ExtPred, L4_add_memopw_io,
+ L4_sub_memopw_io, L4_and_memopw_io, L4_or_memopw_io>;
}
//===----------------------------------------------------------------------===//
@@ -2130,123 +3105,28 @@ let Predicates = [HasV4T, UseMEMOP] in {
// incorrect code for negative numbers.
// Pd=cmpb.eq(Rs,#u8)
-let isCompare = 1, isExtendable = 1, opExtendable = 2, hasSideEffects = 0,
- validSubTargets = HasV4SubT in
-class CMP_NOT_REG_IMM<string OpName, bits<2> op, Operand ImmOp,
- list<dag> Pattern>
- : ALU32Inst <(outs PredRegs:$dst), (ins IntRegs:$src1, ImmOp:$src2),
- "$dst = !cmp."#OpName#"($src1, #$src2)",
- Pattern,
- "", ALU32_2op_tc_2early_SLOT0123> {
- bits<2> dst;
- bits<5> src1;
- bits<10> src2;
+// p=!cmp.eq(Rs,#s10), p=!cmp.gt(Rs,#s10), p=!cmp.gtu(Rs,#u9)
+def C4_cmpneqi : T_CMP <"cmp.eq", 0b00, 1, s10Ext>;
+def C4_cmpltei : T_CMP <"cmp.gt", 0b01, 1, s10Ext>;
+def C4_cmplteui : T_CMP <"cmp.gtu", 0b10, 1, u9Ext>;
- let IClass = 0b0111;
- let Inst{27-24} = 0b0101;
- let Inst{23-22} = op;
- let Inst{20-16} = src1;
- let Inst{21} = !if (!eq(OpName, "gtu"), 0b0, src2{9});
- let Inst{13-5} = src2{8-0};
- let Inst{4-2} = 0b100;
- let Inst{1-0} = dst;
-}
-
-let opExtentBits = 10, isExtentSigned = 1 in {
-def C4_cmpneqi : CMP_NOT_REG_IMM <"eq", 0b00, s10Ext, [(set (i1 PredRegs:$dst),
- (setne (i32 IntRegs:$src1), s10ExtPred:$src2))]>;
-
-def C4_cmpltei : CMP_NOT_REG_IMM <"gt", 0b01, s10Ext, [(set (i1 PredRegs:$dst),
- (not (setgt (i32 IntRegs:$src1), s10ExtPred:$src2)))]>;
-
-}
-let opExtentBits = 9 in
-def C4_cmplteui : CMP_NOT_REG_IMM <"gtu", 0b10, u9Ext, [(set (i1 PredRegs:$dst),
- (not (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)))]>;
-
-
-
-// p=!cmp.eq(r1,r2)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPnotEQ_rr : ALU32_rr<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = !cmp.eq($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2)))]>,
- Requires<[HasV4T]>;
-
-// p=!cmp.gt(r1,r2)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPnotGT_rr : ALU32_rr<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = !cmp.gt($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (not (setgt (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>,
- Requires<[HasV4T]>;
-
-
-// p=!cmp.gtu(r1,r2)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPnotGTU_rr : ALU32_rr<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = !cmp.gtu($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (not (setugt (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>,
- Requires<[HasV4T]>;
-
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPbEQri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, u8Imm:$src2),
- "$dst = cmpb.eq($src1, #$src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2))]>,
- Requires<[HasV4T]>;
-
-def : Pat <(brcond (i1 (setne (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2)),
- bb:$offset),
- (JMP_f (CMPbEQri_V4 (i32 IntRegs:$src1), u8ImmPred:$src2),
- bb:$offset)>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.eq(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmpb.eq($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (and (xor (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)), 255), 0))]>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.eq(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmpb.eq($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (shl (i32 IntRegs:$src1), (i32 24)),
- (shl (i32 IntRegs:$src2), (i32 24))))]>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.gt(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmpb.gt($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setgt (shl (i32 IntRegs:$src1), (i32 24)),
- (shl (i32 IntRegs:$src2), (i32 24))))]>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.gtu(Rs,#u7)
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 7,
-isCompare = 1, validSubTargets = HasV4SubT, CextOpcode = "CMPbGTU", InputType = "imm" in
-def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, u7Ext:$src2),
- "$dst = cmpb.gtu($src1, #$src2)",
- [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
- u7ExtPred:$src2))]>,
- Requires<[HasV4T]>, ImmRegRel;
+def : T_CMP_pat <C4_cmpneqi, setne, s10ExtPred>;
+def : T_CMP_pat <C4_cmpltei, setle, s10ExtPred>;
+def : T_CMP_pat <C4_cmplteui, setule, u9ImmPred>;
+
+// rs <= rt -> !(rs > rt).
+/*
+def: Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)),
+ (C2_not (C2_cmpgti IntRegs:$src1, s10ExtPred:$src2))>;
+// (C4_cmpltei IntRegs:$src1, s10ExtPred:$src2)>;
+*/
+// Map cmplt(Rs, Imm) -> !cmpgt(Rs, Imm-1).
+def: Pat<(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)),
+ (C4_cmpltei IntRegs:$src1, (DEC_CONST_SIGNED s8ExtPred:$src2))>;
+
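+// Worked example: "r0 < 7" is rewritten as "!(r0 > 6)", i.e.
+// p0 = !cmp.gt(r0, #6), with DEC_CONST_SIGNED supplying the Imm-1.
+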
+// rs != rt -> !(rs == rt).
+def: Pat<(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)),
+ (C4_cmpneqi IntRegs:$src1, s10ExtPred:$src2)>;
// SDNode for converting immediate C to C-1.
def DEC_CONST_BYTE : SDNodeXForm<imm, [{
@@ -2263,10 +3143,9 @@ def DEC_CONST_BYTE : SDNodeXForm<imm, [{
// if (!Pd.new) Rd=#0
def : Pat <(i32 (zext (i1 (seteq (i32 (and (i32 IntRegs:$Rs), 255)),
u8ExtPred:$u8)))),
- (i32 (TFR_condset_ii (i1 (CMPbEQri_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setne ( and(Rs, 255), u8))
@@ -2276,10 +3155,9 @@ def : Pat <(i32 (zext (i1 (seteq (i32 (and (i32 IntRegs:$Rs), 255)),
// if (!Pd.new) Rd=#1
def : Pat <(i32 (zext (i1 (setne (i32 (and (i32 IntRegs:$Rs), 255)),
u8ExtPred:$u8)))),
- (i32 (TFR_condset_ii (i1 (CMPbEQri_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( seteq (Rs, and(Rt, 255)))
@@ -2289,10 +3167,9 @@ def : Pat <(i32 (zext (i1 (setne (i32 (and (i32 IntRegs:$Rs), 255)),
// if (!Pd.new) Rd=#0
def : Pat <(i32 (zext (i1 (seteq (i32 IntRegs:$Rt),
(i32 (and (i32 IntRegs:$Rs), 255)))))),
- (i32 (TFR_condset_ii (i1 (CMPbEQrr_ubub_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setne (Rs, and(Rt, 255)))
@@ -2302,10 +3179,9 @@ def : Pat <(i32 (zext (i1 (seteq (i32 IntRegs:$Rt),
// if (!Pd.new) Rd=#1
def : Pat <(i32 (zext (i1 (setne (i32 IntRegs:$Rt),
(i32 (and (i32 IntRegs:$Rs), 255)))))),
- (i32 (TFR_condset_ii (i1 (CMPbEQrr_ubub_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setugt ( and(Rs, 255), u8))
@@ -2315,10 +3191,9 @@ def : Pat <(i32 (zext (i1 (setne (i32 IntRegs:$Rt),
// if (!Pd.new) Rd=#0
def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 255)),
u8ExtPred:$u8)))),
- (i32 (TFR_condset_ii (i1 (CMPbGTUri_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setugt ( and(Rs, 254), u8))
@@ -2328,10 +3203,9 @@ def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 255)),
// if (!Pd.new) Rd=#0
def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 254)),
u8ExtPred:$u8)))),
- (i32 (TFR_condset_ii (i1 (CMPbGTUri_V4 (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setult ( Rs, Rt))
@@ -2341,10 +3215,9 @@ def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 254)),
// if (!Pd.new) Rd=#0
// cmp.ltu(Rs, Rt) -> cmp.gtu(Rt, Rs)
def : Pat <(i32 (zext (i1 (setult (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTUrr (i32 IntRegs:$Rt),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setlt ( Rs, Rt))
@@ -2354,10 +3227,9 @@ def : Pat <(i32 (zext (i1 (setult (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (!Pd.new) Rd=#0
// cmp.lt(Rs, Rt) -> cmp.gt(Rt, Rs)
def : Pat <(i32 (zext (i1 (setlt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTrr (i32 IntRegs:$Rt),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setugt ( Rs, Rt))
@@ -2366,10 +3238,9 @@ def : Pat <(i32 (zext (i1 (setlt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (Pd.new) Rd=#1
// if (!Pd.new) Rd=#0
def : Pat <(i32 (zext (i1 (setugt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTUrr (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// This pattern interferes with CoreMark performance; it is not implemented
// at this time.
@@ -2388,10 +3259,9 @@ def : Pat <(i32 (zext (i1 (setugt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (!Pd.new) Rd=#1
// cmp.ltu(Rs, Rt) -> cmp.gtu(Rt, Rs)
def : Pat <(i32 (zext (i1 (setuge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTUrr (i32 IntRegs:$Rt),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setge ( Rs, Rt))
@@ -2401,10 +3271,9 @@ def : Pat <(i32 (zext (i1 (setuge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (!Pd.new) Rd=#1
// cmp.lt(Rs, Rt) -> cmp.gt(Rt, Rs)
def : Pat <(i32 (zext (i1 (setge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTrr (i32 IntRegs:$Rt),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setule ( Rs, Rt))
@@ -2413,10 +3282,9 @@ def : Pat <(i32 (zext (i1 (setge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (Pd.new) Rd=#0
// if (!Pd.new) Rd=#1
def : Pat <(i32 (zext (i1 (setule (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTUrr (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setle ( Rs, Rt))
@@ -2425,16 +3293,15 @@ def : Pat <(i32 (zext (i1 (setule (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// if (Pd.new) Rd=#0
// if (!Pd.new) Rd=#1
def : Pat <(i32 (zext (i1 (setle (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
- (i32 (TFR_condset_ii (i1 (CMPGTrr (i32 IntRegs:$Rs),
+ (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setult ( and(Rs, 255), u8))
// Use the isdigit transformation below
-// Generate code of the form 'mux_ii(cmpbgtu(Rdd, C-1),0,1)'
+// Generate code of the form 'C2_muxii(cmpbgtui(Rdd, C-1),0,1)'
// for C code of the form r = ((c>='0') & (c<='9')) ? 1 : 0;.
// The isdigit transformation relies on two 'clever' aspects:
// 1) The data type is unsigned which allows us to eliminate a zero test after
@@ -2447,961 +3314,1044 @@ def : Pat <(i32 (zext (i1 (setle (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
// The code is transformed upstream of llvm into
// retval = (c-48) < 10 ? 1 : 0;
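+// Spelled out (sketch): when c < '0', the unsigned value (c - 48) wraps far
+// above 9, so the single compare "(c - 48) < 10" covers both bounds; the
+// pattern below matches this as !cmpb.gtu(r, #9), with DEC_CONST_BYTE
+// turning C = 10 into 9 and C2_muxii(..., 0, 1) providing the negation.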
let AddedComplexity = 139 in
-def : Pat <(i32 (zext (i1 (setult (i32 (and (i32 IntRegs:$src1), 255)),
- u7StrictPosImmPred:$src2)))),
- (i32 (MUX_ii (i1 (CMPbGTUri_V4 (i32 IntRegs:$src1),
- (DEC_CONST_BYTE u7StrictPosImmPred:$src2))),
- 0, 1))>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.gtu(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT, CextOpcode = "CMPbGTU",
-InputType = "reg" in
-def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmpb.gtu($src1, $src2)",
- [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
- (and (i32 IntRegs:$src2), 255)))]>,
- Requires<[HasV4T]>, ImmRegRel;
-
-// Following instruction is not being extended as it results into the incorrect
-// code for negative numbers.
-
-// Signed half compare(.eq) ri.
-// Pd=cmph.eq(Rs,#s8)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPhEQri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, s8Imm:$src2),
- "$dst = cmph.eq($src1, #$src2)",
- [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1), 65535),
- s8ImmPred:$src2))]>,
- Requires<[HasV4T]>;
-
-// Signed half compare(.eq) rr.
-// Case 1: xor + and, then compare:
-// r0=xor(r0,r1)
-// r0=and(r0,#0xffff)
-// p0=cmp.eq(r0,#0)
-// Pd=cmph.eq(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmph.eq($src1, $src2)",
- [(set (i1 PredRegs:$dst), (seteq (and (xor (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)),
- 65535), 0))]>,
- Requires<[HasV4T]>;
-
-// Signed half compare(.eq) rr.
-// Case 2: shift left 16 bits then compare:
-// r0=asl(r0,16)
-// r1=asl(r1,16)
-// p0=cmp.eq(r0,r1)
-// Pd=cmph.eq(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmph.eq($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (shl (i32 IntRegs:$src1), (i32 16)),
- (shl (i32 IntRegs:$src2), (i32 16))))]>,
- Requires<[HasV4T]>;
-
-/* Incorrect Pattern -- immediate should be right shifted before being
-used in the cmph.gt instruction.
-// Signed half compare(.gt) ri.
-// Pd=cmph.gt(Rs,#s8)
-
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8,
-isCompare = 1, validSubTargets = HasV4SubT in
-def CMPhGTri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, s8Ext:$src2),
- "$dst = cmph.gt($src1, #$src2)",
- [(set (i1 PredRegs:$dst),
- (setgt (shl (i32 IntRegs:$src1), (i32 16)),
- s8ExtPred:$src2))]>,
- Requires<[HasV4T]>;
-*/
-
-// Signed half compare(.gt) rr.
-// Pd=cmph.gt(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT in
-def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmph.gt($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setgt (shl (i32 IntRegs:$src1), (i32 16)),
- (shl (i32 IntRegs:$src2), (i32 16))))]>,
- Requires<[HasV4T]>;
-
-// Unsigned half compare rr (.gtu).
-// Pd=cmph.gtu(Rs,Rt)
-let isCompare = 1, validSubTargets = HasV4SubT, CextOpcode = "CMPhGTU",
-InputType = "reg" in
-def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = cmph.gtu($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (setugt (and (i32 IntRegs:$src1), 65535),
- (and (i32 IntRegs:$src2), 65535)))]>,
- Requires<[HasV4T]>, ImmRegRel;
-
-// Unsigned half compare ri (.gtu).
-// Pd=cmph.gtu(Rs,#u7)
-let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 7,
-isCompare = 1, validSubTargets = HasV4SubT, CextOpcode = "CMPhGTU",
-InputType = "imm" in
-def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, u7Ext:$src2),
- "$dst = cmph.gtu($src1, #$src2)",
- [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 65535),
- u7ExtPred:$src2))]>,
- Requires<[HasV4T]>, ImmRegRel;
-
-let validSubTargets = HasV4SubT in
-def NTSTBIT_rr : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = !tstbit($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (and (shl 1, (i32 IntRegs:$src2)), (i32 IntRegs:$src1)), 0))]>,
- Requires<[HasV4T]>;
-
-let validSubTargets = HasV4SubT in
-def NTSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- "$dst = !tstbit($src1, $src2)",
- [(set (i1 PredRegs:$dst),
- (seteq (and (shl 1, u5ImmPred:$src2), (i32 IntRegs:$src1)), 0))]>,
- Requires<[HasV4T]>;
+def: Pat<(i32 (zext (i1 (setult (i32 (and (i32 IntRegs:$src1), 255)),
+ u7StrictPosImmPred:$src2)))),
+ (C2_muxii (A4_cmpbgtui IntRegs:$src1,
+ (DEC_CONST_BYTE u7StrictPosImmPred:$src2)),
+ 0, 1)>;
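+// E.g. for "r = (c >= '0' && c <= '9');", assuming the zero-extended
+// (c - '0') is in r0, this selects (sketch):
+//   p0 = cmpb.gtu(r0, #9)
+//   r0 = mux(p0, #0, #1)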
//===----------------------------------------------------------------------===//
// XTYPE/PRED -
//===----------------------------------------------------------------------===//
-//Deallocate frame and return.
-// dealloc_return
-let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_V4 : LD0Inst<(outs), (ins),
- "dealloc_return",
- []>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// Multiclass for DeallocReturn
+//===----------------------------------------------------------------------===//
+class L4_RETURN<string mnemonic, bit isNot, bit isPredNew, bit isTak>
+ : LD0Inst<(outs), (ins PredRegs:$src),
+ !if(isNot, "if (!$src", "if ($src")#
+ !if(isPredNew, ".new) ", ") ")#mnemonic#
+ !if(isPredNew, #!if(isTak,":t", ":nt"),""),
+ [], "", LD_tc_3or4stall_SLOT0> {
+
+ bits<2> src;
+ let BaseOpcode = "L4_RETURN";
+ let isPredicatedFalse = isNot;
+ let isPredicatedNew = isPredNew;
+ let isTaken = isTak;
+ let IClass = 0b1001;
+
+ let Inst{27-16} = 0b011000011110;
+
+ let Inst{13} = isNot;
+ let Inst{12} = isTak;
+ let Inst{11} = isPredNew;
+ let Inst{10} = 0b0;
+ let Inst{9-8} = src;
+ let Inst{4-0} = 0b11110;
+ }
+
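+// For example, L4_RETURN<"dealloc_return", 0, 1, 1> assembles as
+// "if ($src.new) dealloc_return:t", while isNot = 1 with isPredNew = 0
+// gives "if (!$src) dealloc_return".
+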
+// Produce all predicated forms: p, !p, p.new and !p.new, with :t/:nt hints.
+multiclass L4_RETURN_PRED<string mnemonic, bit PredNot> {
+ let isPredicated = 1 in {
+ def _#NAME# : L4_RETURN <mnemonic, PredNot, 0, 1>;
+ def _#NAME#new_pnt : L4_RETURN <mnemonic, PredNot, 1, 0>;
+ def _#NAME#new_pt : L4_RETURN <mnemonic, PredNot, 1, 1>;
+ }
}
+multiclass LD_MISC_L4_RETURN<string mnemonic> {
+ let isBarrier = 1, isPredicable = 1 in
+ def NAME : LD0Inst <(outs), (ins), mnemonic, [], "",
+ LD_tc_3or4stall_SLOT0> {
+ let BaseOpcode = "L4_RETURN";
+ let IClass = 0b1001;
+ let Inst{27-16} = 0b011000011110;
+ let Inst{13-10} = 0b0000;
+ let Inst{4-0} = 0b11110;
+ }
+ defm t : L4_RETURN_PRED<mnemonic, 0 >;
+ defm f : L4_RETURN_PRED<mnemonic, 1 >;
+}
+
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], hasSideEffects = 0 in
+defm L4_return: LD_MISC_L4_RETURN <"dealloc_return">, PredNewRel;
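+// The defm above produces the unconditional L4_return together with its
+// predicated variants (e.g. L4_return_t, L4_return_f, L4_return_tnew_pt).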
+
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC] in {
-let validSubTargets = HasV4SubT in
+ Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in {
def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs),
(ins calltarget:$dst),
"jump $dst",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
// Restore registers and dealloc frame before a tail call.
-let isCall = 1, isBarrier = 1,
- Defs = [R29, R30, R31, PC] in {
-let validSubTargets = HasV4SubT in
+let isCall = 1, Defs = [R29, R30, R31, PC], isAsmParserOnly = 1 in {
def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs),
(ins calltarget:$dst),
"call $dst",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
// Save registers function call.
-let isCall = 1, isBarrier = 1,
- Uses = [R29, R31] in {
+let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {
def SAVE_REGISTERS_CALL_V4 : JInst<(outs),
(ins calltarget:$dst),
"call $dst // Save_calle_saved_registers",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
-// if (Ps) dealloc_return
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cPt_V4 : LD0Inst<(outs),
- (ins PredRegs:$src1),
- "if ($src1) dealloc_return",
- []>,
- Requires<[HasV4T]>;
-}
-
-// if (!Ps) dealloc_return
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1, isPredicatedFalse = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cNotPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
- "if (!$src1) dealloc_return",
- []>,
- Requires<[HasV4T]>;
-}
-
-// if (Ps.new) dealloc_return:nt
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
- "if ($src1.new) dealloc_return:nt",
- []>,
- Requires<[HasV4T]>;
-}
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated store instructions with
+// GP-relative or absolute addressing.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, isPredicable = 1, isNVStorable = 1 in
+class T_StoreAbsGP <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<2>MajOp, Operand AddrOp, bit isAbs, bit isHalf>
+ : STInst<(outs), (ins AddrOp:$addr, RC:$src),
+ mnemonic # !if(isAbs, "(##", "(#")#"$addr) = $src"#!if(isHalf, ".h",""),
+ [], "", V2LDST_tc_st_SLOT01> {
+ bits<19> addr;
+ bits<5> src;
+ bits<16> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "u16_3Imm"), addr{18-3},
+ !if (!eq(ImmOpStr, "u16_2Imm"), addr{17-2},
+ !if (!eq(ImmOpStr, "u16_1Imm"), addr{16-1},
+ /* u16_0Imm */ addr{15-0})));
+ let IClass = 0b0100;
+ let Inst{27} = 1;
+ let Inst{26-25} = offsetBits{15-14};
+ let Inst{24} = 0b0;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = isHalf;
+ let Inst{20-16} = offsetBits{13-9};
+ let Inst{13} = offsetBits{8};
+ let Inst{12-8} = src;
+ let Inst{7-0} = offsetBits{7-0};
+ }
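+
+// Only 16 of the 19 address bits are encoded: the immediate's natural
+// alignment bits are dropped. E.g. for a word store (u16_2Imm), a byte
+// offset of #256 is encoded as offsetBits = 256 >> 2 = 64.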
-// if (!Ps.new) dealloc_return:nt
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1, isPredicatedFalse = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cNotdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
- "if (!$src1.new) dealloc_return:nt",
- []>,
- Requires<[HasV4T]>;
-}
+//===----------------------------------------------------------------------===//
+// Template class for predicated store instructions with
+// GP-Relative or absolute addressing.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, isPredicated = 1, isNVStorable = 1, opExtentBits = 6,
+ opExtendable = 1 in
+class T_StoreAbs_Pred <string mnemonic, RegisterClass RC, bits<2> MajOp,
+ bit isHalf, bit isNot, bit isNew>
+ : STInst<(outs), (ins PredRegs:$src1, u6Ext:$absaddr, RC: $src2),
+ !if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ",
+ ") ")#mnemonic#"(#$absaddr) = $src2"#!if(isHalf, ".h",""),
+ [], "", ST_tc_st_SLOT01>, AddrModeRel {
+ bits<2> src1;
+ bits<6> absaddr;
+ bits<5> src2;
+
+ let isPredicatedNew = isNew;
+ let isPredicatedFalse = isNot;
+
+ let IClass = 0b1010;
-// if (Ps.new) dealloc_return:t
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
- "if ($src1.new) dealloc_return:t",
- []>,
- Requires<[HasV4T]>;
-}
+ let Inst{27-24} = 0b1111;
+ let Inst{23-22} = MajOp;
+ let Inst{21} = isHalf;
+ let Inst{17-16} = absaddr{5-4};
+ let Inst{13} = isNew;
+ let Inst{12-8} = src2;
+ let Inst{7} = 0b1;
+ let Inst{6-3} = absaddr{3-0};
+ let Inst{2} = isNot;
+ let Inst{1-0} = src1;
+ }
-// if (!Ps.new) dealloc_return:nt
-let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
- isPredicated = 1, isPredicatedFalse = 1 in {
-let validSubTargets = HasV4SubT in
- def DEALLOC_RET_cNotdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
- "if (!$src1.new) dealloc_return:t",
- []>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// Template class for predicated store instructions with absolute addressing.
+//===----------------------------------------------------------------------===//
+class T_StoreAbs <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<2> MajOp, bit isHalf>
+ : T_StoreAbsGP <mnemonic, RC, ImmOp, MajOp, u0AlwaysExt, 1, isHalf>,
+ AddrModeRel {
+ string ImmOpStr = !cast<string>(ImmOp);
+ let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 18,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 17,
+ /* u16_0Imm */ 16)));
+
+ let opExtentAlign = !if (!eq(ImmOpStr, "u16_3Imm"), 3,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 2,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 1,
+ /* u16_0Imm */ 0)));
}
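+
+// When the target address does not fit, these forms are constant-extended;
+// opExtentBits gives the width of the byte offset, including the alignment
+// bits (e.g. 16 + 2 = 18 bits for a word store).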
-// Load/Store with absolute addressing mode
-// memw(#u6)=Rt
+//===----------------------------------------------------------------------===//
+// Multiclass for store instructions with absolute addressing.
+//===----------------------------------------------------------------------===//
+let addrMode = Absolute, isExtended = 1 in
+multiclass ST_Abs<string mnemonic, string CextOp, RegisterClass RC,
+ Operand ImmOp, bits<2> MajOp, bit isHalf = 0> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in {
+ let opExtendable = 0, isPredicable = 1 in
+ def S2_#NAME#abs : T_StoreAbs <mnemonic, RC, ImmOp, MajOp, isHalf>;
-multiclass ST_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_V4 : STInst2<(outs),
- (ins PredRegs:$src1, u0AlwaysExt:$absaddr, RC: $src2),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"(##$absaddr) = $src2",
- []>,
- Requires<[HasV4T]>;
-}
+ // Predicated
+ def S4_p#NAME#t_abs : T_StoreAbs_Pred<mnemonic, RC, MajOp, isHalf, 0, 0>;
+ def S4_p#NAME#f_abs : T_StoreAbs_Pred<mnemonic, RC, MajOp, isHalf, 1, 0>;
-multiclass ST_Abs_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Abs_Predbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Abs_Predbase<mnemonic, RC, PredNot, 1>;
+ // .new Predicated
+ def S4_p#NAME#tnew_abs : T_StoreAbs_Pred<mnemonic, RC, MajOp, isHalf, 0, 1>;
+ def S4_p#NAME#fnew_abs : T_StoreAbs_Pred<mnemonic, RC, MajOp, isHalf, 1, 1>;
}
}
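+
+// E.g. "defm storerb" below expands to S2_storerbabs plus the predicated
+// forms S4_pstorerbt_abs, S4_pstorerbf_abs, S4_pstorerbtnew_abs and
+// S4_pstorerbfnew_abs.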
-let isNVStorable = 1, isExtended = 1, neverHasSideEffects = 1 in
-multiclass ST_Abs<string mnemonic, string CextOp, RegisterClass RC> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in {
- let opExtendable = 0, isPredicable = 1 in
- def NAME#_V4 : STInst2<(outs),
- (ins u0AlwaysExt:$absaddr, RC:$src),
- mnemonic#"(##$absaddr) = $src",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 1, isPredicated = 1 in {
- defm Pt : ST_Abs_Pred<mnemonic, RC, 0>;
- defm NotPt : ST_Abs_Pred<mnemonic, RC, 1>;
- }
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated new-value store instructions with
+// GP-relative or absolute addressing.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, isPredicable = 1, mayStore = 1, isNVStore = 1,
+ isNewValue = 1, opNewValue = 1 in
+class T_StoreAbsGP_NV <string mnemonic, Operand ImmOp, bits<2>MajOp, bit isAbs>
+ : NVInst_V4<(outs), (ins u0AlwaysExt:$addr, IntRegs:$src),
+ mnemonic # !if(isAbs, "(##", "(#")#"$addr) = $src.new",
+ [], "", V2LDST_tc_st_SLOT0> {
+ bits<19> addr;
+ bits<3> src;
+ bits<16> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "u16_3Imm"), addr{18-3},
+ !if (!eq(ImmOpStr, "u16_2Imm"), addr{17-2},
+ !if (!eq(ImmOpStr, "u16_1Imm"), addr{16-1},
+ /* u16_0Imm */ addr{15-0})));
+ let IClass = 0b0100;
+
+ let Inst{27} = 1;
+ let Inst{26-25} = offsetBits{15-14};
+ let Inst{24-21} = 0b0101;
+ let Inst{20-16} = offsetBits{13-9};
+ let Inst{13} = offsetBits{8};
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src;
+ let Inst{7-0} = offsetBits{7-0};
}
-}
-multiclass ST_Abs_Predbase_nv<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, u0AlwaysExt:$absaddr, RC: $src2),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#mnemonic#"(##$absaddr) = $src2.new",
- []>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// Template class for predicated new-value store instructions with
+// absolute addressing.
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, isPredicated = 1, mayStore = 1, isNVStore = 1,
+ isNewValue = 1, opNewValue = 2, opExtentBits = 6, opExtendable = 1 in
+class T_StoreAbs_NV_Pred <string mnemonic, bits<2> MajOp, bit isNot, bit isNew>
+ : NVInst_V4<(outs), (ins PredRegs:$src1, u6Ext:$absaddr, IntRegs:$src2),
+ !if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ",
+ ") ")#mnemonic#"(#$absaddr) = $src2.new",
+ [], "", ST_tc_st_SLOT0>, AddrModeRel {
+ bits<2> src1;
+ bits<6> absaddr;
+ bits<3> src2;
+
+ let isPredicatedNew = isNew;
+ let isPredicatedFalse = isNot;
+
+ let IClass = 0b1010;
+
+ let Inst{27-24} = 0b1111;
+ let Inst{23-21} = 0b101;
+ let Inst{17-16} = absaddr{5-4};
+ let Inst{13} = isNew;
+ let Inst{12-11} = MajOp;
+ let Inst{10-8} = src2;
+ let Inst{7} = 0b1;
+ let Inst{6-3} = absaddr{3-0};
+ let Inst{2} = isNot;
+ let Inst{1-0} = src1;
}
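+
+// E.g. with mnemonic "memb", isNot = 1 and isNew = 1, this assembles as
+// "if (!$src1.new) memb(#$absaddr) = $src2.new".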
-multiclass ST_Abs_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : ST_Abs_Predbase_nv<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : ST_Abs_Predbase_nv<mnemonic, RC, PredNot, 1>;
- }
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated new-value store instructions with
+// absolute addressing.
+//===----------------------------------------------------------------------===//
+class T_StoreAbs_NV <string mnemonic, Operand ImmOp, bits<2> MajOp>
+ : T_StoreAbsGP_NV <mnemonic, ImmOp, MajOp, 1>, AddrModeRel {
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 18,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 17,
+ /* u16_0Imm */ 16)));
+
+ let opExtentAlign = !if (!eq(ImmOpStr, "u16_3Imm"), 3,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 2,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 1,
+ /* u16_0Imm */ 0)));
}
-let mayStore = 1, isNVStore = 1, isExtended = 1, neverHasSideEffects = 1 in
-multiclass ST_Abs_nv<string mnemonic, string CextOp, RegisterClass RC> {
+//===----------------------------------------------------------------------===//
+// Multiclass for new-value store instructions with absolute addressing.
+//===----------------------------------------------------------------------===//
+let addrMode = Absolute, isExtended = 1 in
+multiclass ST_Abs_NV <string mnemonic, string CextOp, Operand ImmOp,
+ bits<2> MajOp> {
let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in {
let opExtendable = 0, isPredicable = 1 in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins u0AlwaysExt:$absaddr, RC:$src),
- mnemonic#"(##$absaddr) = $src.new",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 1, isPredicated = 1 in {
- defm Pt : ST_Abs_Pred_nv<mnemonic, RC, 0>;
- defm NotPt : ST_Abs_Pred_nv<mnemonic, RC, 1>;
- }
- }
-}
+ def S2_#NAME#newabs : T_StoreAbs_NV <mnemonic, ImmOp, MajOp>;
-let addrMode = Absolute in {
- let accessSize = ByteAccess in
- defm STrib_abs : ST_Abs<"memb", "STrib", IntRegs>,
- ST_Abs_nv<"memb", "STrib", IntRegs>, AddrModeRel;
+ // Predicated
+ def S4_p#NAME#newt_abs : T_StoreAbs_NV_Pred <mnemonic, MajOp, 0, 0>;
+ def S4_p#NAME#newf_abs : T_StoreAbs_NV_Pred <mnemonic, MajOp, 1, 0>;
- let accessSize = HalfWordAccess in
- defm STrih_abs : ST_Abs<"memh", "STrih", IntRegs>,
- ST_Abs_nv<"memh", "STrih", IntRegs>, AddrModeRel;
-
- let accessSize = WordAccess in
- defm STriw_abs : ST_Abs<"memw", "STriw", IntRegs>,
- ST_Abs_nv<"memw", "STriw", IntRegs>, AddrModeRel;
-
- let accessSize = DoubleWordAccess, isNVStorable = 0 in
- defm STrid_abs : ST_Abs<"memd", "STrid", DoubleRegs>, AddrModeRel;
+ // .new Predicated
+ def S4_p#NAME#newtnew_abs : T_StoreAbs_NV_Pred <mnemonic, MajOp, 0, 1>;
+ def S4_p#NAME#newfnew_abs : T_StoreAbs_NV_Pred <mnemonic, MajOp, 1, 1>;
+ }
}
-let Predicates = [HasV4T], AddedComplexity = 30 in {
-def : Pat<(truncstorei8 (i32 IntRegs:$src1),
- (HexagonCONST32 tglobaladdr:$absaddr)),
- (STrib_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+//===----------------------------------------------------------------------===//
+// Stores with absolute addressing
+//===----------------------------------------------------------------------===//
+let accessSize = ByteAccess in
+defm storerb : ST_Abs <"memb", "STrib", IntRegs, u16_0Imm, 0b00>,
+ ST_Abs_NV <"memb", "STrib", u16_0Imm, 0b00>;
-def : Pat<(truncstorei16 (i32 IntRegs:$src1),
- (HexagonCONST32 tglobaladdr:$absaddr)),
- (STrih_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+let accessSize = HalfWordAccess in
+defm storerh : ST_Abs <"memh", "STrih", IntRegs, u16_1Imm, 0b01>,
+ ST_Abs_NV <"memh", "STrih", u16_1Imm, 0b01>;
-def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)),
- (STriw_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+let accessSize = WordAccess in
+defm storeri : ST_Abs <"memw", "STriw", IntRegs, u16_2Imm, 0b10>,
+ ST_Abs_NV <"memw", "STriw", u16_2Imm, 0b10>;
-def : Pat<(store (i64 DoubleRegs:$src1),
- (HexagonCONST32 tglobaladdr:$absaddr)),
- (STrid_abs_V4 tglobaladdr: $absaddr, DoubleRegs: $src1)>;
-}
+let isNVStorable = 0, accessSize = DoubleWordAccess in
+defm storerd : ST_Abs <"memd", "STrid", DoubleRegs, u16_3Imm, 0b11>;
+
+let isNVStorable = 0, accessSize = HalfWordAccess in
+defm storerf : ST_Abs <"memh", "STrif", IntRegs, u16_1Imm, 0b01, 1>;
//===----------------------------------------------------------------------===//
-// multiclass for store instructions with GP-relative addressing mode.
+// GP-relative stores.
// mem[bhwd](#global)=Rt
-// if ([!]Pv[.new]) mem[bhwd](##global) = Rt
+// Once predicated, these instructions map to absolute addressing mode.
+// if ([!]Pv[.new]) mem[bhwd](##global)=Rt
//===----------------------------------------------------------------------===//
-let mayStore = 1, isNVStorable = 1 in
-multiclass ST_GP<string mnemonic, string BaseOp, RegisterClass RC> {
- let BaseOpcode = BaseOp, isPredicable = 1 in
- def NAME#_V4 : STInst2<(outs),
- (ins globaladdress:$global, RC:$src),
- mnemonic#"(#$global) = $src",
- []>;
- // When GP-relative instructions are predicated, their addressing mode is
- // changed to absolute and they are always constant extended.
- let BaseOpcode = BaseOp, isExtended = 1, opExtendable = 1,
- isPredicated = 1 in {
- defm Pt : ST_Abs_Pred <mnemonic, RC, 0>;
- defm NotPt : ST_Abs_Pred <mnemonic, RC, 1>;
+let isAsmParserOnly = 1 in
+class T_StoreGP <string mnemonic, string BaseOp, RegisterClass RC,
+ Operand ImmOp, bits<2> MajOp, bit isHalf = 0>
+ : T_StoreAbsGP <mnemonic, RC, ImmOp, MajOp, globaladdress, 0, isHalf> {
+  // Use the same BaseOpcode as the absolute-addressing instructions so that
+  // non-predicated GP-relative instructions can be related to their
+  // predicated absolute forms.
+ let BaseOpcode = BaseOp#_abs;
+ }
+
+let isAsmParserOnly = 1 in
+multiclass ST_GP <string mnemonic, string BaseOp, Operand ImmOp,
+ bits<2> MajOp, bit isHalf = 0> {
+  // Use the same BaseOpcode as the absolute-addressing instructions so that
+  // non-predicated GP-relative instructions can be related to their
+  // predicated absolute forms.
+ let BaseOpcode = BaseOp#_abs in {
+ def NAME#gp : T_StoreAbsGP <mnemonic, IntRegs, ImmOp, MajOp,
+ globaladdress, 0, isHalf>;
+ // New-value store
+ def NAME#newgp : T_StoreAbsGP_NV <mnemonic, ImmOp, MajOp, 0> ;
}
}
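+
+// E.g. "defm S2_storerb" below yields S2_storerbgp and its new-value
+// counterpart S2_storerbnewgp.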
-let mayStore = 1, isNVStore = 1 in
-multiclass ST_GP_nv<string mnemonic, string BaseOp, RegisterClass RC> {
- let BaseOpcode = BaseOp, isPredicable = 1 in
- def NAME#_nv_V4 : NVInst_V4<(outs),
- (ins u0AlwaysExt:$global, RC:$src),
- mnemonic#"(#$global) = $src.new",
- []>,
- Requires<[HasV4T]>;
-
- // When GP-relative instructions are predicated, their addressing mode is
- // changed to absolute and they are always constant extended.
- let BaseOpcode = BaseOp, isExtended = 1, opExtendable = 1,
- isPredicated = 1 in {
- defm Pt : ST_Abs_Pred_nv<mnemonic, RC, 0>;
- defm NotPt : ST_Abs_Pred_nv<mnemonic, RC, 1>;
- }
-}
-
-let validSubTargets = HasV4SubT, neverHasSideEffects = 1 in {
- let isNVStorable = 0 in
- defm STd_GP : ST_GP <"memd", "STd_GP", DoubleRegs>, PredNewRel;
-
- defm STb_GP : ST_GP<"memb", "STb_GP", IntRegs>,
- ST_GP_nv<"memb", "STb_GP", IntRegs>, NewValueRel;
- defm STh_GP : ST_GP<"memh", "STh_GP", IntRegs>,
- ST_GP_nv<"memh", "STh_GP", IntRegs>, NewValueRel;
- defm STw_GP : ST_GP<"memw", "STw_GP", IntRegs>,
- ST_GP_nv<"memw", "STw_GP", IntRegs>, NewValueRel;
-}
-
-// 64 bit atomic store
-def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
- (i64 DoubleRegs:$src1)),
- (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
- Requires<[HasV4T]>;
-
-// Map from store(globaladdress) -> memd(#foo)
-let AddedComplexity = 100 in
-def : Pat <(store (i64 DoubleRegs:$src1),
- (HexagonCONST32_GP tglobaladdr:$global)),
- (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>;
+let accessSize = ByteAccess in
+defm S2_storerb : ST_GP<"memb", "STrib", u16_0Imm, 0b00>, NewValueRel;
-// 8 bit atomic store
-def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
- (i32 IntRegs:$src1)),
- (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+let accessSize = HalfWordAccess in
+defm S2_storerh : ST_GP<"memh", "STrih", u16_1Imm, 0b01>, NewValueRel;
-// Map from store(globaladdress) -> memb(#foo)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei8 (i32 IntRegs:$src1),
- (HexagonCONST32_GP tglobaladdr:$global)),
- (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+let accessSize = WordAccess in
+defm S2_storeri : ST_GP<"memw", "STriw", u16_2Imm, 0b10>, NewValueRel;
-// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1"
-// to "r0 = 1; memw(#foo) = r0"
-let AddedComplexity = 100 in
-def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
- (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>;
+let isNVStorable = 0, accessSize = DoubleWordAccess in
+def S2_storerdgp : T_StoreGP <"memd", "STrid", DoubleRegs,
+ u16_3Imm, 0b11>, PredNewRel;
-def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
- (i32 IntRegs:$src1)),
- (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+let isNVStorable = 0, accessSize = HalfWordAccess in
+def S2_storerfgp : T_StoreGP <"memh", "STrif", IntRegs,
+ u16_1Imm, 0b01, 1>, PredNewRel;
-// Map from store(globaladdress) -> memh(#foo)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei16 (i32 IntRegs:$src1),
- (HexagonCONST32_GP tglobaladdr:$global)),
- (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+class Loada_pat<PatFrag Load, ValueType VT, PatFrag Addr, InstHexagon MI>
+ : Pat<(VT (Load Addr:$addr)), (MI Addr:$addr)>;
-// 32 bit atomic store
-def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
- (i32 IntRegs:$src1)),
- (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+class Loadam_pat<PatFrag Load, ValueType VT, PatFrag Addr, PatFrag ValueMod,
+ InstHexagon MI>
+ : Pat<(VT (Load Addr:$addr)), (ValueMod (MI Addr:$addr))>;
-// Map from store(globaladdress) -> memw(#foo)
-let AddedComplexity = 100 in
-def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
- (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>;
+class Storea_pat<PatFrag Store, PatFrag Value, PatFrag Addr, InstHexagon MI>
+ : Pat<(Store Value:$val, Addr:$addr), (MI Addr:$addr, Value:$val)>;
-//===----------------------------------------------------------------------===//
-// Multiclass for the load instructions with absolute addressing mode.
-//===----------------------------------------------------------------------===//
-multiclass LD_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot,
- bit isPredNew> {
- let isPredicatedNew = isPredNew in
- def NAME : LDInst2<(outs RC:$dst),
- (ins PredRegs:$src1, u0AlwaysExt:$absaddr),
- !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
- ") ")#"$dst = "#mnemonic#"(##$absaddr)",
- []>,
- Requires<[HasV4T]>;
-}
+class Stoream_pat<PatFrag Store, PatFrag Value, PatFrag Addr, PatFrag ValueMod,
+ InstHexagon MI>
+ : Pat<(Store Value:$val, Addr:$addr),
+ (MI Addr:$addr, (ValueMod Value:$val))>;
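+
+// E.g. Storea_pat<truncstorei8, I32, addrgp, S2_storerbgp> expands to
+//   def: Pat<(truncstorei8 I32:$val, addrgp:$addr),
+//            (S2_storerbgp addrgp:$addr, I32:$val)>;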
-multiclass LD_Abs_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
- let isPredicatedFalse = PredNot in {
- defm _c#NAME : LD_Abs_Predbase<mnemonic, RC, PredNot, 0>;
- // Predicate new
- defm _cdn#NAME : LD_Abs_Predbase<mnemonic, RC, PredNot, 1>;
- }
+def: Storea_pat<SwapSt<atomic_store_8>, I32, addrgp, S2_storerbgp>;
+def: Storea_pat<SwapSt<atomic_store_16>, I32, addrgp, S2_storerhgp>;
+def: Storea_pat<SwapSt<atomic_store_32>, I32, addrgp, S2_storerigp>;
+def: Storea_pat<SwapSt<atomic_store_64>, I64, addrgp, S2_storerdgp>;
+
+let AddedComplexity = 100 in {
+ def: Storea_pat<truncstorei8, I32, addrgp, S2_storerbgp>;
+ def: Storea_pat<truncstorei16, I32, addrgp, S2_storerhgp>;
+ def: Storea_pat<store, I32, addrgp, S2_storerigp>;
+ def: Storea_pat<store, I64, addrgp, S2_storerdgp>;
+
+  // Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1"
+  // to "r0 = 1; memb(#foo) = r0"
+ let AddedComplexity = 100 in
+ def: Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (S2_storerbgp tglobaladdr:$global, (A2_tfrsi 1))>;
}
-let isExtended = 1, neverHasSideEffects = 1 in
-multiclass LD_Abs<string mnemonic, string CextOp, RegisterClass RC> {
- let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in {
- let opExtendable = 1, isPredicable = 1 in
- def NAME#_V4 : LDInst2<(outs RC:$dst),
- (ins u0AlwaysExt:$absaddr),
- "$dst = "#mnemonic#"(##$absaddr)",
- []>,
- Requires<[HasV4T]>;
-
- let opExtendable = 2, isPredicated = 1 in {
- defm Pt_V4 : LD_Abs_Pred<mnemonic, RC, 0>;
- defm NotPt_V4 : LD_Abs_Pred<mnemonic, RC, 1>;
- }
+//===----------------------------------------------------------------------===//
+// Template class for non-predicated load instructions with
+// absolute addressing mode.
+//===----------------------------------------------------------------------===//
+let isPredicable = 1, hasSideEffects = 0 in
+class T_LoadAbsGP <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<3> MajOp, Operand AddrOp, bit isAbs>
+ : LDInst <(outs RC:$dst), (ins AddrOp:$addr),
+ "$dst = "#mnemonic# !if(isAbs, "(##", "(#")#"$addr)",
+ [], "", V2LDST_tc_ld_SLOT01> {
+ bits<5> dst;
+ bits<19> addr;
+ bits<16> offsetBits;
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let offsetBits = !if (!eq(ImmOpStr, "u16_3Imm"), addr{18-3},
+ !if (!eq(ImmOpStr, "u16_2Imm"), addr{17-2},
+ !if (!eq(ImmOpStr, "u16_1Imm"), addr{16-1},
+ /* u16_0Imm */ addr{15-0})));
+
+ let IClass = 0b0100;
+
+ let Inst{27} = 0b1;
+ let Inst{26-25} = offsetBits{15-14};
+ let Inst{24} = 0b1;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = offsetBits{13-9};
+ let Inst{13-5} = offsetBits{8-0};
+ let Inst{4-0} = dst;
}
-}
-let addrMode = Absolute in {
- let accessSize = ByteAccess in {
- defm LDrib_abs : LD_Abs<"memb", "LDrib", IntRegs>, AddrModeRel;
- defm LDriub_abs : LD_Abs<"memub", "LDriub", IntRegs>, AddrModeRel;
+class T_LoadAbs <string mnemonic, RegisterClass RC, Operand ImmOp,
+ bits<3> MajOp>
+ : T_LoadAbsGP <mnemonic, RC, ImmOp, MajOp, u0AlwaysExt, 1>, AddrModeRel {
+
+ string ImmOpStr = !cast<string>(ImmOp);
+ let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 18,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 17,
+ /* u16_0Imm */ 16)));
+
+ let opExtentAlign = !if (!eq(ImmOpStr, "u16_3Imm"), 3,
+ !if (!eq(ImmOpStr, "u16_2Imm"), 2,
+ !if (!eq(ImmOpStr, "u16_1Imm"), 1,
+ /* u16_0Imm */ 0)));
}
- let accessSize = HalfWordAccess in {
- defm LDrih_abs : LD_Abs<"memh", "LDrih", IntRegs>, AddrModeRel;
- defm LDriuh_abs : LD_Abs<"memuh", "LDriuh", IntRegs>, AddrModeRel;
+
+//===----------------------------------------------------------------------===//
+// Template class for predicated load instructions with
+// absolute addressing mode.
+//===----------------------------------------------------------------------===//
+let isPredicated = 1, opExtentBits = 6, opExtendable = 2 in
+class T_LoadAbs_Pred <string mnemonic, RegisterClass RC, bits<3> MajOp,
+ bit isPredNot, bit isPredNew>
+ : LDInst <(outs RC:$dst), (ins PredRegs:$src1, u6Ext:$absaddr),
+ !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
+ ") ")#"$dst = "#mnemonic#"(#$absaddr)">, AddrModeRel {
+ bits<5> dst;
+ bits<2> src1;
+ bits<6> absaddr;
+
+ let isPredicatedNew = isPredNew;
+ let isPredicatedFalse = isPredNot;
+ let hasNewValue = !if (!eq(!cast<string>(RC), "DoubleRegs"), 0, 1);
+
+ let IClass = 0b1001;
+
+ let Inst{27-24} = 0b1111;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = absaddr{5-1};
+ let Inst{13} = 0b1;
+ let Inst{12} = isPredNew;
+ let Inst{11} = isPredNot;
+ let Inst{10-9} = src1;
+ let Inst{8} = absaddr{0};
+ let Inst{7} = 0b1;
+ let Inst{4-0} = dst;
}
- let accessSize = WordAccess in
- defm LDriw_abs : LD_Abs<"memw", "LDriw", IntRegs>, AddrModeRel;
- let accessSize = DoubleWordAccess in
- defm LDrid_abs : LD_Abs<"memd", "LDrid", DoubleRegs>, AddrModeRel;
+//===----------------------------------------------------------------------===//
+// Multiclass for the load instructions with absolute addressing mode.
+//===----------------------------------------------------------------------===//
+multiclass LD_Abs_Pred<string mnemonic, RegisterClass RC, bits<3> MajOp,
+ bit PredNot> {
+ def _abs : T_LoadAbs_Pred <mnemonic, RC, MajOp, PredNot, 0>;
+ // Predicate new
+ def new_abs : T_LoadAbs_Pred <mnemonic, RC, MajOp, PredNot, 1>;
}
-let Predicates = [HasV4T], AddedComplexity = 30 in {
-def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))),
- (LDriw_abs_V4 tglobaladdr: $absaddr)>;
-
-def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
- (LDrib_abs_V4 tglobaladdr:$absaddr)>;
+let addrMode = Absolute, isExtended = 1 in
+multiclass LD_Abs<string mnemonic, string CextOp, RegisterClass RC,
+ Operand ImmOp, bits<3> MajOp> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in {
+ let opExtendable = 1, isPredicable = 1 in
+ def L4_#NAME#_abs: T_LoadAbs <mnemonic, RC, ImmOp, MajOp>;
-def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
- (LDriub_abs_V4 tglobaladdr:$absaddr)>;
+ // Predicated
+ defm L4_p#NAME#t : LD_Abs_Pred<mnemonic, RC, MajOp, 0>;
+ defm L4_p#NAME#f : LD_Abs_Pred<mnemonic, RC, MajOp, 1>;
+ }
+}
-def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
- (LDrih_abs_V4 tglobaladdr:$absaddr)>;
+let accessSize = ByteAccess, hasNewValue = 1 in {
+ defm loadrb : LD_Abs<"memb", "LDrib", IntRegs, u16_0Imm, 0b000>;
+ defm loadrub : LD_Abs<"memub", "LDriub", IntRegs, u16_0Imm, 0b001>;
+}
-def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
- (LDriuh_abs_V4 tglobaladdr:$absaddr)>;
+let accessSize = HalfWordAccess, hasNewValue = 1 in {
+ defm loadrh : LD_Abs<"memh", "LDrih", IntRegs, u16_1Imm, 0b010>;
+ defm loadruh : LD_Abs<"memuh", "LDriuh", IntRegs, u16_1Imm, 0b011>;
}
+let accessSize = WordAccess, hasNewValue = 1 in
+defm loadri : LD_Abs<"memw", "LDriw", IntRegs, u16_2Imm, 0b100>;
+
+let accessSize = DoubleWordAccess in
+defm loadrd : LD_Abs<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>;
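+
+// E.g. "defm loadri" above yields L4_loadri_abs plus the predicated
+// L4_ploadrit_abs/L4_ploadrif_abs and .new forms such as
+// L4_ploadritnew_abs.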
+
//===----------------------------------------------------------------------===//
// multiclass for load instructions with GP-relative addressing mode.
// Rx=mem[bhwd](##global)
+// Once predicated, these instructions map to absolute addressing mode.
// if ([!]Pv[.new]) Rx=mem[bhwd](##global)
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1, validSubTargets = HasV4SubT in
-multiclass LD_GP<string mnemonic, string BaseOp, RegisterClass RC> {
- let BaseOpcode = BaseOp in {
- let isPredicable = 1 in
- def NAME#_V4 : LDInst2<(outs RC:$dst),
- (ins globaladdress:$global),
- "$dst = "#mnemonic#"(#$global)",
- []>;
-
- let isExtended = 1, opExtendable = 2, isPredicated = 1 in {
- defm Pt_V4 : LD_Abs_Pred<mnemonic, RC, 0>;
- defm NotPt_V4 : LD_Abs_Pred<mnemonic, RC, 1>;
- }
- }
-}
-defm LDd_GP : LD_GP<"memd", "LDd_GP", DoubleRegs>, PredNewRel;
-defm LDb_GP : LD_GP<"memb", "LDb_GP", IntRegs>, PredNewRel;
-defm LDub_GP : LD_GP<"memub", "LDub_GP", IntRegs>, PredNewRel;
-defm LDh_GP : LD_GP<"memh", "LDh_GP", IntRegs>, PredNewRel;
-defm LDuh_GP : LD_GP<"memuh", "LDuh_GP", IntRegs>, PredNewRel;
-defm LDw_GP : LD_GP<"memw", "LDw_GP", IntRegs>, PredNewRel;
+let isAsmParserOnly = 1 in
+class T_LoadGP <string mnemonic, string BaseOp, RegisterClass RC, Operand ImmOp,
+ bits<3> MajOp>
+ : T_LoadAbsGP <mnemonic, RC, ImmOp, MajOp, globaladdress, 0>, PredNewRel {
+ let BaseOpcode = BaseOp#_abs;
+ }
-def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
- (i64 (LDd_GP_V4 tglobaladdr:$global))>;
+let accessSize = ByteAccess, hasNewValue = 1 in {
+ def L2_loadrbgp : T_LoadGP<"memb", "LDrib", IntRegs, u16_0Imm, 0b000>;
+ def L2_loadrubgp : T_LoadGP<"memub", "LDriub", IntRegs, u16_0Imm, 0b001>;
+}
-def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
- (i32 (LDw_GP_V4 tglobaladdr:$global))>;
+let accessSize = HalfWordAccess, hasNewValue = 1 in {
+ def L2_loadrhgp : T_LoadGP<"memh", "LDrih", IntRegs, u16_1Imm, 0b010>;
+ def L2_loadruhgp : T_LoadGP<"memuh", "LDriuh", IntRegs, u16_1Imm, 0b011>;
+}
-def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
- (i32 (LDuh_GP_V4 tglobaladdr:$global))>;
+let accessSize = WordAccess, hasNewValue = 1 in
+def L2_loadrigp : T_LoadGP<"memw", "LDriw", IntRegs, u16_2Imm, 0b100>;
-def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
- (i32 (LDub_GP_V4 tglobaladdr:$global))>;
+let accessSize = DoubleWordAccess in
+def L2_loadrdgp : T_LoadGP<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>;
-// Map from load(globaladdress) -> memw(#foo + 0)
-let AddedComplexity = 100 in
-def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
- (i64 (LDd_GP_V4 tglobaladdr:$global))>;
+def: Loada_pat<atomic_load_8, i32, addrgp, L2_loadrubgp>;
+def: Loada_pat<atomic_load_16, i32, addrgp, L2_loadruhgp>;
+def: Loada_pat<atomic_load_32, i32, addrgp, L2_loadrigp>;
+def: Loada_pat<atomic_load_64, i64, addrgp, L2_loadrdgp>;
// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd
-let AddedComplexity = 100 in
-def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
- (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>;
+def: Loadam_pat<load, i1, addrga, I32toI1, L4_loadrub_abs>;
+def: Loadam_pat<load, i1, addrgp, I32toI1, L2_loadrubgp>;
+
+def: Stoream_pat<store, I1, addrga, I1toI32, S2_storerbabs>;
+def: Stoream_pat<store, I1, addrgp, I1toI32, S2_storerbgp>;
+
+// Map from load(globaladdress) -> mem[u][bhwd](#foo)
+class LoadGP_pats <PatFrag ldOp, InstHexagon MI, ValueType VT = i32>
+ : Pat <(VT (ldOp (HexagonCONST32_GP tglobaladdr:$global))),
+ (VT (MI tglobaladdr:$global))>;
+
+let AddedComplexity = 100 in {
+ def: LoadGP_pats <extloadi8, L2_loadrbgp>;
+ def: LoadGP_pats <sextloadi8, L2_loadrbgp>;
+ def: LoadGP_pats <zextloadi8, L2_loadrubgp>;
+ def: LoadGP_pats <extloadi16, L2_loadrhgp>;
+ def: LoadGP_pats <sextloadi16, L2_loadrhgp>;
+ def: LoadGP_pats <zextloadi16, L2_loadruhgp>;
+ def: LoadGP_pats <load, L2_loadrigp>;
+ def: LoadGP_pats <load, L2_loadrdgp, i64>;
+}
// When the Interprocedural Global Variable optimizer realizes that a certain
// global variable takes only two constant values, it shrinks the global to
// a boolean. Catch those loads here in the following patterns.
-let AddedComplexity = 100 in
-def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDb_GP_V4 tglobaladdr:$global))>;
+let AddedComplexity = 100 in {
+ def: LoadGP_pats <extloadi1, L2_loadrubgp>;
+ def: LoadGP_pats <zextloadi1, L2_loadrubgp>;
+}
-let AddedComplexity = 100 in
-def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDb_GP_V4 tglobaladdr:$global))>;
+// Transfer global address into a register
+def: Pat<(HexagonCONST32 tglobaladdr:$Rs), (A2_tfrsi s16Ext:$Rs)>;
+def: Pat<(HexagonCONST32_GP tblockaddress:$Rs), (A2_tfrsi s16Ext:$Rs)>;
+def: Pat<(HexagonCONST32_GP tglobaladdr:$Rs), (A2_tfrsi s16Ext:$Rs)>;
-// Map from load(globaladdress) -> memb(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDb_GP_V4 tglobaladdr:$global))>;
+def: Pat<(i64 (ctlz I64:$src1)), (Zext64 (S2_cl0p I64:$src1))>;
+def: Pat<(i64 (cttz I64:$src1)), (Zext64 (S2_ct0p I64:$src1))>;
-// Map from load(globaladdress) -> memb(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDb_GP_V4 tglobaladdr:$global))>;
+let AddedComplexity = 30 in {
+ def: Storea_pat<truncstorei8, I32, u0AlwaysExtPred, S2_storerbabs>;
+ def: Storea_pat<truncstorei16, I32, u0AlwaysExtPred, S2_storerhabs>;
+ def: Storea_pat<store, I32, u0AlwaysExtPred, S2_storeriabs>;
+}
-let AddedComplexity = 100 in
-def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDub_GP_V4 tglobaladdr:$global))>;
+let AddedComplexity = 30 in {
+ def: Loada_pat<load, i32, u0AlwaysExtPred, L4_loadri_abs>;
+ def: Loada_pat<sextloadi8, i32, u0AlwaysExtPred, L4_loadrb_abs>;
+ def: Loada_pat<zextloadi8, i32, u0AlwaysExtPred, L4_loadrub_abs>;
+ def: Loada_pat<sextloadi16, i32, u0AlwaysExtPred, L4_loadrh_abs>;
+ def: Loada_pat<zextloadi16, i32, u0AlwaysExtPred, L4_loadruh_abs>;
+}
-// Map from load(globaladdress) -> memub(#foo)
+// Indexed store word - global address.
+// memw(Rs+#u6:2) = ##global
let AddedComplexity = 100 in
-def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDub_GP_V4 tglobaladdr:$global))>;
+def: Storex_add_pat<store, addrga, u6_2ImmPred, S4_storeiri_io>;
-// Map from load(globaladdress) -> memh(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDh_GP_V4 tglobaladdr:$global))>;
+// Load from a global address that has only one use in the current basic block.
+let AddedComplexity = 100 in {
+ def: Loada_pat<extloadi8, i32, addrga, L4_loadrub_abs>;
+ def: Loada_pat<sextloadi8, i32, addrga, L4_loadrb_abs>;
+ def: Loada_pat<zextloadi8, i32, addrga, L4_loadrub_abs>;
-// Map from load(globaladdress) -> memh(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDh_GP_V4 tglobaladdr:$global))>;
+ def: Loada_pat<extloadi16, i32, addrga, L4_loadruh_abs>;
+ def: Loada_pat<sextloadi16, i32, addrga, L4_loadrh_abs>;
+ def: Loada_pat<zextloadi16, i32, addrga, L4_loadruh_abs>;
-// Map from load(globaladdress) -> memuh(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDuh_GP_V4 tglobaladdr:$global))>;
+ def: Loada_pat<load, i32, addrga, L4_loadri_abs>;
+ def: Loada_pat<load, i64, addrga, L4_loadrd_abs>;
+}
-// Map from load(globaladdress) -> memw(#foo)
-let AddedComplexity = 100 in
-def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
- (i32 (LDw_GP_V4 tglobaladdr:$global))>;
+// Store to a global address that has only one use in the current basic block.
+let AddedComplexity = 100 in {
+ def: Storea_pat<truncstorei8, I32, addrga, S2_storerbabs>;
+ def: Storea_pat<truncstorei16, I32, addrga, S2_storerhabs>;
+ def: Storea_pat<store, I32, addrga, S2_storeriabs>;
+ def: Storea_pat<store, I64, addrga, S2_storerdabs>;
+ def: Stoream_pat<truncstorei32, I64, addrga, LoReg, S2_storeriabs>;
+}
+
+// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd
+let AddedComplexity = 100 in
+def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i1 (C2_tfrrp (i32 (L2_loadrbgp tglobaladdr:$global))))>;
// Transfer global address into a register
let isExtended = 1, opExtendable = 1, AddedComplexity=50, isMoveImm = 1,
-isAsCheapAsAMove = 1, isReMaterializable = 1, validSubTargets = HasV4SubT in
+isAsCheapAsAMove = 1, isReMaterializable = 1, isCodeGenOnly = 1 in
def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
"$dst = #$src1",
- [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>,
- Requires<[HasV4T]>;
+ [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>;
// Transfer a block address into a register
def : Pat<(HexagonCONST32_GP tblockaddress:$src1),
- (TFRI_V4 tblockaddress:$src1)>,
- Requires<[HasV4T]>;
-
-let isExtended = 1, opExtendable = 2, AddedComplexity=50,
-neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
-def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s16Ext:$src2),
- "if($src1) $dst = #$src2",
- []>,
- Requires<[HasV4T]>;
-
-let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
-neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
-def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s16Ext:$src2),
- "if(!$src1) $dst = #$src2",
- []>,
- Requires<[HasV4T]>;
-
-let isExtended = 1, opExtendable = 2, AddedComplexity=50,
-neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
-def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s16Ext:$src2),
- "if($src1.new) $dst = #$src2",
- []>,
- Requires<[HasV4T]>;
-
-let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
-neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
-def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, s16Ext:$src2),
- "if(!$src1.new) $dst = #$src2",
- []>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 50, Predicates = [HasV4T] in
-def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
- (TFRI_V4 tglobaladdr:$src1)>,
- Requires<[HasV4T]>;
-
-
-// Load - Indirect with long offset: These instructions take global address
-// as an operand
-let isExtended = 1, opExtendable = 3, AddedComplexity = 40,
-validSubTargets = HasV4SubT in
-def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst),
- (ins IntRegs:$src1, u2Imm:$src2, globaladdressExt:$offset),
- "$dst=memd($src1<<#$src2+##$offset)",
- [(set (i64 DoubleRegs:$dst),
- (load (add (shl IntRegs:$src1, u2ImmPred:$src2),
- (HexagonCONST32 tglobaladdr:$offset))))]>,
- Requires<[HasV4T]>;
+ (TFRI_V4 tblockaddress:$src1)>;
-let AddedComplexity = 40 in
-multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> {
-let isExtended = 1, opExtendable = 3, validSubTargets = HasV4SubT in
- def _lo_V4 : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, u2Imm:$src2, globaladdressExt:$offset),
- !strconcat("$dst = ",
- !strconcat(OpcStr, "($src1<<#$src2+##$offset)")),
- [(set IntRegs:$dst,
- (i32 (OpNode (add (shl IntRegs:$src1, u2ImmPred:$src2),
- (HexagonCONST32 tglobaladdr:$offset)))))]>,
- Requires<[HasV4T]>;
-}
-
-defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>;
-defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>;
-defm LDriub_ind_anyext : LD_indirect_lo<"memub", extloadi8>;
-defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>;
-defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>;
-defm LDriuh_ind_anyext : LD_indirect_lo<"memuh", extloadi16>;
-defm LDriw_ind : LD_indirect_lo<"memw", load>;
-
-let AddedComplexity = 40 in
-def : Pat <(i32 (sextloadi8 (add IntRegs:$src1,
- (NumUsesBelowThresCONST32 tglobaladdr:$offset)))),
- (i32 (LDrib_ind_lo_V4 IntRegs:$src1, 0, tglobaladdr:$offset))>,
- Requires<[HasV4T]>;
-
-let AddedComplexity = 40 in
-def : Pat <(i32 (zextloadi8 (add IntRegs:$src1,
- (NumUsesBelowThresCONST32 tglobaladdr:$offset)))),
- (i32 (LDriub_ind_lo_V4 IntRegs:$src1, 0, tglobaladdr:$offset))>,
- Requires<[HasV4T]>;
+let AddedComplexity = 50 in
+def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
+ (TFRI_V4 tglobaladdr:$src1)>;
-let Predicates = [HasV4T], AddedComplexity = 30 in {
-def : Pat<(truncstorei8 (i32 IntRegs:$src1), u0AlwaysExtPred:$src2),
- (STrib_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>;
+// i8/i16/i32 -> i64 loads
+// We need a complexity of 120 here to override preceding handling of
+// zextload.
+let AddedComplexity = 120 in {
+ def: Loadam_pat<extloadi8, i64, addrga, Zext64, L4_loadrub_abs>;
+ def: Loadam_pat<sextloadi8, i64, addrga, Sext64, L4_loadrb_abs>;
+ def: Loadam_pat<zextloadi8, i64, addrga, Zext64, L4_loadrub_abs>;
-def : Pat<(truncstorei16 (i32 IntRegs:$src1), u0AlwaysExtPred:$src2),
- (STrih_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>;
+ def: Loadam_pat<extloadi16, i64, addrga, Zext64, L4_loadruh_abs>;
+ def: Loadam_pat<sextloadi16, i64, addrga, Sext64, L4_loadrh_abs>;
+ def: Loadam_pat<zextloadi16, i64, addrga, Zext64, L4_loadruh_abs>;
-def : Pat<(store (i32 IntRegs:$src1), u0AlwaysExtPred:$src2),
- (STriw_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>;
+ def: Loadam_pat<extloadi32, i64, addrga, Zext64, L4_loadri_abs>;
+ def: Loadam_pat<sextloadi32, i64, addrga, Sext64, L4_loadri_abs>;
+ def: Loadam_pat<zextloadi32, i64, addrga, Zext64, L4_loadri_abs>;
}
-let Predicates = [HasV4T], AddedComplexity = 30 in {
-def : Pat<(i32 (load u0AlwaysExtPred:$src)),
- (LDriw_abs_V4 u0AlwaysExtPred:$src)>;
+let AddedComplexity = 100 in {
+ def: Loada_pat<extloadi8, i32, addrgp, L4_loadrub_abs>;
+ def: Loada_pat<sextloadi8, i32, addrgp, L4_loadrb_abs>;
+ def: Loada_pat<zextloadi8, i32, addrgp, L4_loadrub_abs>;
-def : Pat<(i32 (sextloadi8 u0AlwaysExtPred:$src)),
- (LDrib_abs_V4 u0AlwaysExtPred:$src)>;
+ def: Loada_pat<extloadi16, i32, addrgp, L4_loadruh_abs>;
+ def: Loada_pat<sextloadi16, i32, addrgp, L4_loadrh_abs>;
+ def: Loada_pat<zextloadi16, i32, addrgp, L4_loadruh_abs>;
-def : Pat<(i32 (zextloadi8 u0AlwaysExtPred:$src)),
- (LDriub_abs_V4 u0AlwaysExtPred:$src)>;
-
-def : Pat<(i32 (sextloadi16 u0AlwaysExtPred:$src)),
- (LDrih_abs_V4 u0AlwaysExtPred:$src)>;
-
-def : Pat<(i32 (zextloadi16 u0AlwaysExtPred:$src)),
- (LDriuh_abs_V4 u0AlwaysExtPred:$src)>;
+ def: Loada_pat<load, i32, addrgp, L4_loadri_abs>;
+ def: Loada_pat<load, i64, addrgp, L4_loadrd_abs>;
}
-// Indexed store word - global address.
-// memw(Rs+#u6:2)=#S8
-let AddedComplexity = 10 in
-def STriw_offset_ext_V4 : STInst<(outs),
- (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3),
- "memw($src1+#$src2) = ##$src3",
- [(store (HexagonCONST32 tglobaladdr:$src3),
- (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
- Requires<[HasV4T]>;
-
-def : Pat<(i64 (ctlz (i64 DoubleRegs:$src1))),
- (i64 (COMBINE_Ir_V4 (i32 0), (i32 (CTLZ64_rr DoubleRegs:$src1))))>,
- Requires<[HasV4T]>;
-
-def : Pat<(i64 (cttz (i64 DoubleRegs:$src1))),
- (i64 (COMBINE_Ir_V4 (i32 0), (i32 (CTTZ64_rr DoubleRegs:$src1))))>,
- Requires<[HasV4T]>;
+let AddedComplexity = 100 in {
+ def: Storea_pat<truncstorei8, I32, addrgp, S2_storerbabs>;
+ def: Storea_pat<truncstorei16, I32, addrgp, S2_storerhabs>;
+ def: Storea_pat<store, I32, addrgp, S2_storeriabs>;
+ def: Storea_pat<store, I64, addrgp, S2_storerdabs>;
+}
+def: Loada_pat<atomic_load_8, i32, addrgp, L4_loadrub_abs>;
+def: Loada_pat<atomic_load_16, i32, addrgp, L4_loadruh_abs>;
+def: Loada_pat<atomic_load_32, i32, addrgp, L4_loadri_abs>;
+def: Loada_pat<atomic_load_64, i64, addrgp, L4_loadrd_abs>;
-// i8 -> i64 loads
-// We need a complexity of 120 here to override preceding handling of
-// zextloadi8.
-let Predicates = [HasV4T], AddedComplexity = 120 in {
-def: Pat <(i64 (extloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDrib_abs_V4 tglobaladdr:$addr)))>;
+def: Storea_pat<SwapSt<atomic_store_8>, I32, addrgp, S2_storerbabs>;
+def: Storea_pat<SwapSt<atomic_store_16>, I32, addrgp, S2_storerhabs>;
+def: Storea_pat<SwapSt<atomic_store_32>, I32, addrgp, S2_storeriabs>;
+def: Storea_pat<SwapSt<atomic_store_64>, I64, addrgp, S2_storerdabs>;
-def: Pat <(i64 (zextloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDriub_abs_V4 tglobaladdr:$addr)))>;
+//===----------------------------------------------------------------------===//
+// :raw form of the boundscheck:hi:lo instructions
+//===----------------------------------------------------------------------===//
-def: Pat <(i64 (sextloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (SXTW (LDrib_abs_V4 tglobaladdr:$addr)))>;
+// A4_boundscheck_lo: Detect if a register is within bounds.
+let hasSideEffects = 0 in
+def A4_boundscheck_lo: ALU64Inst <
+ (outs PredRegs:$Pd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Pd = boundscheck($Rss, $Rtt):raw:lo"> {
+ bits<2> Pd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b00100;
+ let Inst{13} = 0b1;
+ let Inst{7-5} = 0b100;
+ let Inst{1-0} = Pd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
-def: Pat <(i64 (extloadi8 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDrib_abs_V4 FoldGlobalAddr:$addr)))>;
+// A4_boundscheck_hi: Detect if a register is within bounds.
+let hasSideEffects = 0 in
+def A4_boundscheck_hi: ALU64Inst <
+ (outs PredRegs:$Pd),
+ (ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
+ "$Pd = boundscheck($Rss, $Rtt):raw:hi"> {
+ bits<2> Pd;
+ bits<5> Rss;
+ bits<5> Rtt;
+
+ let IClass = 0b1101;
+
+ let Inst{27-23} = 0b00100;
+ let Inst{13} = 0b1;
+ let Inst{7-5} = 0b101;
+ let Inst{1-0} = Pd;
+ let Inst{20-16} = Rss;
+ let Inst{12-8} = Rtt;
+ }
-def: Pat <(i64 (zextloadi8 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDriub_abs_V4 FoldGlobalAddr:$addr)))>;
+let hasSideEffects = 0, isAsmParserOnly = 1 in
+def A4_boundscheck : MInst <
+ (outs PredRegs:$Pd), (ins IntRegs:$Rs, DoubleRegs:$Rtt),
+ "$Pd=boundscheck($Rs,$Rtt)">;
+
+// A4_tlbmatch: Detect if a VA/ASID matches a TLB entry.
+let isPredicateLate = 1, hasSideEffects = 0 in
+def A4_tlbmatch : ALU64Inst<(outs PredRegs:$Pd),
+ (ins DoubleRegs:$Rs, IntRegs:$Rt),
+ "$Pd = tlbmatch($Rs, $Rt)",
+ [], "", ALU64_tc_2early_SLOT23> {
+ bits<2> Pd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1101;
+ let Inst{27-23} = 0b00100;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b1;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = 0b011;
+ let Inst{1-0} = Pd;
+ }
-def: Pat <(i64 (sextloadi8 FoldGlobalAddr:$addr)),
- (i64 (SXTW (LDrib_abs_V4 FoldGlobalAddr:$addr)))>;
+// We need custom lowering of ISD::PREFETCH into HexagonISD::DCFETCH
+// because the SDNode ISD::PREFETCH has properties MayLoad and MayStore.
+// We don't really want either one here.
+def SDTHexagonDCFETCH : SDTypeProfile<0, 2, [SDTCisPtrTy<0>,SDTCisInt<1>]>;
+def HexagonDCFETCH : SDNode<"HexagonISD::DCFETCH", SDTHexagonDCFETCH,
+ [SDNPHasChain]>;
+
+// Use LD0Inst for dcfetch, but set "mayLoad" to 0 because this doesn't
+// really do a load.
+let hasSideEffects = 1, mayLoad = 0 in
+def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3),
+ "dcfetch($Rs + #$u11_3)",
+ [(HexagonDCFETCH IntRegs:$Rs, u11_3ImmPred:$u11_3)],
+ "", LD_tc_ld_SLOT0> {
+ bits<5> Rs;
+ bits<14> u11_3;
+
+ let IClass = 0b1001;
+ let Inst{27-21} = 0b0100000;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{10-0} = u11_3{13-3};
}
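+
+// The #u11_3 immediate is 8-byte aligned: only bits 13-3 are encoded, so
+// e.g. "dcfetch(r0 + #8)" puts 1 in the immediate field.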
-// i16 -> i64 loads
-// We need a complexity of 120 here to override preceding handling of
-// zextloadi16.
-let AddedComplexity = 120 in {
-def: Pat <(i64 (extloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDrih_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-def: Pat <(i64 (zextloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDriuh_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (sextloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (SXTW (LDrih_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (extloadi16 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDrih_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (zextloadi16 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDriuh_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// Compound instructions
+//===----------------------------------------------------------------------===//
-def: Pat <(i64 (sextloadi16 FoldGlobalAddr:$addr)),
- (i64 (SXTW (LDrih_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
+let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1,
+ isPredicated = 1, isPredicatedNew = 1, isExtendable = 1,
+ opExtentBits = 11, opExtentAlign = 2, opExtendable = 1,
+ isTerminator = 1 in
+class CJInst_tstbit_R0<string px, bit np, string tnt>
+ : InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2),
+ ""#px#" = tstbit($Rs, #0); if ("
+ #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
+ [], "", COMPOUND, TypeCOMPOUND> {
+ bits<4> Rs;
+ bits<11> r9_2;
+
+ // np: !p[01]
+ let isPredicatedFalse = np;
+ // tnt: Taken/Not Taken
+ let isBrTaken = !if (!eq(tnt, "t"), "true", "false");
+ let isTaken = !if (!eq(tnt, "t"), 1, 0);
+
+ let IClass = 0b0001;
+ let Inst{27-26} = 0b00;
+ let Inst{25} = !if (!eq(px, "!p1"), 1,
+ !if (!eq(px, "p1"), 1, 0));
+ let Inst{24-23} = 0b11;
+ let Inst{22} = np;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{19-16} = Rs;
+ let Inst{13} = !if (!eq(tnt, "t"), 1, 0);
+ let Inst{9-8} = 0b11;
+ let Inst{7-1} = r9_2{8-2};
}
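+
+// E.g. J4_tstbit0_fp0_jump_nt (px = "p0", np = 1, tnt = "nt") assembles as
+// "p0 = tstbit($Rs, #0); if (!p0.new) jump:nt $r9_2".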
-// i32->i64 loads
-// We need a complexity of 120 here to override preceding handling of
-// zextloadi32.
-let AddedComplexity = 120 in {
-def: Pat <(i64 (extloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (zextloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (sextloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
- (i64 (SXTW (LDriw_abs_V4 tglobaladdr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (extloadi32 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
-def: Pat <(i64 (zextloadi32 FoldGlobalAddr:$addr)),
- (i64 (COMBINE_Ir_V4 0, (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
-
-def: Pat <(i64 (sextloadi32 FoldGlobalAddr:$addr)),
- (i64 (SXTW (LDriw_abs_V4 FoldGlobalAddr:$addr)))>,
- Requires<[HasV4T]>;
+let Defs = [PC, P0], Uses = [P0] in {
+ def J4_tstbit0_tp0_jump_nt : CJInst_tstbit_R0<"p0", 0, "nt">;
+ def J4_tstbit0_tp0_jump_t : CJInst_tstbit_R0<"p0", 0, "t">;
+ def J4_tstbit0_fp0_jump_nt : CJInst_tstbit_R0<"p0", 1, "nt">;
+ def J4_tstbit0_fp0_jump_t : CJInst_tstbit_R0<"p0", 1, "t">;
}
-// Indexed store double word - global address.
-// memw(Rs+#u6:2)=#S8
-let AddedComplexity = 10 in
-def STrih_offset_ext_V4 : STInst<(outs),
- (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3),
- "memh($src1+#$src2) = ##$src3",
- [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3),
- (add IntRegs:$src1, u6_1ImmPred:$src2))]>,
- Requires<[HasV4T]>;
-// Map from store(globaladdress + x) -> memd(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(store (i64 DoubleRegs:$src1),
- FoldGlobalAddrGP:$addr),
- (STrid_abs_V4 FoldGlobalAddrGP:$addr, (i64 DoubleRegs:$src1))>,
- Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_64 FoldGlobalAddrGP:$addr,
- (i64 DoubleRegs:$src1)),
- (STrid_abs_V4 FoldGlobalAddrGP:$addr, (i64 DoubleRegs:$src1))>,
- Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memb(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei8 (i32 IntRegs:$src1), FoldGlobalAddrGP:$addr),
- (STrib_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_8 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1)),
- (STrib_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memh(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei16 (i32 IntRegs:$src1), FoldGlobalAddrGP:$addr),
- (STrih_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_16 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1)),
- (STrih_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memw(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(store (i32 IntRegs:$src1), FoldGlobalAddrGP:$addr),
- (STriw_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_32 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1)),
- (STriw_abs_V4 FoldGlobalAddrGP:$addr, (i32 IntRegs:$src1))>,
- Requires<[HasV4T]>;
-
-// Map from load(globaladdress + x) -> memd(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i64 (load FoldGlobalAddrGP:$addr)),
- (i64 (LDrid_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-def : Pat<(atomic_load_64 FoldGlobalAddrGP:$addr),
- (i64 (LDrid_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-// Map from load(globaladdress + x) -> memb(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (extloadi8 FoldGlobalAddrGP:$addr)),
- (i32 (LDrib_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-// Map from load(globaladdress + x) -> memb(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (sextloadi8 FoldGlobalAddrGP:$addr)),
- (i32 (LDrib_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-//let AddedComplexity = 100 in
-let AddedComplexity = 100 in
-def : Pat<(i32 (extloadi16 FoldGlobalAddrGP:$addr)),
- (i32 (LDrih_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-// Map from load(globaladdress + x) -> memh(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (sextloadi16 FoldGlobalAddrGP:$addr)),
- (i32 (LDrih_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-
-// Map from load(globaladdress + x) -> memuh(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (zextloadi16 FoldGlobalAddrGP:$addr)),
- (i32 (LDriuh_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
+let Defs = [PC, P1], Uses = [P1] in {
+ def J4_tstbit0_tp1_jump_nt : CJInst_tstbit_R0<"p1", 0, "nt">;
+ def J4_tstbit0_tp1_jump_t : CJInst_tstbit_R0<"p1", 0, "t">;
+ def J4_tstbit0_fp1_jump_nt : CJInst_tstbit_R0<"p1", 1, "nt">;
+ def J4_tstbit0_fp1_jump_t : CJInst_tstbit_R0<"p1", 1, "t">;
+}
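+// Illustrative syntax for the eight defs above (register/label operands
+// hypothetical):
+//   p0 = tstbit(r7, #0); if (p0.new) jump:nt target    (J4_tstbit0_tp0_jump_nt)
+//   p1 = tstbit(r7, #0); if (!p1.new) jump:t target    (J4_tstbit0_fp1_jump_t)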
-def : Pat<(atomic_load_16 FoldGlobalAddrGP:$addr),
- (i32 (LDriuh_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
-// Map from load(globaladdress + x) -> memub(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (zextloadi8 FoldGlobalAddrGP:$addr)),
- (i32 (LDriub_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
+let isBranch = 1, hasSideEffects = 0,
+ isExtentSigned = 1, isPredicated = 1, isPredicatedNew = 1,
+ isExtendable = 1, opExtentBits = 11, opExtentAlign = 2,
+ opExtendable = 2, isTerminator = 1 in
+class CJInst_RR<string px, string op, bit np, string tnt>
+ : InstHexagon<(outs), (ins IntRegs:$Rs, IntRegs:$Rt, brtarget:$r9_2),
+ ""#px#" = cmp."#op#"($Rs, $Rt); if ("
+ #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
+ [], "", COMPOUND, TypeCOMPOUND> {
+ bits<4> Rs;
+ bits<4> Rt;
+ bits<11> r9_2;
+
+ // np: !p[01]
+ let isPredicatedFalse = np;
+ // tnt: Taken/Not Taken
+ let isBrTaken = !if (!eq(tnt, "t"), "true", "false");
+ let isTaken = !if (!eq(tnt, "t"), 1, 0);
+
+ let IClass = 0b0001;
+ let Inst{27-23} = !if (!eq(op, "eq"), 0b01000,
+ !if (!eq(op, "gt"), 0b01001,
+ !if (!eq(op, "gtu"), 0b01010, 0)));
+ let Inst{22} = np;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{19-16} = Rs;
+ let Inst{13} = !if (!eq(tnt, "t"), 1, 0);
+ // px: Predicate reg 0/1
+ let Inst{12} = !if (!eq(px, "!p1"), 1,
+ !if (!eq(px, "p1"), 1, 0));
+ let Inst{11-8} = Rt;
+ let Inst{7-1} = r9_2{8-2};
+}
-def : Pat<(atomic_load_8 FoldGlobalAddrGP:$addr),
- (i32 (LDriub_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
+// P[10] taken/not taken.
+multiclass T_tnt_CJInst_RR<string op, bit np> {
+ let Defs = [PC, P0], Uses = [P0] in {
+ def NAME#p0_jump_nt : CJInst_RR<"p0", op, np, "nt">;
+ def NAME#p0_jump_t : CJInst_RR<"p0", op, np, "t">;
+ }
+ let Defs = [PC, P1], Uses = [P1] in {
+ def NAME#p1_jump_nt : CJInst_RR<"p1", op, np, "nt">;
+ def NAME#p1_jump_t : CJInst_RR<"p1", op, np, "t">;
+ }
+}
+// Predicate / !Predicate
+multiclass T_pnp_CJInst_RR<string op>{
+ defm J4_cmp#NAME#_t : T_tnt_CJInst_RR<op, 0>;
+ defm J4_cmp#NAME#_f : T_tnt_CJInst_RR<op, 1>;
+}
+// TypeCJ Instructions compare RR and jump
+defm eq : T_pnp_CJInst_RR<"eq">;
+defm gt : T_pnp_CJInst_RR<"gt">;
+defm gtu : T_pnp_CJInst_RR<"gtu">;
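+// Note on naming: the nested defms concatenate NAME, so each defm above
+// expands to eight instructions; e.g. 'defm eq' yields J4_cmpeq_tp0_jump_nt
+// through J4_cmpeq_fp1_jump_t.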
+
+let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1,
+ isPredicated = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11,
+ opExtentAlign = 2, opExtendable = 2, isTerminator = 1 in
+class CJInst_RU5<string px, string op, bit np, string tnt>
+ : InstHexagon<(outs), (ins IntRegs:$Rs, u5Imm:$U5, brtarget:$r9_2),
+ ""#px#" = cmp."#op#"($Rs, #$U5); if ("
+ #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
+ [], "", COMPOUND, TypeCOMPOUND> {
+ bits<4> Rs;
+ bits<5> U5;
+ bits<11> r9_2;
+
+ // np: !p[01]
+ let isPredicatedFalse = np;
+ // tnt: Taken/Not Taken
+ let isBrTaken = !if (!eq(tnt, "t"), "true", "false");
+ let isTaken = !if (!eq(tnt, "t"), 1, 0);
+
+ let IClass = 0b0001;
+ let Inst{27-26} = 0b00;
+ // px: Predicate reg 0/1
+ let Inst{25} = !if (!eq(px, "!p1"), 1,
+ !if (!eq(px, "p1"), 1, 0));
+ let Inst{24-23} = !if (!eq(op, "eq"), 0b00,
+ !if (!eq(op, "gt"), 0b01,
+ !if (!eq(op, "gtu"), 0b10, 0)));
+ let Inst{22} = np;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{19-16} = Rs;
+ let Inst{13} = !if (!eq(tnt, "t"), 1, 0);
+ let Inst{12-8} = U5;
+ let Inst{7-1} = r9_2{8-2};
+}
+// P[10] taken/not taken.
+multiclass T_tnt_CJInst_RU5<string op, bit np> {
+ let Defs = [PC, P0], Uses = [P0] in {
+ def NAME#p0_jump_nt : CJInst_RU5<"p0", op, np, "nt">;
+ def NAME#p0_jump_t : CJInst_RU5<"p0", op, np, "t">;
+ }
+ let Defs = [PC, P1], Uses = [P1] in {
+ def NAME#p1_jump_nt : CJInst_RU5<"p1", op, np, "nt">;
+ def NAME#p1_jump_t : CJInst_RU5<"p1", op, np, "t">;
+ }
+}
+// Predicate / !Predicate
+multiclass T_pnp_CJInst_RU5<string op>{
+ defm J4_cmp#NAME#i_t : T_tnt_CJInst_RU5<op, 0>;
+ defm J4_cmp#NAME#i_f : T_tnt_CJInst_RU5<op, 1>;
+}
+// TypeCJ Instructions compare RI and jump
+defm eq : T_pnp_CJInst_RU5<"eq">;
+defm gt : T_pnp_CJInst_RU5<"gt">;
+defm gtu : T_pnp_CJInst_RU5<"gtu">;
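+// Same expansion as the register-register forms, with an 'i' infix; e.g.
+// 'defm eq' yields J4_cmpeqi_tp0_jump_nt, whose syntax is (operands
+// illustrative): p0 = cmp.eq(r5, #7); if (p0.new) jump:nt target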
+
+let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1,
+ isPredicated = 1, isPredicatedFalse = 1, isPredicatedNew = 1,
+ isExtendable = 1, opExtentBits = 11, opExtentAlign = 2, opExtendable = 1,
+ isTerminator = 1 in
+class CJInst_Rn1<string px, string op, bit np, string tnt>
+ : InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2),
+ ""#px#" = cmp."#op#"($Rs,#-1); if ("
+ #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
+ [], "", COMPOUND, TypeCOMPOUND> {
+ bits<4> Rs;
+ bits<11> r9_2;
+
+ // np: !p[01]
+ let isPredicatedFalse = np;
+ // tnt: Taken/Not Taken
+ let isBrTaken = !if (!eq(tnt, "t"), "true", "false");
+ let isTaken = !if (!eq(tnt, "t"), 1, 0);
+
+ let IClass = 0b0001;
+ let Inst{27-26} = 0b00;
+ let Inst{25} = !if (!eq(px, "!p1"), 1,
+ !if (!eq(px, "p1"), 1, 0));
+
+ let Inst{24-23} = 0b11;
+ let Inst{22} = np;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{19-16} = Rs;
+ let Inst{13} = !if (!eq(tnt, "t"), 1, 0);
+ let Inst{9-8} = !if (!eq(op, "eq"), 0b00,
+ !if (!eq(op, "gt"), 0b01, 0));
+ let Inst{7-1} = r9_2{8-2};
+}
-// Map from load(globaladdress + x) -> memw(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(i32 (load FoldGlobalAddrGP:$addr)),
- (i32 (LDriw_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
+// P[10] taken/not taken.
+multiclass T_tnt_CJInst_Rn1<string op, bit np> {
+ let Defs = [PC, P0], Uses = [P0] in {
+ def NAME#p0_jump_nt : CJInst_Rn1<"p0", op, np, "nt">;
+ def NAME#p0_jump_t : CJInst_Rn1<"p0", op, np, "t">;
+ }
+ let Defs = [PC, P1], Uses = [P1] in {
+ def NAME#p1_jump_nt : CJInst_Rn1<"p1", op, np, "nt">;
+ def NAME#p1_jump_t : CJInst_Rn1<"p1", op, np, "t">;
+ }
+}
+// Predicate / !Predicate
+multiclass T_pnp_CJInst_Rn1<string op>{
+ defm J4_cmp#NAME#n1_t : T_tnt_CJInst_Rn1<op, 0>;
+ defm J4_cmp#NAME#n1_f : T_tnt_CJInst_Rn1<op, 1>;
+}
+// TypeCJ Instructions compare -1 and jump
+defm eq : T_pnp_CJInst_Rn1<"eq">;
+defm gt : T_pnp_CJInst_Rn1<"gt">;
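+// For example (operands illustrative), J4_cmpeqn1_fp0_jump_t prints as:
+//   p0 = cmp.eq(r5,#-1); if (!p0.new) jump:t target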
+
+// J4_jumpseti: Direct unconditional jump and set register to immediate.
+let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
+ isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
+ opExtentAlign = 2, opExtendable = 2 in
+def J4_jumpseti: CJInst <
+ (outs IntRegs:$Rd),
+ (ins u6Imm:$U6, brtarget:$r9_2),
+ "$Rd = #$U6 ; jump $r9_2"> {
+ bits<4> Rd;
+ bits<6> U6;
+ bits<11> r9_2;
+
+ let IClass = 0b0001;
+ let Inst{27-24} = 0b0110;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{19-16} = Rd;
+ let Inst{13-8} = U6;
+ let Inst{7-1} = r9_2{8-2};
+ }
-def : Pat<(atomic_load_32 FoldGlobalAddrGP:$addr),
- (i32 (LDriw_abs_V4 FoldGlobalAddrGP:$addr))>,
- Requires<[HasV4T]>;
+// J4_jumpsetr: Direct unconditional jump and transfer register.
+let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
+ isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
+ opExtentAlign = 2, opExtendable = 2 in
+def J4_jumpsetr: CJInst <
+ (outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, brtarget:$r9_2),
+ "$Rd = $Rs ; jump $r9_2"> {
+ bits<4> Rd;
+ bits<4> Rs;
+ bits<11> r9_2;
+
+ let IClass = 0b0001;
+ let Inst{27-24} = 0b0111;
+ let Inst{21-20} = r9_2{10-9};
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rs;
+ let Inst{7-1} = r9_2{8-2};
+ }
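+// Illustrative syntax for the two jump-and-set forms above (operands
+// hypothetical):
+//   r3 = #12 ; jump target    (J4_jumpseti)
+//   r3 = r5 ; jump target     (J4_jumpsetr)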
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV5.td b/lib/Target/Hexagon/HexagonInstrInfoV5.td
index 9da6074..19b0935 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV5.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV5.td
@@ -1,26 +1,94 @@
-def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [
- SDTCisVT<0, f32>,
- SDTCisPtrTy<1>]>;
-def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>;
+//=- HexagonInstrInfoV5.td - Target Desc. for Hexagon Target -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon V5 instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// XTYPE/MPY
+//===----------------------------------------------------------------------===//
+
+// Rdd[+]=vrmpybsu(Rss,Rtt)
+let Predicates = [HasV5T] in {
+ def M5_vrmpybsu: T_XTYPE_Vect<"vrmpybsu", 0b110, 0b001, 0>;
+ def M5_vrmacbsu: T_XTYPE_Vect_acc<"vrmpybsu", 0b110, 0b001, 0>;
+
+ // Rdd[+]=vrmpybu(Rss,Rtt)
+ def M5_vrmpybuu: T_XTYPE_Vect<"vrmpybu", 0b100, 0b001, 0>;
+ def M5_vrmacbuu: T_XTYPE_Vect_acc<"vrmpybu", 0b100, 0b001, 0>;
+
+ def M5_vdmpybsu: T_M2_vmpy<"vdmpybsu", 0b101, 0b001, 0, 0, 1>;
+ def M5_vdmacbsu: T_M2_vmpy_acc_sat <"vdmpybsu", 0b001, 0b001, 0, 0>;
+}
+
+// Vector multiply bytes
+// Rdd=vmpyb[s]u(Rs,Rt)
+let Predicates = [HasV5T] in {
+ def M5_vmpybsu: T_XTYPE_mpy64 <"vmpybsu", 0b010, 0b001, 0, 0, 0>;
+ def M5_vmpybuu: T_XTYPE_mpy64 <"vmpybu", 0b100, 0b001, 0, 0, 0>;
+
+ // Rxx+=vmpyb[s]u(Rs,Rt)
+ def M5_vmacbsu: T_XTYPE_mpy64_acc <"vmpybsu", "+", 0b110, 0b001, 0, 0, 0>;
+ def M5_vmacbuu: T_XTYPE_mpy64_acc <"vmpybu", "+", 0b100, 0b001, 0, 0, 0>;
+
+ // Rd=vaddhub(Rss,Rtt):sat
+ let hasNewValue = 1, opNewValue = 0 in
+ def A5_vaddhubs: T_S3op_1 <"vaddhub", IntRegs, 0b01, 0b001, 0, 1>;
+}
+
+def S2_asr_i_p_rnd : S_2OpInstImm<"asr", 0b110, 0b111, u6Imm,
+ [(set I64:$dst,
+ (sra (i64 (add (i64 (sra I64:$src1, u6ImmPred:$src2)), 1)),
+ (i32 1)))], 1>,
+ Requires<[HasV5T]> {
+ bits<6> src2;
+ let Inst{13-8} = src2;
+}
+
+let isAsmParserOnly = 1 in
+def S2_asr_i_p_rnd_goodsyntax
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ "$dst = asrrnd($src1, #$src2)">;
+
+def C4_fastcorner9 : T_LOGICAL_2OP<"fastcorner9", 0b000, 0, 0>,
+ Requires<[HasV5T]> {
+ let Inst{13,7,4} = 0b111;
+}
+
+def C4_fastcorner9_not : T_LOGICAL_2OP<"!fastcorner9", 0b000, 0, 0>,
+ Requires<[HasV5T]> {
+ let Inst{20,13,7,4} = 0b1111;
+}
-let isReMaterializable = 1, isMoveImm = 1 in
+def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [SDTCisVT<0, f32>,
+ SDTCisPtrTy<1>]>;
+def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>;
+
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
def FCONST32_nsdata : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
- "$dst = CONST32(#$global)",
- [(set (f32 IntRegs:$dst),
- (HexagonFCONST32 tglobaladdr:$global))]>,
- Requires<[HasV5T]>;
+ "$dst = CONST32(#$global)",
+ [(set F32:$dst,
+ (HexagonFCONST32 tglobaladdr:$global))]>,
+ Requires<[HasV5T]>;
-let isReMaterializable = 1, isMoveImm = 1 in
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
- "$dst = CONST64(#$src1)",
- [(set DoubleRegs:$dst, fpimm:$src1)]>,
- Requires<[HasV5T]>;
+ "$dst = CONST64(#$src1)",
+ [(set F64:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
-let isReMaterializable = 1, isMoveImm = 1 in
+let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
- "$dst = CONST32(#$src1)",
- [(set IntRegs:$dst, fpimm:$src1)]>,
- Requires<[HasV5T]>;
+ "$dst = CONST32(#$src1)",
+ [(set F32:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
// Transfer immediate float.
// Only works with single precision fp value.
@@ -29,605 +97,841 @@ def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
// Make sure that complexity is more than the CONST32 pattern in
// HexagonInstrInfo.td patterns.
let isExtended = 1, opExtendable = 1, isMoveImm = 1, isReMaterializable = 1,
-isPredicable = 1, AddedComplexity = 30, validSubTargets = HasV5SubT,
-isCodeGenOnly = 1 in
+ isPredicable = 1, AddedComplexity = 30, validSubTargets = HasV5SubT,
+ isCodeGenOnly = 1 in
def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
- "$dst = #$src1",
- [(set IntRegs:$dst, fpimm:$src1)]>,
- Requires<[HasV5T]>;
+ "$dst = #$src1",
+ [(set F32:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
let isExtended = 1, opExtendable = 2, isPredicated = 1,
-neverHasSideEffects = 1, validSubTargets = HasV5SubT in
+ hasSideEffects = 0, validSubTargets = HasV5SubT, isCodeGenOnly = 1 in
def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32Ext:$src2),
- "if ($src1) $dst = #$src2",
- []>,
- Requires<[HasV5T]>;
+ "if ($src1) $dst = #$src2", []>,
+ Requires<[HasV5T]>;
-let isExtended = 1, opExtendable = 2, isPredicated = 1, isPredicatedFalse = 1,
-neverHasSideEffects = 1, validSubTargets = HasV5SubT in
+let isPseudo = 1, isExtended = 1, opExtendable = 2, isPredicated = 1,
+ isPredicatedFalse = 1, hasSideEffects = 0, validSubTargets = HasV5SubT in
def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32Ext:$src2),
- "if (!$src1) $dst =#$src2",
- []>,
- Requires<[HasV5T]>;
+ "if (!$src1) $dst = #$src2", []>,
+ Requires<[HasV5T]>;
+
+def SDTHexagonI32I64: SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i64>]>;
+
+def HexagonPOPCOUNT: SDNode<"HexagonISD::POPCOUNT", SDTHexagonI32I64>;
+
+let hasNewValue = 1, validSubTargets = HasV5SubT in
+def S5_popcountp : ALU64_rr<(outs IntRegs:$Rd), (ins DoubleRegs:$Rss),
+ "$Rd = popcount($Rss)",
+ [(set I32:$Rd, (HexagonPOPCOUNT I64:$Rss))], "", S_2op_tc_2_SLOT23>,
+ Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<5> Rss;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b1000011;
+ let Inst{7-5} = 0b011;
+ let Inst{4-0} = Rd;
+ let Inst{20-16} = Rss;
+ }
+
+defm: Loadx_pat<load, f32, s11_2ExtPred, L2_loadri_io>;
+defm: Loadx_pat<load, f64, s11_3ExtPred, L2_loadrd_io>;
+
+defm: Storex_pat<store, F32, s11_2ExtPred, S2_storeri_io>;
+defm: Storex_pat<store, F64, s11_3ExtPred, S2_storerd_io>;
+def: Storex_simple_pat<store, F32, S2_storeri_io>;
+def: Storex_simple_pat<store, F64, S2_storerd_io>;
+
+let isFP = 1, hasNewValue = 1, opNewValue = 0 in
+class T_MInstFloat <string mnemonic, bits<3> MajOp, bits<3> MinOp>
+ : MInst<(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd = "#mnemonic#"($Rs, $Rt)", [],
+ "" , M_tc_3or4x_SLOT23 > ,
+ Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = Rt;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+ }
+
+let isCommutable = 1 in {
+ def F2_sfadd : T_MInstFloat < "sfadd", 0b000, 0b000>;
+ def F2_sfmpy : T_MInstFloat < "sfmpy", 0b010, 0b000>;
+}
-// Convert single precision to double precision and vice-versa.
-def CONVERT_sf2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2df($src)",
- [(set DoubleRegs:$dst, (fextend IntRegs:$src))]>,
- Requires<[HasV5T]>;
+def F2_sfsub : T_MInstFloat < "sfsub", 0b000, 0b001>;
-def CONVERT_df2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2sf($src)",
- [(set IntRegs:$dst, (fround DoubleRegs:$src))]>,
- Requires<[HasV5T]>;
+def: Pat<(f32 (fadd F32:$src1, F32:$src2)),
+ (F2_sfadd F32:$src1, F32:$src2)>;
+def: Pat<(f32 (fsub F32:$src1, F32:$src2)),
+ (F2_sfsub F32:$src1, F32:$src2)>;
-// Load.
-def LDrid_f : LDInst<(outs DoubleRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memd($addr)",
- [(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>,
- Requires<[HasV5T]>;
+def: Pat<(f32 (fmul F32:$src1, F32:$src2)),
+ (F2_sfmpy F32:$src1, F32:$src2)>;
+let Itinerary = M_tc_3x_SLOT23 in {
+ def F2_sfmax : T_MInstFloat < "sfmax", 0b100, 0b000>;
+ def F2_sfmin : T_MInstFloat < "sfmin", 0b100, 0b001>;
+}
-let AddedComplexity = 20 in
-def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst),
- (ins IntRegs:$src1, s11_3Imm:$offset),
- "$dst = memd($src1+#$offset)",
- [(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1,
- s11_3ImmPred:$offset))))]>,
- Requires<[HasV5T]>;
+let AddedComplexity = 100, Predicates = [HasV5T] in {
+ def: Pat<(f32 (select (i1 (setolt F32:$src1, F32:$src2)),
+ F32:$src1, F32:$src2)),
+ (F2_sfmin F32:$src1, F32:$src2)>;
-def LDriw_f : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr), "$dst = memw($addr)",
- [(set IntRegs:$dst, (f32 (load ADDRriS11_2:$addr)))]>,
- Requires<[HasV5T]>;
+ def: Pat<(f32 (select (i1 (setogt F32:$src1, F32:$src2)),
+ F32:$src2, F32:$src1)),
+ (F2_sfmin F32:$src1, F32:$src2)>;
+ def: Pat<(f32 (select (i1 (setogt F32:$src1, F32:$src2)),
+ F32:$src1, F32:$src2)),
+ (F2_sfmax F32:$src1, F32:$src2)>;
-let AddedComplexity = 20 in
-def LDriw_indexed_f : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_2Imm:$offset),
- "$dst = memw($src1+#$offset)",
- [(set IntRegs:$dst, (f32 (load (add IntRegs:$src1,
- s11_2ImmPred:$offset))))]>,
- Requires<[HasV5T]>;
+ def: Pat<(f32 (select (i1 (setolt F32:$src1, F32:$src2)),
+ F32:$src2, F32:$src1)),
+ (F2_sfmax F32:$src1, F32:$src2)>;
+}
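+// A worked reading of the patterns above: select(setolt(a, b), a, b) is the
+// ordered minimum, hence sfmin(a, b); the other three patterns are the
+// commuted and operand-swapped spellings of the same min/max idiom.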
-// Store.
-def STriw_f : STInst<(outs),
- (ins MEMri:$addr, IntRegs:$src1),
- "memw($addr) = $src1",
- [(store (f32 IntRegs:$src1), ADDRriS11_2:$addr)]>,
- Requires<[HasV5T]>;
+def F2_sffixupn : T_MInstFloat < "sffixupn", 0b110, 0b000>;
+def F2_sffixupd : T_MInstFloat < "sffixupd", 0b110, 0b001>;
+
+// F2_sfrecipa: Reciprocal approximation for division.
+let isPredicateLate = 1, isFP = 1,
+hasSideEffects = 0, hasNewValue = 1 in
+def F2_sfrecipa: MInst <
+ (outs IntRegs:$Rd, PredRegs:$Pe),
+ (ins IntRegs:$Rs, IntRegs:$Rt),
+ "$Rd, $Pe = sfrecipa($Rs, $Rt)">,
+ Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<2> Pe;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+ let Inst{27-21} = 0b1011111;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b1;
+ let Inst{6-5} = Pe;
+ let Inst{4-0} = Rd;
+ }
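+// Background (a sketch, not encoded here): sfrecipa supplies a seed
+// approximation plus a predicate for later refinement (hence isPredicateLate);
+// a software Newton-Raphson sequence is expected to complete the divide.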
+
+// F2_dfcmpeq: Floating point compare for equal.
+let isCompare = 1, isFP = 1 in
+class T_fcmp <string mnemonic, RegisterClass RC, bits<3> MinOp,
+ list<dag> pattern = [] >
+ : ALU64Inst <(outs PredRegs:$dst), (ins RC:$src1, RC:$src2),
+ "$dst = "#mnemonic#"($src1, $src2)", pattern,
+ "" , ALU64_tc_2early_SLOT23 > ,
+ Requires<[HasV5T]> {
+ bits<2> dst;
+ bits<5> src1;
+ bits<5> src2;
+
+ let IClass = 0b1101;
+
+ let Inst{27-21} = 0b0010111;
+ let Inst{20-16} = src1;
+ let Inst{12-8} = src2;
+ let Inst{7-5} = MinOp;
+ let Inst{1-0} = dst;
+ }
+
+class T_fcmp64 <string mnemonic, PatFrag OpNode, bits<3> MinOp>
+ : T_fcmp <mnemonic, DoubleRegs, MinOp,
+ [(set I1:$dst, (OpNode F64:$src1, F64:$src2))]> {
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b0010111;
+}
-let AddedComplexity = 10 in
-def STriw_indexed_f : STInst<(outs),
- (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
- "memw($src1+#$src2) = $src3",
- [(store (f32 IntRegs:$src3),
- (add IntRegs:$src1, s11_2ImmPred:$src2))]>,
- Requires<[HasV5T]>;
+class T_fcmp32 <string mnemonic, PatFrag OpNode, bits<3> MinOp>
+ : T_fcmp <mnemonic, IntRegs, MinOp,
+ [(set I1:$dst, (OpNode F32:$src1, F32:$src2))]> {
+ let IClass = 0b1100;
+ let Inst{27-21} = 0b0111111;
+}
-def STrid_f : STInst<(outs),
- (ins MEMri:$addr, DoubleRegs:$src1),
- "memd($addr) = $src1",
- [(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>,
- Requires<[HasV5T]>;
+def F2_dfcmpeq : T_fcmp64<"dfcmp.eq", setoeq, 0b000>;
+def F2_dfcmpgt : T_fcmp64<"dfcmp.gt", setogt, 0b001>;
+def F2_dfcmpge : T_fcmp64<"dfcmp.ge", setoge, 0b010>;
+def F2_dfcmpuo : T_fcmp64<"dfcmp.uo", setuo, 0b011>;
+
+def F2_sfcmpge : T_fcmp32<"sfcmp.ge", setoge, 0b000>;
+def F2_sfcmpuo : T_fcmp32<"sfcmp.uo", setuo, 0b001>;
+def F2_sfcmpeq : T_fcmp32<"sfcmp.eq", setoeq, 0b011>;
+def F2_sfcmpgt : T_fcmp32<"sfcmp.gt", setogt, 0b100>;
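+// Only eq, gt, ge, and uo compares exist in hardware (f32 and f64 alike);
+// an illustrative form is 'p3 = sfcmp.ge(r5, r6)'. The lt/le variants are
+// synthesized below by swapping operands.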
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'Def Pats' for ordered gt, ge, eq operations.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasV5T] in
+multiclass T_fcmp_pats<PatFrag cmpOp, InstHexagon IntMI, InstHexagon DoubleMI> {
+ // IntRegs
+ def: Pat<(i1 (cmpOp F32:$src1, F32:$src2)),
+ (IntMI F32:$src1, F32:$src2)>;
+ // DoubleRegs
+ def: Pat<(i1 (cmpOp F64:$src1, F64:$src2)),
+ (DoubleMI F64:$src1, F64:$src2)>;
+}
-// Indexed store double word.
-let AddedComplexity = 10 in
-def STrid_indexed_f : STInst<(outs),
- (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
- "memd($src1+#$src2) = $src3",
- [(store (f64 DoubleRegs:$src3),
- (add IntRegs:$src1, s11_3ImmPred:$src2))]>,
- Requires<[HasV5T]>;
+defm : T_fcmp_pats <seteq, F2_sfcmpeq, F2_dfcmpeq>;
+defm : T_fcmp_pats <setgt, F2_sfcmpgt, F2_dfcmpgt>;
+defm : T_fcmp_pats <setge, F2_sfcmpge, F2_dfcmpge>;
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'Def Pats' for unordered gt, ge, eq operations.
+//===----------------------------------------------------------------------===//
+let Predicates = [HasV5T] in
+multiclass unord_Pats <PatFrag cmpOp, InstHexagon IntMI, InstHexagon DoubleMI> {
+ // IntRegs
+ def: Pat<(i1 (cmpOp F32:$src1, F32:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, F32:$src2),
+ (IntMI F32:$src1, F32:$src2))>;
+
+ // DoubleRegs
+ def: Pat<(i1 (cmpOp F64:$src1, F64:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, F64:$src2),
+ (DoubleMI F64:$src1, F64:$src2))>;
+}
+defm : unord_Pats <setuge, F2_sfcmpge, F2_dfcmpge>;
+defm : unord_Pats <setugt, F2_sfcmpgt, F2_dfcmpgt>;
+defm : unord_Pats <setueq, F2_sfcmpeq, F2_dfcmpeq>;
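+// Rationale (IEEE semantics, see LangRef): setu<cc>(a, b) holds when the
+// operands are unordered (a NaN is present) or when the ordered compare
+// holds, so each pattern ORs the :uo compare with the ordered compare.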
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'Def Pats' for the following dags:
+// seteq(setoeq(op1, op2), 0) -> not(setoeq(op1, op2))
+// seteq(setoeq(op1, op2), 1) -> setoeq(op1, op2)
+// setne(setoeq(op1, op2), 0) -> setoeq(op1, op2)
+// setne(setoeq(op1, op2), 1) -> not(setoeq(op1, op2))
+//===----------------------------------------------------------------------===//
+let Predicates = [HasV5T] in
+multiclass eq_ordgePats <PatFrag cmpOp, InstHexagon IntMI,
+ InstHexagon DoubleMI> {
+ // IntRegs
+ def: Pat<(i1 (seteq (i1 (cmpOp F32:$src1, F32:$src2)), 0)),
+ (C2_not (IntMI F32:$src1, F32:$src2))>;
+ def: Pat<(i1 (seteq (i1 (cmpOp F32:$src1, F32:$src2)), 1)),
+ (IntMI F32:$src1, F32:$src2)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F32:$src1, F32:$src2)), 0)),
+ (IntMI F32:$src1, F32:$src2)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F32:$src1, F32:$src2)), 1)),
+ (C2_not (IntMI F32:$src1, F32:$src2))>;
+
+ // DoubleRegs
+ def : Pat<(i1 (seteq (i1 (cmpOp F64:$src1, F64:$src2)), 0)),
+ (C2_not (DoubleMI F64:$src1, F64:$src2))>;
+ def : Pat<(i1 (seteq (i1 (cmpOp F64:$src1, F64:$src2)), 1)),
+ (DoubleMI F64:$src1, F64:$src2)>;
+ def : Pat<(i1 (setne (i1 (cmpOp F64:$src1, F64:$src2)), 0)),
+ (DoubleMI F64:$src1, F64:$src2)>;
+ def : Pat<(i1 (setne (i1 (cmpOp F64:$src1, F64:$src2)), 1)),
+ (C2_not (DoubleMI F64:$src1, F64:$src2))>;
+}
-// Add
-let isCommutable = 1 in
-def fADD_rr : ALU64_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sfadd($src1, $src2)",
- [(set IntRegs:$dst, (fadd IntRegs:$src1, IntRegs:$src2))]>,
- Requires<[HasV5T]>;
+defm : eq_ordgePats<setoeq, F2_sfcmpeq, F2_dfcmpeq>;
+defm : eq_ordgePats<setoge, F2_sfcmpge, F2_dfcmpge>;
+defm : eq_ordgePats<setogt, F2_sfcmpgt, F2_dfcmpgt>;
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'Def Pats' for the following dags:
+// seteq(setolt(op1, op2), 0) -> not(setogt(op2, op1))
+// seteq(setolt(op1, op2), 1) -> setogt(op2, op1)
+// setne(setolt(op1, op2), 0) -> setogt(op2, op1)
+// setne(setolt(op1, op2), 1) -> not(setogt(op2, op1))
+//===----------------------------------------------------------------------===//
+let Predicates = [HasV5T] in
+multiclass eq_ordltPats <PatFrag cmpOp, InstHexagon IntMI,
+ InstHexagon DoubleMI> {
+ // IntRegs
+ def: Pat<(i1 (seteq (i1 (cmpOp F32:$src1, F32:$src2)), 0)),
+ (C2_not (IntMI F32:$src2, F32:$src1))>;
+ def: Pat<(i1 (seteq (i1 (cmpOp F32:$src1, F32:$src2)), 1)),
+ (IntMI F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F32:$src1, F32:$src2)), 0)),
+ (IntMI F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F32:$src1, F32:$src2)), 1)),
+ (C2_not (IntMI F32:$src2, F32:$src1))>;
+
+ // DoubleRegs
+ def: Pat<(i1 (seteq (i1 (cmpOp F64:$src1, F64:$src2)), 0)),
+ (C2_not (DoubleMI F64:$src2, F64:$src1))>;
+ def: Pat<(i1 (seteq (i1 (cmpOp F64:$src1, F64:$src2)), 1)),
+ (DoubleMI F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F64:$src1, F64:$src2)), 0)),
+ (DoubleMI F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setne (i1 (cmpOp F64:$src1, F64:$src2)), 1)),
+ (C2_not (DoubleMI F64:$src2, F64:$src1))>;
+}
-let isCommutable = 1 in
-def fADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = dfadd($src1, $src2)",
- [(set DoubleRegs:$dst, (fadd DoubleRegs:$src1,
- DoubleRegs:$src2))]>,
- Requires<[HasV5T]>;
+defm : eq_ordltPats<setole, F2_sfcmpge, F2_dfcmpge>;
+defm : eq_ordltPats<setolt, F2_sfcmpgt, F2_dfcmpgt>;
-def fSUB_rr : ALU64_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sfsub($src1, $src2)",
- [(set IntRegs:$dst, (fsub IntRegs:$src1, IntRegs:$src2))]>,
- Requires<[HasV5T]>;
-def fSUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = dfsub($src1, $src2)",
- [(set DoubleRegs:$dst, (fsub DoubleRegs:$src1,
- DoubleRegs:$src2))]>,
- Requires<[HasV5T]>;
-
-let isCommutable = 1 in
-def fMUL_rr : ALU64_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sfmpy($src1, $src2)",
- [(set IntRegs:$dst, (fmul IntRegs:$src1, IntRegs:$src2))]>,
- Requires<[HasV5T]>;
-
-let isCommutable = 1 in
-def fMUL64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2),
- "$dst = dfmpy($src1, $src2)",
- [(set DoubleRegs:$dst, (fmul DoubleRegs:$src1,
- DoubleRegs:$src2))]>,
- Requires<[HasV5T]>;
-
-// Compare.
-let isCompare = 1 in {
-multiclass FCMP64_rr<string OpcStr, PatFrag OpNode> {
- def _rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst,
- (OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>,
- Requires<[HasV5T]>;
+// Ordered compare (seto) is the inverse of setuo. See
+// http://llvm.org/docs/LangRef.html#i_fcmp
+let Predicates = [HasV5T] in {
+ def: Pat<(i1 (seto F32:$src1, F32:$src2)),
+ (C2_not (F2_sfcmpuo F32:$src2, F32:$src1))>;
+ def: Pat<(i1 (seto F32:$src1, fpimm:$src2)),
+ (C2_not (F2_sfcmpuo (TFRI_f fpimm:$src2), F32:$src1))>;
+ def: Pat<(i1 (seto F64:$src1, F64:$src2)),
+ (C2_not (F2_dfcmpuo F64:$src2, F64:$src1))>;
+ def: Pat<(i1 (seto F64:$src1, fpimm:$src2)),
+ (C2_not (F2_dfcmpuo (CONST64_Float_Real fpimm:$src2), F64:$src1))>;
+}
+
+// Ordered lt.
+let Predicates = [HasV5T] in {
+ def: Pat<(i1 (setolt F32:$src1, F32:$src2)),
+ (F2_sfcmpgt F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setolt F32:$src1, fpimm:$src2)),
+ (F2_sfcmpgt (f32 (TFRI_f fpimm:$src2)), F32:$src1)>;
+ def: Pat<(i1 (setolt F64:$src1, F64:$src2)),
+ (F2_dfcmpgt F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setolt F64:$src1, fpimm:$src2)),
+ (F2_dfcmpgt (CONST64_Float_Real fpimm:$src2), F64:$src1)>;
}
-multiclass FCMP32_rr<string OpcStr, PatFrag OpNode> {
- def _rr : ALU64_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
- !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst,
- (OpNode (f32 IntRegs:$b), (f32 IntRegs:$c)))]>,
- Requires<[HasV5T]>;
+// Unordered lt.
+let Predicates = [HasV5T] in {
+ def: Pat<(i1 (setult F32:$src1, F32:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, F32:$src2),
+ (F2_sfcmpgt F32:$src2, F32:$src1))>;
+ def: Pat<(i1 (setult F32:$src1, fpimm:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, (TFRI_f fpimm:$src2)),
+ (F2_sfcmpgt (TFRI_f fpimm:$src2), F32:$src1))>;
+ def: Pat<(i1 (setult F64:$src1, F64:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, F64:$src2),
+ (F2_dfcmpgt F64:$src2, F64:$src1))>;
+ def: Pat<(i1 (setult F64:$src1, fpimm:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, (CONST64_Float_Real fpimm:$src2)),
+ (F2_dfcmpgt (CONST64_Float_Real fpimm:$src2), F64:$src1))>;
}
+
+// Ordered le.
+let Predicates = [HasV5T] in {
+ // rs <= rt -> rt >= rs.
+ def: Pat<(i1 (setole F32:$src1, F32:$src2)),
+ (F2_sfcmpge F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setole F32:$src1, fpimm:$src2)),
+ (F2_sfcmpge (TFRI_f fpimm:$src2), F32:$src1)>;
+
+ // Rss <= Rtt -> Rtt >= Rss.
+ def: Pat<(i1 (setole F64:$src1, F64:$src2)),
+ (F2_dfcmpge F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setole F64:$src1, fpimm:$src2)),
+ (F2_dfcmpge (CONST64_Float_Real fpimm:$src2), F64:$src1)>;
}
-defm FCMPOEQ64 : FCMP64_rr<"dfcmp.eq", setoeq>;
-defm FCMPUEQ64 : FCMP64_rr<"dfcmp.eq", setueq>;
-defm FCMPOGT64 : FCMP64_rr<"dfcmp.gt", setogt>;
-defm FCMPUGT64 : FCMP64_rr<"dfcmp.gt", setugt>;
-defm FCMPOGE64 : FCMP64_rr<"dfcmp.ge", setoge>;
-defm FCMPUGE64 : FCMP64_rr<"dfcmp.ge", setuge>;
-
-defm FCMPOEQ32 : FCMP32_rr<"sfcmp.eq", setoeq>;
-defm FCMPUEQ32 : FCMP32_rr<"sfcmp.eq", setueq>;
-defm FCMPOGT32 : FCMP32_rr<"sfcmp.gt", setogt>;
-defm FCMPUGT32 : FCMP32_rr<"sfcmp.gt", setugt>;
-defm FCMPOGE32 : FCMP32_rr<"sfcmp.ge", setoge>;
-defm FCMPUGE32 : FCMP32_rr<"sfcmp.ge", setuge>;
-
-// olt.
-def : Pat <(i1 (setolt (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (FCMPOGT32_rr IntRegs:$src2, IntRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setolt (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPOGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (FCMPOGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
- (f64 DoubleRegs:$src1)))>,
- Requires<[HasV5T]>;
-
-// gt.
-def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGT64_rr (f64 DoubleRegs:$src1),
- (f64 (CONST64_Float_Real fpimm:$src2))))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setugt (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGT32_rr (f32 IntRegs:$src1), (f32 (TFRI_f fpimm:$src2))))>,
- Requires<[HasV5T]>;
-
-// ult.
-def : Pat <(i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setult (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
- (f64 DoubleRegs:$src1)))>,
- Requires<[HasV5T]>;
-
-// le.
+// Unordered le.
+let Predicates = [HasV5T] in {
// rs <= rt -> rt >= rs.
-def : Pat<(i1 (setole (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (FCMPOGE32_rr IntRegs:$src2, IntRegs:$src1))>,
- Requires<[HasV5T]>;
+ def: Pat<(i1 (setule F32:$src1, F32:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, F32:$src2),
+ (F2_sfcmpge F32:$src2, F32:$src1))>;
+ def: Pat<(i1 (setule F32:$src1, fpimm:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, (TFRI_f fpimm:$src2)),
+ (F2_sfcmpge (TFRI_f fpimm:$src2), F32:$src1))>;
+ def: Pat<(i1 (setule F64:$src1, F64:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, F64:$src2),
+ (F2_dfcmpge F64:$src2, F64:$src1))>;
+ def: Pat<(i1 (setule F64:$src1, fpimm:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, (CONST64_Float_Real fpimm:$src2)),
+ (F2_dfcmpge (CONST64_Float_Real fpimm:$src2), F64:$src1))>;
+}
+
+// Ordered ne.
+let Predicates = [HasV5T] in {
+ def: Pat<(i1 (setone F32:$src1, F32:$src2)),
+ (C2_not (F2_sfcmpeq F32:$src1, F32:$src2))>;
+ def: Pat<(i1 (setone F64:$src1, F64:$src2)),
+ (C2_not (F2_dfcmpeq F64:$src1, F64:$src2))>;
+ def: Pat<(i1 (setone F32:$src1, fpimm:$src2)),
+ (C2_not (F2_sfcmpeq F32:$src1, (TFRI_f fpimm:$src2)))>;
+ def: Pat<(i1 (setone F64:$src1, fpimm:$src2)),
+ (C2_not (F2_dfcmpeq F64:$src1, (CONST64_Float_Real fpimm:$src2)))>;
+}
-def : Pat<(i1 (setole (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPOGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
- Requires<[HasV5T]>;
+// Unordered ne.
+let Predicates = [HasV5T] in {
+ def: Pat<(i1 (setune F32:$src1, F32:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, F32:$src2),
+ (C2_not (F2_sfcmpeq F32:$src1, F32:$src2)))>;
+ def: Pat<(i1 (setune F64:$src1, F64:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, F64:$src2),
+ (C2_not (F2_dfcmpeq F64:$src1, F64:$src2)))>;
+ def: Pat<(i1 (setune F32:$src1, fpimm:$src2)),
+ (C2_or (F2_sfcmpuo F32:$src1, (TFRI_f fpimm:$src2)),
+ (C2_not (F2_sfcmpeq F32:$src1, (TFRI_f fpimm:$src2))))>;
+ def: Pat<(i1 (setune F64:$src1, fpimm:$src2)),
+ (C2_or (F2_dfcmpuo F64:$src1, (CONST64_Float_Real fpimm:$src2)),
+ (C2_not (F2_dfcmpeq F64:$src1,
+ (CONST64_Float_Real fpimm:$src2))))>;
+}
+// Besides the set[o|u] comparisons, we also need the plain set comparisons.
+let Predicates = [HasV5T] in {
+ // lt.
+ def: Pat<(i1 (setlt F32:$src1, F32:$src2)),
+ (F2_sfcmpgt F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setlt F32:$src1, fpimm:$src2)),
+ (F2_sfcmpgt (TFRI_f fpimm:$src2), F32:$src1)>;
+ def: Pat<(i1 (setlt F64:$src1, F64:$src2)),
+ (F2_dfcmpgt F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setlt F64:$src1, fpimm:$src2)),
+ (F2_dfcmpgt (CONST64_Float_Real fpimm:$src2), F64:$src1)>;
+
+ // le.
+ // rs <= rt -> rt >= rs.
+ def: Pat<(i1 (setle F32:$src1, F32:$src2)),
+ (F2_sfcmpge F32:$src2, F32:$src1)>;
+ def: Pat<(i1 (setle F32:$src1, fpimm:$src2)),
+ (F2_sfcmpge (TFRI_f fpimm:$src2), F32:$src1)>;
+
+ // Rss <= Rtt -> Rtt >= Rss.
+ def: Pat<(i1 (setle F64:$src1, F64:$src2)),
+ (F2_dfcmpge F64:$src2, F64:$src1)>;
+ def: Pat<(i1 (setle F64:$src1, fpimm:$src2)),
+ (F2_dfcmpge (CONST64_Float_Real fpimm:$src2), F64:$src1)>;
+
+ // ne.
+ def: Pat<(i1 (setne F32:$src1, F32:$src2)),
+ (C2_not (F2_sfcmpeq F32:$src1, F32:$src2))>;
+ def: Pat<(i1 (setne F64:$src1, F64:$src2)),
+ (C2_not (F2_dfcmpeq F64:$src1, F64:$src2))>;
+ def: Pat<(i1 (setne F32:$src1, fpimm:$src2)),
+ (C2_not (F2_sfcmpeq F32:$src1, (TFRI_f fpimm:$src2)))>;
+ def: Pat<(i1 (setne F64:$src1, fpimm:$src2)),
+ (C2_not (F2_dfcmpeq F64:$src1, (CONST64_Float_Real fpimm:$src2)))>;
+}
-// Rss <= Rtt -> Rtt >= Rss.
-def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (FCMPOGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
+// F2 convert template classes:
+let isFP = 1 in
+class F2_RDD_RSS_CONVERT<string mnemonic, bits<3> MinOp,
+ SDNode Op, PatLeaf RCOut, PatLeaf RCIn,
+ string chop ="">
+ : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss),
+ "$Rdd = "#mnemonic#"($Rss)"#chop,
+ [(set RCOut:$Rdd, (Op RCIn:$Rss))], "",
+ S_2op_tc_3or4x_SLOT23> {
+ bits<5> Rdd;
+ bits<5> Rss;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b0000111;
+ let Inst{20-16} = Rss;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ }
+
+let isFP = 1 in
+class F2_RDD_RS_CONVERT<string mnemonic, bits<3> MinOp,
+ SDNode Op, PatLeaf RCOut, PatLeaf RCIn,
+ string chop ="">
+ : SInst <(outs DoubleRegs:$Rdd), (ins IntRegs:$Rs),
+ "$Rdd = "#mnemonic#"($Rs)"#chop,
+ [(set RCOut:$Rdd, (Op RCIn:$Rs))], "",
+ S_2op_tc_3or4x_SLOT23> {
+ bits<5> Rdd;
+ bits<5> Rs;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b0100100;
+ let Inst{20-16} = Rs;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rdd;
+ }
+
+let isFP = 1, hasNewValue = 1 in
+class F2_RD_RSS_CONVERT<string mnemonic, bits<3> MinOp,
+ SDNode Op, PatLeaf RCOut, PatLeaf RCIn,
+ string chop ="">
+ : SInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss),
+ "$Rd = "#mnemonic#"($Rss)"#chop,
+ [(set RCOut:$Rd, (Op RCIn:$Rss))], "",
+ S_2op_tc_3or4x_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rss;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = 0b1000;
+ let Inst{23-21} = MinOp;
+ let Inst{20-16} = Rss;
+ let Inst{7-5} = 0b001;
+ let Inst{4-0} = Rd;
+ }
+
+let isFP = 1, hasNewValue = 1 in
+class F2_RD_RS_CONVERT<string mnemonic, bits<3> MajOp, bits<3> MinOp,
+ SDNode Op, PatLeaf RCOut, PatLeaf RCIn,
+ string chop ="">
+ : SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rs),
+ "$Rd = "#mnemonic#"($Rs)"#chop,
+ [(set RCOut:$Rd, (Op RCIn:$Rs))], "",
+ S_2op_tc_3or4x_SLOT23> {
+ bits<5> Rd;
+ bits<5> Rs;
+
+ let IClass = 0b1000;
+
+ let Inst{27-24} = 0b1011;
+ let Inst{23-21} = MajOp;
+ let Inst{20-16} = Rs;
+ let Inst{7-5} = MinOp;
+ let Inst{4-0} = Rd;
+ }
-def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPOGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
- DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
+// Convert single precision to double precision and vice-versa.
+def F2_conv_sf2df : F2_RDD_RS_CONVERT <"convert_sf2df", 0b000,
+ fextend, F64, F32>;
-// rs <= rt -> rt >= rs.
-def : Pat<(i1 (setule (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (FCMPUGE32_rr IntRegs:$src2, IntRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setule (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
- Requires<[HasV5T]>;
-
-// Rss <= Rtt -> Rtt >= Rss.
-def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (FCMPUGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (FCMPUGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
- DoubleRegs:$src1))>,
- Requires<[HasV5T]>;
-
-// ne.
-def : Pat<(i1 (setone (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setune (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setone (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1,
- (f64 (CONST64_Float_Real fpimm:$src2)))))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setune (f32 IntRegs:$src1), (fpimm:$src2))),
- (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
- Requires<[HasV5T]>;
-
-def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (fpimm:$src2))),
- (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1,
- (f64 (CONST64_Float_Real fpimm:$src2)))))>,
- Requires<[HasV5T]>;
+def F2_conv_df2sf : F2_RD_RSS_CONVERT <"convert_df2sf", 0b000,
+ fround, F32, F64>;
// Convert Integer to Floating Point.
-def CONVERT_d2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_d2sf($src)",
- [(set (f32 IntRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_ud2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_ud2sf($src)",
- [(set (f32 IntRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_uw2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_uw2sf($src)",
- [(set (f32 IntRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_w2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_w2sf($src)",
- [(set (f32 IntRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_d2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_d2df($src)",
- [(set (f64 DoubleRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_ud2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_ud2df($src)",
- [(set (f64 DoubleRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_uw2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_uw2df($src)",
- [(set (f64 DoubleRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_w2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_w2df($src)",
- [(set (f64 DoubleRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
+def F2_conv_d2sf : F2_RD_RSS_CONVERT <"convert_d2sf", 0b010,
+ sint_to_fp, F32, I64>;
+def F2_conv_ud2sf : F2_RD_RSS_CONVERT <"convert_ud2sf", 0b001,
+ uint_to_fp, F32, I64>;
+def F2_conv_uw2sf : F2_RD_RS_CONVERT <"convert_uw2sf", 0b001, 0b000,
+ uint_to_fp, F32, I32>;
+def F2_conv_w2sf : F2_RD_RS_CONVERT <"convert_w2sf", 0b010, 0b000,
+ sint_to_fp, F32, I32>;
+def F2_conv_d2df : F2_RDD_RSS_CONVERT <"convert_d2df", 0b011,
+ sint_to_fp, F64, I64>;
+def F2_conv_ud2df : F2_RDD_RSS_CONVERT <"convert_ud2df", 0b010,
+ uint_to_fp, F64, I64>;
+def F2_conv_uw2df : F2_RDD_RS_CONVERT <"convert_uw2df", 0b001,
+ uint_to_fp, F64, I32>;
+def F2_conv_w2df : F2_RDD_RS_CONVERT <"convert_w2df", 0b010,
+ sint_to_fp, F64, I32>;
// Convert Floating Point to Integer - default.
-def CONVERT_df2uw : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2uw($src):chop",
- [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_df2w : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2w($src):chop",
- [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_sf2uw : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2uw($src):chop",
- [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_sf2w : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2w($src):chop",
- [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_df2d : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2d($src):chop",
- [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_df2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2ud($src):chop",
- [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_sf2d : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2d($src):chop",
- [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
-
-def CONVERT_sf2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2ud($src):chop",
- [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T]>;
+def F2_conv_df2uw_chop : F2_RD_RSS_CONVERT <"convert_df2uw", 0b101,
+ fp_to_uint, I32, F64, ":chop">;
+def F2_conv_df2w_chop : F2_RD_RSS_CONVERT <"convert_df2w", 0b111,
+ fp_to_sint, I32, F64, ":chop">;
+def F2_conv_sf2uw_chop : F2_RD_RS_CONVERT <"convert_sf2uw", 0b011, 0b001,
+ fp_to_uint, I32, F32, ":chop">;
+def F2_conv_sf2w_chop : F2_RD_RS_CONVERT <"convert_sf2w", 0b100, 0b001,
+ fp_to_sint, I32, F32, ":chop">;
+def F2_conv_df2d_chop : F2_RDD_RSS_CONVERT <"convert_df2d", 0b110,
+ fp_to_sint, I64, F64, ":chop">;
+def F2_conv_df2ud_chop : F2_RDD_RSS_CONVERT <"convert_df2ud", 0b111,
+ fp_to_uint, I64, F64, ":chop">;
+def F2_conv_sf2d_chop : F2_RDD_RS_CONVERT <"convert_sf2d", 0b110,
+ fp_to_sint, I64, F32, ":chop">;
+def F2_conv_sf2ud_chop : F2_RDD_RS_CONVERT <"convert_sf2ud", 0b101,
+ fp_to_uint, I64, F32, ":chop">;
// Convert Floating Point to Integer: non-chopped.
-let AddedComplexity = 20 in
-def CONVERT_df2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2uw($src)",
- [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_df2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2w($src)",
- [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_sf2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2uw($src)",
- [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_sf2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2w($src)",
- [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_df2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2d($src)",
- [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_df2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- "$dst = convert_df2ud($src)",
- [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_sf2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2d($src)",
- [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
-let AddedComplexity = 20 in
-def CONVERT_sf2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- "$dst = convert_sf2ud($src)",
- [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
- Requires<[HasV5T, IEEERndNearV5T]>;
-
+let AddedComplexity = 20, Predicates = [HasV5T, IEEERndNearV5T] in {
+ def F2_conv_df2d : F2_RDD_RSS_CONVERT <"convert_df2d", 0b000,
+ fp_to_sint, I64, F64>;
+ def F2_conv_df2ud : F2_RDD_RSS_CONVERT <"convert_df2ud", 0b001,
+ fp_to_uint, I64, F64>;
+ def F2_conv_sf2ud : F2_RDD_RS_CONVERT <"convert_sf2ud", 0b011,
+ fp_to_uint, I64, F32>;
+ def F2_conv_sf2d : F2_RDD_RS_CONVERT <"convert_sf2d", 0b100,
+ fp_to_sint, I64, F32>;
+ def F2_conv_df2uw : F2_RD_RSS_CONVERT <"convert_df2uw", 0b011,
+ fp_to_uint, I32, F64>;
+ def F2_conv_df2w : F2_RD_RSS_CONVERT <"convert_df2w", 0b100,
+ fp_to_sint, I32, F64>;
+ def F2_conv_sf2uw : F2_RD_RS_CONVERT <"convert_sf2uw", 0b011, 0b000,
+ fp_to_uint, I32, F32>;
+ def F2_conv_sf2w : F2_RD_RS_CONVERT <"convert_sf2w", 0b100, 0b000,
+ fp_to_sint, I32, F32>;
+}
+// Fix up radicand.
+let isFP = 1, hasNewValue = 1 in
+def F2_sffixupr: SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs),
+ "$Rd = sffixupr($Rs)",
+ [], "" , S_2op_tc_3or4x_SLOT23>, Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<5> Rs;
-// Bitcast is different than [fp|sint|uint]_to_[sint|uint|fp].
-def : Pat <(i32 (bitconvert (f32 IntRegs:$src))),
- (i32 (TFR IntRegs:$src))>,
- Requires<[HasV5T]>;
+ let IClass = 0b1000;
-def : Pat <(f32 (bitconvert (i32 IntRegs:$src))),
- (f32 (TFR IntRegs:$src))>,
- Requires<[HasV5T]>;
+ let Inst{27-21} = 0b1011101;
+ let Inst{20-16} = Rs;
+ let Inst{7-5} = 0b000;
+ let Inst{4-0} = Rd;
+ }
-def : Pat <(i64 (bitconvert (f64 DoubleRegs:$src))),
- (i64 (TFR64 DoubleRegs:$src))>,
- Requires<[HasV5T]>;
-
-def : Pat <(f64 (bitconvert (i64 DoubleRegs:$src))),
- (f64 (TFR64 DoubleRegs:$src))>,
- Requires<[HasV5T]>;
+// Bitcast is different than [fp|sint|uint]_to_[sint|uint|fp].
+let Predicates = [HasV5T] in {
+ def: Pat <(i32 (bitconvert F32:$src)), (I32:$src)>;
+ def: Pat <(f32 (bitconvert I32:$src)), (F32:$src)>;
+ def: Pat <(i64 (bitconvert F64:$src)), (I64:$src)>;
+ def: Pat <(f64 (bitconvert I64:$src)), (F64:$src)>;
+}
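+// These bitcasts need no instruction: F32/I32 and F64/I64 are PatLeafs over
+// the same register classes (Hexagon keeps FP values in the general register
+// file), so each output pattern is simply the source register.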
-// Floating point fused multiply-add.
-def FMADD_dp : ALU64_acc<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
- "$dst += dfmpy($src2, $src3)",
- [(set (f64 DoubleRegs:$dst),
- (fma DoubleRegs:$src2, DoubleRegs:$src3, DoubleRegs:$src1))],
- "$src1 = $dst">,
- Requires<[HasV5T]>;
-
-def FMADD_sp : ALU64_acc<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "$dst += sfmpy($src2, $src3)",
- [(set (f32 IntRegs:$dst),
- (fma IntRegs:$src2, IntRegs:$src3, IntRegs:$src1))],
- "$src1 = $dst">,
- Requires<[HasV5T]>;
-
-
-// Floating point max/min.
-let AddedComplexity = 100 in
-def FMAX_dp : ALU64_rr<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2),
- "$dst = dfmax($src1, $src2)",
- [(set DoubleRegs:$dst, (f64 (select (i1 (setolt DoubleRegs:$src2,
- DoubleRegs:$src1)),
- DoubleRegs:$src1,
- DoubleRegs:$src2)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100 in
-def FMAX_sp : ALU64_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sfmax($src1, $src2)",
- [(set IntRegs:$dst, (f32 (select (i1 (setolt IntRegs:$src2,
- IntRegs:$src1)),
- IntRegs:$src1,
- IntRegs:$src2)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100 in
-def FMIN_dp : ALU64_rr<(outs DoubleRegs:$dst),
- (ins DoubleRegs:$src1, DoubleRegs:$src2),
- "$dst = dfmin($src1, $src2)",
- [(set DoubleRegs:$dst, (f64 (select (i1 (setogt DoubleRegs:$src2,
- DoubleRegs:$src1)),
- DoubleRegs:$src1,
- DoubleRegs:$src2)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100 in
-def FMIN_sp : ALU64_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sfmin($src1, $src2)",
- [(set IntRegs:$dst, (f32 (select (i1 (setogt IntRegs:$src2,
- IntRegs:$src1)),
- IntRegs:$src1,
- IntRegs:$src2)))]>,
- Requires<[HasV5T]>;
-
-// Pseudo instruction to encode a set of conditional transfers.
-// This instruction is used instead of a mux and trades-off codesize
-// for performance. We conduct this transformation optimistically in
-// the hope that these instructions get promoted to dot-new transfers.
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_rr_f : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
- IntRegs:$src2,
- IntRegs:$src3),
- "Error; should not emit",
- [(set IntRegs:$dst, (f32 (select PredRegs:$src1,
- IntRegs:$src2,
- IntRegs:$src3)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_rr64_f : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
- DoubleRegs:$src2,
- DoubleRegs:$src3),
- "Error; should not emit",
- [(set DoubleRegs:$dst, (f64 (select PredRegs:$src1,
- DoubleRegs:$src2,
- DoubleRegs:$src3)))]>,
- Requires<[HasV5T]>;
-
-
-
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_ri_f : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, f32imm:$src3),
- "Error; should not emit",
- [(set IntRegs:$dst,
- (f32 (select PredRegs:$src1, IntRegs:$src2, fpimm:$src3)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_ir_f : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2, IntRegs:$src3),
- "Error; should not emit",
- [(set IntRegs:$dst,
- (f32 (select PredRegs:$src1, fpimm:$src2, IntRegs:$src3)))]>,
- Requires<[HasV5T]>;
-
-let AddedComplexity = 100, isPredicated = 1 in
-def TFR_condset_ii_f : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2, f32imm:$src3),
- "Error; should not emit",
- [(set IntRegs:$dst, (f32 (select PredRegs:$src1,
- fpimm:$src2,
- fpimm:$src3)))]>,
- Requires<[HasV5T]>;
-
-
-def : Pat <(select (i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
- (f32 IntRegs:$src3),
- (f32 IntRegs:$src4)),
- (TFR_condset_rr_f (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1), IntRegs:$src4,
- IntRegs:$src3)>, Requires<[HasV5T]>;
-
-def : Pat <(select (i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
- (f64 DoubleRegs:$src3),
- (f64 DoubleRegs:$src4)),
- (TFR_condset_rr64_f (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1),
- DoubleRegs:$src4, DoubleRegs:$src3)>, Requires<[HasV5T]>;
-
-// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
-def : Pat <(select (not PredRegs:$src1), fpimm:$src2, fpimm:$src3),
- (TFR_condset_ii_f PredRegs:$src1, fpimm:$src3, fpimm:$src2)>;
+// F2_sffma: Floating-point fused multiply add.
+let isFP = 1, hasNewValue = 1 in
+class T_sfmpy_acc <bit isSub, bit isLib>
+ : MInst<(outs IntRegs:$Rx),
+ (ins IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt),
+ "$Rx "#!if(isSub, "-=","+=")#" sfmpy($Rs, $Rt)"#!if(isLib, ":lib",""),
+ [], "$dst2 = $Rx" , M_tc_3_SLOT23 > ,
+ Requires<[HasV5T]> {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<5> Rt;
+
+ let IClass = 0b1110;
+
+ let Inst{27-21} = 0b1111000;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b1;
+ let Inst{6} = isLib;
+ let Inst{5} = isSub;
+ let Inst{4-0} = Rx;
+ }
+
+def F2_sffma: T_sfmpy_acc <0, 0>;
+def F2_sffms: T_sfmpy_acc <1, 0>;
+def F2_sffma_lib: T_sfmpy_acc <0, 1>;
+def F2_sffms_lib: T_sfmpy_acc <1, 1>;
+
+def : Pat <(f32 (fma F32:$src2, F32:$src3, F32:$src1)),
+ (F2_sffma F32:$src1, F32:$src2, F32:$src3)>;
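+// Operand order: LLVM's (fma a, b, c) computes a*b + c, while F2_sffma ties
+// the accumulator to its first input ("$dst2 = $Rx"), so the pattern moves
+// the addend $src1 into the accumulator slot.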
+
+// Floating-point fused multiply add w/ additional scaling (2**pu).
+let isFP = 1, hasNewValue = 1 in
+def F2_sffma_sc: MInst <
+ (outs IntRegs:$Rx),
+ (ins IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt, PredRegs:$Pu),
+ "$Rx += sfmpy($Rs, $Rt, $Pu):scale" ,
+ [], "$dst2 = $Rx" , M_tc_3_SLOT23 > ,
+ Requires<[HasV5T]> {
+ bits<5> Rx;
+ bits<5> Rs;
+ bits<5> Rt;
+ bits<2> Pu;
+
+ let IClass = 0b1110;
+
+ let Inst{27-21} = 0b1111011;
+ let Inst{20-16} = Rs;
+ let Inst{13} = 0b0;
+ let Inst{12-8} = Rt;
+ let Inst{7} = 0b1;
+ let Inst{6-5} = Pu;
+ let Inst{4-0} = Rx;
+ }
+
+let isExtended = 1, isExtentSigned = 1, opExtentBits = 8, opExtendable = 3,
+ isPseudo = 1, InputType = "imm" in
+def MUX_ir_f : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, f32Ext:$src3),
+ "$dst = mux($src1, $src2, #$src3)",
+ [(set F32:$dst, (f32 (select I1:$src1, F32:$src2, fpimm:$src3)))]>,
+ Requires<[HasV5T]>;
+
+let isExtended = 1, isExtentSigned = 1, opExtentBits = 8, opExtendable = 2,
+ isPseudo = 1, InputType = "imm" in
+def MUX_ri_f : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, f32Ext:$src2, IntRegs:$src3),
+ "$dst = mux($src1, #$src2, $src3)",
+ [(set F32:$dst, (f32 (select I1:$src1, fpimm:$src2, F32:$src3)))]>,
+ Requires<[HasV5T]>;
+
+def: Pat<(select I1:$src1, F32:$src2, F32:$src3),
+ (C2_mux I1:$src1, F32:$src2, F32:$src3)>,
+ Requires<[HasV5T]>;
+
+def: Pat<(select (i1 (setult F32:$src1, F32:$src2)), F32:$src3, F32:$src4),
+ (C2_mux (F2_sfcmpgt F32:$src2, F32:$src1), F32:$src4, F32:$src3)>,
+ Requires<[HasV5T]>;
+
+def: Pat<(select I1:$src1, F64:$src2, F64:$src3),
+ (C2_vmux I1:$src1, F64:$src2, F64:$src3)>,
+ Requires<[HasV5T]>;
+
+def: Pat<(select (i1 (setult F64:$src1, F64:$src2)), F64:$src3, F64:$src4),
+ (C2_vmux (F2_dfcmpgt F64:$src2, F64:$src1), F64:$src3, F64:$src4)>,
+ Requires<[HasV5T]>;
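The setult patterns above rewrite "a < b" as "b > a" so that only the greater-than comparators (F2_sfcmpgt, F2_dfcmpgt) are needed; note the f32 pattern is corrected here to keep the true/false operands in the same order as the f64 one. Apart from NaN inputs (setult is an unordered compare, the gt compares are ordered), the selected value is unchanged, as this small C check illustrates:

    #include <assert.h>

    static float sel_lt(float a, float b, float c, float d) { return a < b ? c : d; }
    static float sel_gt(float a, float b, float c, float d) { return b > a ? c : d; }

    int main(void) {
        /* same result whichever comparison direction is used */
        assert(sel_lt(1.0f, 2.0f, 3.0f, 4.0f) == sel_gt(1.0f, 2.0f, 3.0f, 4.0f));
        assert(sel_lt(2.0f, 1.0f, 3.0f, 4.0f) == sel_gt(2.0f, 1.0f, 3.0f, 4.0f));
        return 0;
    }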
// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
-// => r0 = TFR_condset_ri(p0, r1, #i)
-def : Pat <(select (not PredRegs:$src1), fpimm:$src2, IntRegs:$src3),
- (TFR_condset_ri_f PredRegs:$src1, IntRegs:$src3, fpimm:$src2)>;
+// => r0 = MUX_ir_f(p0, r1, #i)
+def: Pat<(select (not I1:$src1), fpimm:$src2, F32:$src3),
+ (MUX_ir_f I1:$src1, F32:$src3, fpimm:$src2)>,
+ Requires<[HasV5T]>;
// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
-// => r0 = TFR_condset_ir(p0, #i, r1)
-def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, fpimm:$src3),
- (TFR_condset_ir_f PredRegs:$src1, fpimm:$src3, IntRegs:$src2)>;
+// => r0 = MUX_ri_f(p0, #i, r1)
+def: Pat<(select (not I1:$src1), F32:$src2, fpimm:$src3),
+ (MUX_ri_f I1:$src1, fpimm:$src3, F32:$src2)>,
+ Requires<[HasV5T]>;
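Both pnot mappings rely on the identity select(!p, x, y) == select(p, y, x): the predicate negation folds away by swapping the mux operands instead of emitting a separate not. A one-line C check of the identity:

    #include <assert.h>
    #include <stdbool.h>

    static float mux(bool p, float x, float y) { return p ? x : y; }

    int main(void) {
        for (int p = 0; p <= 1; ++p)
            assert(mux(!p, 1.0f, 2.0f) == mux(p, 2.0f, 1.0f));
        return 0;
    }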
+
+def: Pat<(i32 (fp_to_sint F64:$src1)),
+ (LoReg (F2_conv_df2d_chop F64:$src1))>,
+ Requires<[HasV5T]>;
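fp_to_sint from f64 to i32 is thus selected as a convert-with-chop to a 64-bit integer followed by LoReg, which takes the low word of the register pair. For in-range values this matches the plain C cast chain (a sketch; the helper name is invented):

    #include <stdint.h>

    static int32_t fp_to_sint_ref(double d) {
        int64_t wide = (int64_t)d; /* :chop = truncate toward zero */
        return (int32_t)wide;      /* LoReg = low 32 bits of the pair */
    }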
+
+//===----------------------------------------------------------------------===//
+// :natural forms of vasrh and vasrhub insns
+//===----------------------------------------------------------------------===//
+// S5_asrhub_rnd_sat: Vector arithmetic shift right by immediate with round,
+// saturate, and pack.
+let Defs = [USR_OVF], hasSideEffects = 0, hasNewValue = 1, opNewValue = 0 in
+class T_ASRHUB<bit isSat>
+ : SInst <(outs IntRegs:$Rd),
+ (ins DoubleRegs:$Rss, u4Imm:$u4),
+ "$Rd = vasrhub($Rss, #$u4):"#!if(isSat, "sat", "raw"),
+ [], "", S_2op_tc_2_SLOT23>,
+ Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<5> Rss;
+ bits<4> u4;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b1000011;
+ let Inst{20-16} = Rss;
+ let Inst{13-12} = 0b00;
+ let Inst{11-8} = u4;
+ let Inst{7-6} = 0b10;
+ let Inst{5} = isSat;
+ let Inst{4-0} = Rd;
+ }
+
+def S5_asrhub_rnd_sat : T_ASRHUB <0>;
+def S5_asrhub_sat : T_ASRHUB <1>;
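A hedged C model of the vasrhub semantics the two defs encode (consult the Hexagon PRM for the authoritative definition; the rounding-bias placement here is an assumption): each signed halfword is shifted right, optionally rounded, saturated to an unsigned byte, and the four bytes are packed into one word.

    #include <stdint.h>

    static uint8_t usat8(int32_t v) {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* round != 0 models the S5_asrhub_rnd_sat form, round == 0 the
     * plain S5_asrhub_sat form. */
    static uint32_t vasrhub_ref(int64_t rss, unsigned u4, int round) {
        uint32_t rd = 0;
        for (int i = 0; i < 4; ++i) {
            int32_t h = (int16_t)(rss >> (16 * i)); /* halfword i */
            if (round && u4) h += 1 << (u4 - 1);    /* rounding bias */
            rd |= (uint32_t)usat8(h >> u4) << (8 * i);
        }
        return rd;
    }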
+
+let isAsmParserOnly = 1 in
+def S5_asrhub_rnd_sat_goodsyntax
+ : SInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss, u4Imm:$u4),
+ "$Rd = vasrhub($Rss, #$u4):rnd:sat">, Requires<[HasV5T]>;
+
+// S5_vasrhrnd: Vector arithmetic shift right by immediate with round.
+let hasSideEffects = 0 in
+def S5_vasrhrnd : SInst <(outs DoubleRegs:$Rdd),
+ (ins DoubleRegs:$Rss, u4Imm:$u4),
+ "$Rdd = vasrh($Rss, #$u4):raw">,
+ Requires<[HasV5T]> {
+ bits<5> Rdd;
+ bits<5> Rss;
+ bits<4> u4;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b0000001;
+ let Inst{20-16} = Rss;
+ let Inst{13-12} = 0b00;
+ let Inst{11-8} = u4;
+ let Inst{7-5} = 0b000;
+ let Inst{4-0} = Rdd;
+ }
+
+let isAsmParserOnly = 1 in
+def S5_vasrhrnd_goodsyntax
+ : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, u4Imm:$u4),
+ "$Rdd = vasrh($Rss,#$u4):rnd">, Requires<[HasV5T]>;
+
+// Floating-point reciprocal square root approximation
+let Uses = [USR], isPredicateLate = 1, isFP = 1,
+ hasSideEffects = 0, hasNewValue = 1, opNewValue = 0,
+ validSubTargets = HasV5SubT in
+def F2_sfinvsqrta: SInst <
+ (outs IntRegs:$Rd, PredRegs:$Pe),
+ (ins IntRegs:$Rs),
+ "$Rd, $Pe = sfinvsqrta($Rs)" > ,
+ Requires<[HasV5T]> {
+ bits<5> Rd;
+ bits<2> Pe;
+ bits<5> Rs;
+
+ let IClass = 0b1000;
+
+ let Inst{27-21} = 0b1011111;
+ let Inst{20-16} = Rs;
+ let Inst{7} = 0b0;
+ let Inst{6-5} = Pe;
+ let Inst{4-0} = Rd;
+ }
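F2_sfinvsqrta produces a low-precision 1/sqrt estimate plus a predicate that steers the subsequent fixup sequence. The estimate is meant to seed Newton-Raphson refinement; a sketch of one refinement step (this function is illustrative, not part of the backend):

    /* est' = est * (1.5 - 0.5 * x * est * est); each iteration roughly
     * doubles the number of correct bits of 1/sqrt(x). */
    static float rsqrt_refine(float x, float est) {
        return est * (1.5f - 0.5f * x * est * est);
    }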
+
+// Complex multiply 32x16
+let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23 in {
+ def M4_cmpyi_whc : T_S3op_8<"cmpyiwh", 0b101, 1, 1, 1, 1>;
+ def M4_cmpyr_whc : T_S3op_8<"cmpyrwh", 0b111, 1, 1, 1, 1>;
+}
-def : Pat <(i32 (fp_to_sint (f64 DoubleRegs:$src1))),
- (i32 (EXTRACT_SUBREG (i64 (CONVERT_df2d (f64 DoubleRegs:$src1))), subreg_loreg))>,
- Requires<[HasV5T]>;
+// Classify floating-point value
+let isFP = 1 in
+ def F2_sfclass : T_TEST_BIT_IMM<"sfclass", 0b111>;
+
+let isFP = 1 in
+def F2_dfclass: ALU64Inst<(outs PredRegs:$Pd), (ins DoubleRegs:$Rss, u5Imm:$u5),
+ "$Pd = dfclass($Rss, #$u5)",
+ [], "" , ALU64_tc_2early_SLOT23 > , Requires<[HasV5T]> {
+ bits<2> Pd;
+ bits<5> Rss;
+ bits<5> u5;
+
+ let IClass = 0b1101;
+ let Inst{27-21} = 0b1100100;
+ let Inst{20-16} = Rss;
+ let Inst{12-10} = 0b000;
+ let Inst{9-5} = u5;
+ let Inst{4-3} = 0b10;
+ let Inst{1-0} = Pd;
+ }
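dfclass tests the operand against a 5-bit class mask. A hedged C analogue using fpclassify; the bit-to-class assignment below is illustrative only, the real encoding is defined by the Hexagon PRM:

    #include <math.h>
    #include <stdbool.h>

    enum { CL_ZERO = 1, CL_NORMAL = 2, CL_DENORM = 4, CL_INF = 8, CL_NAN = 16 };

    static bool dfclass_ref(double v, unsigned u5) {
        switch (fpclassify(v)) {
        case FP_ZERO:      return u5 & CL_ZERO;
        case FP_NORMAL:    return u5 & CL_NORMAL;
        case FP_SUBNORMAL: return u5 & CL_DENORM;
        case FP_INFINITE:  return u5 & CL_INF;
        default:           return u5 & CL_NAN; /* FP_NAN */
        }
    }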
+
+// Instructions to create floating-point constants
+class T_fimm <string mnemonic, RegisterClass RC, bits<4> RegType, bit isNeg>
+ : ALU64Inst<(outs RC:$dst), (ins u10Imm:$src),
+ "$dst = "#mnemonic#"(#$src)"#!if(isNeg, ":neg", ":pos"),
+ [], "", ALU64_tc_3x_SLOT23>, Requires<[HasV5T]> {
+ bits<5> dst;
+ bits<10> src;
+
+ let IClass = 0b1101;
+ let Inst{27-24} = RegType;
+ let Inst{23} = 0b0;
+ let Inst{22} = isNeg;
+ let Inst{21} = src{9};
+ let Inst{13-5} = src{8-0};
+ let Inst{4-0} = dst;
+ }
+
+let hasNewValue = 1, opNewValue = 0 in {
+def F2_sfimm_p : T_fimm <"sfmake", IntRegs, 0b0110, 0>;
+def F2_sfimm_n : T_fimm <"sfmake", IntRegs, 0b0110, 1>;
+}
+
+def F2_dfimm_p : T_fimm <"dfmake", DoubleRegs, 0b1001, 0>;
+def F2_dfimm_n : T_fimm <"dfmake", DoubleRegs, 0b1001, 1>;
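The bit layout in T_fimm splits the 10-bit immediate across two fields. A C helper mirroring exactly the let-assignments above (assuming, as elsewhere in the Hexagon backend, that IClass occupies Inst{31-28}):

    #include <stdint.h>

    static uint32_t t_fimm_encode(uint32_t regtype4, int neg,
                                  uint32_t src10, uint32_t dst5) {
        uint32_t inst = 0xDu << 28;          /* IClass   = 0b1101      */
        inst |= (regtype4 & 0xF) << 24;      /* RegType -> Inst{27-24} */
        inst |= (uint32_t)(neg & 1) << 22;   /* isNeg   -> Inst{22}    */
        inst |= ((src10 >> 9) & 1) << 21;    /* src{9}  -> Inst{21}    */
        inst |= (src10 & 0x1FF) << 5;        /* src{8-0} -> Inst{13-5} */
        inst |= dst5 & 0x1F;                 /* dst     -> Inst{4-0}   */
        return inst;
    }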
def : Pat <(fabs (f32 IntRegs:$src1)),
- (CLRBIT_31 (f32 IntRegs:$src1), 31)>,
+ (S2_clrbit_i (f32 IntRegs:$src1), 31)>,
Requires<[HasV5T]>;
def : Pat <(fneg (f32 IntRegs:$src1)),
- (TOGBIT_31 (f32 IntRegs:$src1), 31)>,
- Requires<[HasV5T]>;
-
-/*
-def : Pat <(fabs (f64 DoubleRegs:$src1)),
- (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>,
- Requires<[HasV5T]>;
-
-def : Pat <(fabs (f64 DoubleRegs:$src1)),
- (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>,
+ (S2_togglebit_i (f32 IntRegs:$src1), 31)>,
Requires<[HasV5T]>;
- */
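The fabs/fneg patterns above are the classic IEEE-754 sign-bit tricks: clear bit 31 for fabs, toggle it for fneg. In portable C (memcpy avoids strict-aliasing issues):

    #include <stdint.h>
    #include <string.h>

    static float fabs_ref(float f) {
        uint32_t u; memcpy(&u, &f, sizeof u);
        u &= 0x7FFFFFFFu;              /* S2_clrbit_i(r, 31)    */
        memcpy(&f, &u, sizeof f);
        return f;
    }

    static float fneg_ref(float f) {
        uint32_t u; memcpy(&u, &f, sizeof u);
        u ^= 0x80000000u;              /* S2_togglebit_i(r, 31) */
        memcpy(&f, &u, sizeof f);
        return f;
    }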
diff --git a/lib/Target/Hexagon/HexagonInstrInfoVector.td b/lib/Target/Hexagon/HexagonInstrInfoVector.td
new file mode 100644
index 0000000..6e67b6e
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonInstrInfoVector.td
@@ -0,0 +1,65 @@
+//===- HexagonInstrInfoVector.td - Hexagon Vector Patterns -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon Vector instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+def V2I1: PatLeaf<(v2i1 PredRegs:$R)>;
+def V4I1: PatLeaf<(v4i1 PredRegs:$R)>;
+def V8I1: PatLeaf<(v8i1 PredRegs:$R)>;
+def V4I8: PatLeaf<(v4i8 IntRegs:$R)>;
+def V2I16: PatLeaf<(v2i16 IntRegs:$R)>;
+def V8I8: PatLeaf<(v8i8 DoubleRegs:$R)>;
+def V4I16: PatLeaf<(v4i16 DoubleRegs:$R)>;
+def V2I32: PatLeaf<(v2i32 DoubleRegs:$R)>;
+
+// Vector shift support. Vector shifting in Hexagon is rather different
+// from LLVM's internal representation.
+// LLVM assumes that all vector shifts have the form
+//   <VT> = SHL/SRA/SRL <VT> by <VT>
+// whereas Hexagon uses the form
+//   <VT> = SHL/SRA/SRL <VT> by <IT/i32>
+// As a result, special care is needed to guarantee correctness and
+// performance.
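In scalar terms, the mismatch the comment describes is that LLVM's generic form gives every lane its own shift amount, while Hexagon applies a single amount to all lanes, so only splat (uniform) amounts map directly onto these instructions:

    #include <stdint.h>

    /* LLVM's generic form: <VT> shifted by <VT>, per-lane amounts. */
    static void shl_llvm_form(int16_t v[4], const int16_t amt[4]) {
        for (int i = 0; i < 4; ++i) v[i] <<= amt[i];
    }

    /* Hexagon's form: <VT> shifted by one i32, same amount per lane. */
    static void shl_hexagon_form(int16_t v[4], int32_t amt) {
        for (int i = 0; i < 4; ++i) v[i] <<= amt;
    }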
+class vshift_v4i16<SDNode Op, string Str, bits<3> MajOp, bits<3> MinOp>
+ : S_2OpInstImm<Str, MajOp, MinOp, u4Imm,
+ [(set (v4i16 DoubleRegs:$dst),
+ (Op (v4i16 DoubleRegs:$src1), u4ImmPred:$src2))]> {
+ bits<4> src2;
+ let Inst{11-8} = src2;
+}
+
+class vshift_v2i32<SDNode Op, string Str, bits<3> MajOp, bits<3> MinOp>
+ : S_2OpInstImm<Str, MajOp, MinOp, u5Imm,
+ [(set (v2i32 DoubleRegs:$dst),
+ (Op (v2i32 DoubleRegs:$src1), u5ImmPred:$src2))]> {
+ bits<5> src2;
+ let Inst{12-8} = src2;
+}
+
+def S2_asr_i_vw : vshift_v2i32<sra, "vasrw", 0b010, 0b000>;
+def S2_lsr_i_vw : vshift_v2i32<srl, "vlsrw", 0b010, 0b001>;
+def S2_asl_i_vw : vshift_v2i32<shl, "vaslw", 0b010, 0b010>;
+
+def S2_asr_i_vh : vshift_v4i16<sra, "vasrh", 0b100, 0b000>;
+def S2_lsr_i_vh : vshift_v4i16<srl, "vlsrh", 0b100, 0b001>;
+def S2_asl_i_vh : vshift_v4i16<shl, "vaslh", 0b100, 0b010>;
+
+// Vector shift words by register
+def S2_asr_r_vw : T_S3op_shiftVect < "vasrw", 0b00, 0b00>;
+def S2_lsr_r_vw : T_S3op_shiftVect < "vlsrw", 0b00, 0b01>;
+def S2_asl_r_vw : T_S3op_shiftVect < "vaslw", 0b00, 0b10>;
+def S2_lsl_r_vw : T_S3op_shiftVect < "vlslw", 0b00, 0b11>;
+
+// Vector shift halfwords by register
+def S2_asr_r_vh : T_S3op_shiftVect < "vasrh", 0b01, 0b00>;
+def S2_lsr_r_vh : T_S3op_shiftVect < "vlsrh", 0b01, 0b01>;
+def S2_asl_r_vh : T_S3op_shiftVect < "vaslh", 0b01, 0b10>;
+def S2_lsl_r_vh : T_S3op_shiftVect < "vlslh", 0b01, 0b11>;
diff --git a/lib/Target/Hexagon/HexagonIntrinsics.td b/lib/Target/Hexagon/HexagonIntrinsics.td
index b3385d8..c0551e8 100644
--- a/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -13,3495 +13,1250 @@
// March 4, 2008
//===----------------------------------------------------------------------===//
-//
-// ALU 32 types.
-//
+class T_I_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID imm:$Is),
+ (MI imm:$Is)>;
-class qi_ALU32_sisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_ALU32_sis10<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class qi_ALU32_sis8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class qi_ALU32_siu8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class qi_ALU32_siu9<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u9Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_ALU32_qisisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_ALU32_qis8si<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2,
- IntRegs:$src3))]>;
-
-class si_ALU32_qisis8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- s8Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- imm:$src3))]>;
-
-class si_ALU32_qis8s8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2, s8Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2, imm:$src3))]>;
-
-class si_ALU32_sisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU32_sisi_sat<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU32_sisi_rnd<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU32_sis16<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_ALU32_sis10<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_ALU32_s10si<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins s10Imm:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "(#$src1, $src2)")),
- [(set IntRegs:$dst, (IntID imm:$src1, IntRegs:$src2))]>;
-
-class si_lo_ALU32_siu16<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u16Imm:$src2),
- !strconcat("$dst.l = ", !strconcat(opc , "#$src2")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_hi_ALU32_siu16<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u16Imm:$src2),
- !strconcat("$dst.h = ", !strconcat(opc , "#$src2")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_ALU32_s16<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins s16Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1")),
- [(set IntRegs:$dst, (IntID imm:$src1))]>;
-
-class di_ALU32_s8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs DoubleRegs:$dst), (ins s8Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1")),
- [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
-
-class di_ALU64_di<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "$src")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
-
-class si_ALU32_si<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_ALU32_si_tfr<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "$src")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+class T_R_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs),
+ (MI I32:$Rs)>;
-//
-// ALU 64 types.
-//
+class T_P_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs),
+ (MI DoubleRegs:$Rs)>;
+
+class T_II_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
+ : Pat<(IntID Imm1:$Is, Imm2:$It),
+ (MI Imm1:$Is, Imm2:$It)>;
+
+class T_RI_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+ : Pat<(IntID I32:$Rs, ImmPred:$It),
+ (MI I32:$Rs, ImmPred:$It)>;
+
+class T_IR_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred = PatLeaf<(i32 imm)>>
+ : Pat<(IntID ImmPred:$Is, I32:$Rt),
+ (MI ImmPred:$Is, I32:$Rt)>;
+
+class T_PI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID I64:$Rs, imm:$It),
+ (MI DoubleRegs:$Rs, imm:$It)>;
+
+class T_RP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID I32:$Rs, I64:$Rt),
+ (MI I32:$Rs, DoubleRegs:$Rt)>;
+
+class T_RR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I32:$Rt),
+ (MI I32:$Rs, I32:$Rt)>;
+
+class T_PP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt)>;
-class si_ALU64_si_sat<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_ALU64_didi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class di_ALU64_sidi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2))]>;
-
-class di_ALU64_didi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_ALU64_qididi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$src3))]>;
-
-class di_ALU64_sisi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_ALU64_didi_sat<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_ALU64_didi_rnd<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_ALU64_didi_crnd<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):crnd")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_ALU64_didi_rnd_sat<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_ALU64_didi_crnd_sat<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):crnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class qi_ALU64_didi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class si_ALU64_sisi<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_sat_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_sat_hh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_sat_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_sat_hl<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_sat_ll<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_hh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_hl<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_l16_ll<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_sat_hh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):sat:<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_sat_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.H):sat:<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_sat_hl<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.L):sat:<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_sat_ll<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.L):sat:<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_hh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_hl<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_h16_ll<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<16")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_lh<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_ll<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_ALU64_sisi_sat<string opc, Intrinsic IntID>
- : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+class T_QII_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
+ : Pat <(IntID (i32 PredRegs:$Ps), Imm1:$Is, Imm2:$It),
+ (MI PredRegs:$Ps, Imm1:$Is, Imm2:$It)>;
-//
-// SInst classes.
-//
+class T_QRI_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
+ : Pat <(IntID (i32 PredRegs:$Ps), I32:$Rs, ImmPred:$Is),
+ (MI PredRegs:$Ps, I32:$Rs, ImmPred:$Is)>;
-class qi_SInst_qi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
-
-class qi_SInst_qi_pxfer<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "$src")),
- [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
-
-class qi_SInst_qiqi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_SInst_qiqi_neg<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, !$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_SInst_di<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
-
-class di_SInst_di_sat<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
-
-class si_SInst_di<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src))]>;
-
-class si_SInst_di_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src))]>;
-
-class di_SInst_disi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
-
-class di_SInst_didi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class di_SInst_si<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class si_SInst_sisiu3<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u3Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- imm:$src3))]>;
-
-class si_SInst_diu5<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-class si_SInst_disi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
-
-class si_SInst_sidi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2))]>;
-
-class di_SInst_disisi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class di_SInst_sisi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_SInst_siu5<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class qi_SInst_siu6<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u6Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class qi_SInst_sisi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_SInst_si<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_SInst_si_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
-
-class di_SInst_qi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "($src)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_SInst_qi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "$src")),
- [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_SInst_qiqi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_SInst_si<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
- !strconcat("$dst = ", !strconcat(opc , "$src")),
- [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
-
-class si_SInst_sisi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_SInst_diu6<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-class si_SInst_siu5<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_SInst_siu5_rnd<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_SInst_siu5u5<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2, u5Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2, imm:$src3))]>;
-
-class si_SInst_sisisi_acc<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisisi_nac<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didisi_acc<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didisi_nac<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1, IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisiu5u5<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2, u5Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, #$src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2, imm:$src3))],
- "$dst2 = $dst">;
-
-class si_SInst_sisidi<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6u6<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u6Imm:$src2, u6Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, #$src2, #$src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- imm:$src2, imm:$src3))],
- "$dst2 = $dst">;
-
-class di_SInst_dididi<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_diu6u6<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2,
- u6Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2,
- imm:$src3))]>;
-
-class di_SInst_didiqi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
- IntRegs:$src3))]>;
-
-class di_SInst_didiu3<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- u3Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
- imm:$src3))]>;
-
-class di_SInst_didisi_or<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst |= ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didisi_and<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst &= ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6_and<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u6Imm:$src2),
- !strconcat("$dst &= ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6_or<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u6Imm:$src2),
- !strconcat("$dst |= ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6_xor<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u6Imm:$src2),
- !strconcat("$dst ^= ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisisi_and<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst &= ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisisi_or<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst |= ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-
-class si_SInst_sisiu5_and<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst &= ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisiu5_or<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst |= ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisiu5_xor<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst ^= ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisiu5_acc<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class si_SInst_sisiu5_nac<string opc, Intrinsic IntID>
- : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6_acc<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1, imm:$src2))],
- "$dst2 = $dst">;
-
-class di_SInst_didiu6_nac<string opc, Intrinsic IntID>
- : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- u5Imm:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- imm:$src2))],
- "$dst2 = $dst">;
+class T_QIR_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
+ : Pat <(IntID (i32 PredRegs:$Ps), ImmPred:$Is, I32:$Rs),
+ (MI PredRegs:$Ps, ImmPred:$Is, I32:$Rs)>;
+class T_RRI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
+ (MI I32:$Rs, I32:$Rt, imm:$Iu)>;
-//
-// MInst classes.
-//
+class T_RII_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, imm:$It, imm:$Iu),
+ (MI I32:$Rs, imm:$It, imm:$Iu)>;
-class di_MInst_sisi_rnd_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_hh<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1:rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_hl<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.L):rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1:rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_lh<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.H):rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1:rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_rnd_ll<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.L):rnd")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_disisi_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_sat_conj<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2*):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_sat_conj<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2*):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_s1_sat_conj<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2*):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_s1_sat_conj<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2*):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_s8s8<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins s8Imm:$src1, s8Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "(#$src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID imm:$src1, imm:$src2))]>;
-
-class si_MInst_sis9<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_MInst_sisi<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_hh<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_lh<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_hl<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_ll<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-
-class si_MInst_sisi_hh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_lh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_hl<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_ll<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_up<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_didi<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_MInst_didi_conj<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2*)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_MInst_sisi_s1_sat_conj<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2*):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_didi_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):<<1:rnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_MInst_didi_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class di_MInst_didi_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):rnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class si_SInst_sisi_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_SInst_didi_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class si_SInst_disi_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_l_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2.L):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_h_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2.H):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_sat_conj<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2*):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_s1_rnd_sat_conj<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2*):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisisi_xacc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst ^= ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst += ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst -= ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- IntRegs:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisis8_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- s8Imm:$src3),
- !strconcat("$dst += ", !strconcat(opc , "($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- imm:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisis8_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- s8Imm:$src3),
- !strconcat("$dst -= ", !strconcat(opc , "($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- imm:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisiu4u5<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- u4Imm:$src2, u5Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, #$src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- imm:$src2, imm:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisiu8_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- u8Imm:$src3),
- !strconcat("$dst += ", !strconcat(opc , "($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- imm:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisiu8_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
- u8Imm:$src3),
- !strconcat("$dst -= ", !strconcat(opc , "($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
- imm:$src3))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_sat_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.L, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_acc_sat_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1.H, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hh_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hh_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hl_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_hl_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_lh_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_lh_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_ll_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_nac_ll_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_ALU32_sisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_sat_conj<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2*):sat")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_sisi_s1_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_didi_s1_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
-
-class si_MInst_didi_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, $src2):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class si_MInst_didi_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd:sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class si_MInst_sisi_sat_hh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_hl<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_lh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.H):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_ll<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.L, $src2.L):<<1:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_hh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_hh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1.H, $src2.H):<<1:rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_hh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc ,
- "($src1.H, $src2.H):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_hl<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.H, $src2.L):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.H, $src2.L):<<1:rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_hl<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.H, $src2.L):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_hl_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.H, $src2.L):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_lh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.H):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_lh<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.H):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.H):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_lh_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.H):<<1:rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_ll<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.L):rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_sat_rnd_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.L):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_ll<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.L):rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_sisi_rnd_ll_s1<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1.L, $src2.L):<<1:rnd")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_dididi_acc_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
- DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2):sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_dididi_acc_rnd_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):rnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_dididi_acc_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-
-class di_MInst_dididi_acc_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_dididi_acc_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):<<1:rnd:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_dididi_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_dididi_acc_conj<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1, $src2*)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1.H, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1.H, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1.L, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1.L, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_hh<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_hl<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_lh<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.H)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_ll<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.L)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_hh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ",
- !strconcat(opc , "($src1.H, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_hl_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ",
- !strconcat(opc , "($src1.H, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_lh_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ",
- !strconcat(opc , "($src1.L, $src2.H):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_nac_ll_s1<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst -= ",
- !strconcat(opc , "($src1.L, $src2.L):<<1")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disisi_acc_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class di_MInst_disi_s1_sat<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
-
-class di_MInst_didisi_acc_s1_sat<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- IntRegs:$src2),
- !strconcat("$dst += ",
- !strconcat(opc , "($src1, $src2):<<1:sat")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
- DoubleRegs:$src1,
- IntRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_disi_s1_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ",
- !strconcat(opc , "($src1, $src2):<<1:rnd:sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
-
-class si_MInst_didi<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-
-class T_RI_pat <InstHexagon MI, Intrinsic IntID>
- : Pat<(IntID (i32 IntRegs:$Rs), imm:$It),
- (MI IntRegs:$Rs, imm:$It)>;
+class T_IRI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID imm:$It, I32:$Rs, imm:$Iu),
+ (MI imm:$It, I32:$Rs, imm:$Iu)>;
-//
-// LDInst classes.
-//
-let mayLoad = 1, neverHasSideEffects = 1 in
-class di_LDInstPI_diu4<string opc, Intrinsic IntID>
- : LDInstPI<(outs IntRegs:$dst, DoubleRegs:$dst2),
- (ins IntRegs:$src1, IntRegs:$src2, CRRegs:$src3, s4Imm:$offset),
- "$dst2 = memd($src1++#$offset:circ($src3))",
- [],
- "$src1 = $dst">;
+class T_IRR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID imm:$Is, I32:$Rs, I32:$Rt),
+ (MI imm:$Is, I32:$Rs, I32:$Rt)>;
-/********************************************************************
-* ALU32/ALU *
-*********************************************************************/
+class T_RIR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, imm:$Is, I32:$Rt),
+ (MI I32:$Rs, imm:$Is, I32:$Rt)>;
-// ALU32 / ALU / Add.
-def HEXAGON_A2_add:
- si_ALU32_sisi <"add", int_hexagon_A2_add>;
-def HEXAGON_A2_addi:
- si_ALU32_sis16 <"add", int_hexagon_A2_addi>;
-
-// ALU32 / ALU / Logical operations.
-def HEXAGON_A2_and:
- si_ALU32_sisi <"and", int_hexagon_A2_and>;
-def HEXAGON_A2_andir:
- si_ALU32_sis10 <"and", int_hexagon_A2_andir>;
-def HEXAGON_A2_not:
- si_ALU32_si <"not", int_hexagon_A2_not>;
-def HEXAGON_A2_or:
- si_ALU32_sisi <"or", int_hexagon_A2_or>;
-def HEXAGON_A2_orir:
- si_ALU32_sis10 <"or", int_hexagon_A2_orir>;
-def HEXAGON_A2_xor:
- si_ALU32_sisi <"xor", int_hexagon_A2_xor>;
-
-// ALU32 / ALU / Negate.
-def HEXAGON_A2_neg:
- si_ALU32_si <"neg", int_hexagon_A2_neg>;
-
-// ALU32 / ALU / Subtract.
-def HEXAGON_A2_sub:
- si_ALU32_sisi <"sub", int_hexagon_A2_sub>;
-def HEXAGON_A2_subri:
- si_ALU32_s10si <"sub", int_hexagon_A2_subri>;
-
-// ALU32 / ALU / Transfer Immediate.
-def HEXAGON_A2_tfril:
- si_lo_ALU32_siu16 <"", int_hexagon_A2_tfril>;
-def HEXAGON_A2_tfrih:
- si_hi_ALU32_siu16 <"", int_hexagon_A2_tfrih>;
-def HEXAGON_A2_tfrsi:
- si_ALU32_s16 <"", int_hexagon_A2_tfrsi>;
-def HEXAGON_A2_tfrpi:
- di_ALU32_s8 <"", int_hexagon_A2_tfrpi>;
-
-// ALU32 / ALU / Transfer Register.
-def HEXAGON_A2_tfr:
- si_ALU32_si_tfr <"", int_hexagon_A2_tfr>;
+class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I32:$Rt, I32:$Ru),
+ (MI I32:$Rs, I32:$Rt, I32:$Ru)>;
-/********************************************************************
-* ALU32/PERM *
-*********************************************************************/
+class T_PPI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>;
-// ALU32 / PERM / Combine.
-def HEXAGON_A2_combinew:
- di_ALU32_sisi <"combine", int_hexagon_A2_combinew>;
-def HEXAGON_A2_combine_hh:
- si_MInst_sisi_hh <"combine", int_hexagon_A2_combine_hh>;
-def HEXAGON_A2_combine_lh:
- si_MInst_sisi_lh <"combine", int_hexagon_A2_combine_lh>;
-def HEXAGON_A2_combine_hl:
- si_MInst_sisi_hl <"combine", int_hexagon_A2_combine_hl>;
-def HEXAGON_A2_combine_ll:
- si_MInst_sisi_ll <"combine", int_hexagon_A2_combine_ll>;
-def HEXAGON_A2_combineii:
- di_MInst_s8s8 <"combine", int_hexagon_A2_combineii>;
-
-// ALU32 / PERM / Mux.
-def HEXAGON_C2_mux:
- si_ALU32_qisisi <"mux", int_hexagon_C2_mux>;
-def HEXAGON_C2_muxri:
- si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>;
-def HEXAGON_C2_muxir:
- si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>;
-def HEXAGON_C2_muxii:
- si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>;
-
-// ALU32 / PERM / Shift halfword.
-def HEXAGON_A2_aslh:
- si_ALU32_si <"aslh", int_hexagon_A2_aslh>;
-def HEXAGON_A2_asrh:
- si_ALU32_si <"asrh", int_hexagon_A2_asrh>;
-def SI_to_SXTHI_asrh:
- si_ALU32_si <"asrh", int_hexagon_SI_to_SXTHI_asrh>;
-
-// ALU32 / PERM / Sign/zero extend.
-def HEXAGON_A2_sxth:
- si_ALU32_si <"sxth", int_hexagon_A2_sxth>;
-def HEXAGON_A2_sxtb:
- si_ALU32_si <"sxtb", int_hexagon_A2_sxtb>;
-def HEXAGON_A2_zxth:
- si_ALU32_si <"zxth", int_hexagon_A2_zxth>;
-def HEXAGON_A2_zxtb:
- si_ALU32_si <"zxtb", int_hexagon_A2_zxtb>;
+class T_PII_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, imm:$It, imm:$Iu),
+ (MI DoubleRegs:$Rs, imm:$It, imm:$Iu)>;
-/********************************************************************
-* ALU32/PRED *
-*********************************************************************/
+class T_PPP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, I64:$Ru),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, DoubleRegs:$Ru)>;
-// ALU32 / PRED / Compare.
-def HEXAGON_C2_cmpeq:
- qi_ALU32_sisi <"cmp.eq", int_hexagon_C2_cmpeq>;
-def HEXAGON_C2_cmpeqi:
- qi_ALU32_sis10 <"cmp.eq", int_hexagon_C2_cmpeqi>;
-def HEXAGON_C2_cmpgei:
- qi_ALU32_sis8 <"cmp.ge", int_hexagon_C2_cmpgei>;
-def HEXAGON_C2_cmpgeui:
- qi_ALU32_siu8 <"cmp.geu", int_hexagon_C2_cmpgeui>;
-def HEXAGON_C2_cmpgt:
- qi_ALU32_sisi <"cmp.gt", int_hexagon_C2_cmpgt>;
-def HEXAGON_C2_cmpgti:
- qi_ALU32_sis10 <"cmp.gt", int_hexagon_C2_cmpgti>;
-def HEXAGON_C2_cmpgtu:
- qi_ALU32_sisi <"cmp.gtu", int_hexagon_C2_cmpgtu>;
-def HEXAGON_C2_cmpgtui:
- qi_ALU32_siu9 <"cmp.gtu", int_hexagon_C2_cmpgtui>;
-def HEXAGON_C2_cmplt:
- qi_ALU32_sisi <"cmp.lt", int_hexagon_C2_cmplt>;
-def HEXAGON_C2_cmpltu:
- qi_ALU32_sisi <"cmp.ltu", int_hexagon_C2_cmpltu>;
+class T_PPR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>;
-/********************************************************************
-* ALU32/VH *
-*********************************************************************/
+class T_PRR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I32:$Rt, I32:$Ru),
+ (MI DoubleRegs:$Rs, I32:$Rt, I32:$Ru)>;
-// ALU32 / VH / Vector add halfwords.
-// Rd32=vadd[u]h(Rs32,Rt32:sat]
-def HEXAGON_A2_svaddh:
- si_ALU32_sisi <"vaddh", int_hexagon_A2_svaddh>;
-def HEXAGON_A2_svaddhs:
- si_ALU32_sisi_sat <"vaddh", int_hexagon_A2_svaddhs>;
-def HEXAGON_A2_svadduhs:
- si_ALU32_sisi_sat <"vadduh", int_hexagon_A2_svadduhs>;
-
-// ALU32 / VH / Vector average halfwords.
-def HEXAGON_A2_svavgh:
- si_ALU32_sisi <"vavgh", int_hexagon_A2_svavgh>;
-def HEXAGON_A2_svavghs:
- si_ALU32_sisi_rnd <"vavgh", int_hexagon_A2_svavghs>;
-def HEXAGON_A2_svnavgh:
- si_ALU32_sisi <"vnavgh", int_hexagon_A2_svnavgh>;
-
-// ALU32 / VH / Vector subtract halfwords.
-def HEXAGON_A2_svsubh:
- si_ALU32_sisi <"vsubh", int_hexagon_A2_svsubh>;
-def HEXAGON_A2_svsubhs:
- si_ALU32_sisi_sat <"vsubh", int_hexagon_A2_svsubhs>;
-def HEXAGON_A2_svsubuhs:
- si_ALU32_sisi_sat <"vsubuh", int_hexagon_A2_svsubuhs>;
+class T_PPQ_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, (i32 PredRegs:$Ru)),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, PredRegs:$Ru)>;
-/********************************************************************
-* ALU64/ALU *
-*********************************************************************/
+class T_PR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I32:$Rt),
+ (MI DoubleRegs:$Rs, I32:$Rt)>;
-// ALU64 / ALU / Add.
-def HEXAGON_A2_addp:
- di_ALU64_didi <"add", int_hexagon_A2_addp>;
-def HEXAGON_A2_addsat:
- si_ALU64_sisi_sat <"add", int_hexagon_A2_addsat>;
-
-// ALU64 / ALU / Add halfword.
-// Even though the definition says hl, it should be lh -
-//so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits.
-def HEXAGON_A2_addh_l16_hl:
- si_ALU64_sisi_l16_lh <"add", int_hexagon_A2_addh_l16_hl>;
-def HEXAGON_A2_addh_l16_ll:
- si_ALU64_sisi_l16_ll <"add", int_hexagon_A2_addh_l16_ll>;
-
-def HEXAGON_A2_addh_l16_sat_hl:
- si_ALU64_sisi_l16_sat_lh <"add", int_hexagon_A2_addh_l16_sat_hl>;
-def HEXAGON_A2_addh_l16_sat_ll:
- si_ALU64_sisi_l16_sat_ll <"add", int_hexagon_A2_addh_l16_sat_ll>;
-
-def HEXAGON_A2_addh_h16_hh:
- si_ALU64_sisi_h16_hh <"add", int_hexagon_A2_addh_h16_hh>;
-def HEXAGON_A2_addh_h16_hl:
- si_ALU64_sisi_h16_hl <"add", int_hexagon_A2_addh_h16_hl>;
-def HEXAGON_A2_addh_h16_lh:
- si_ALU64_sisi_h16_lh <"add", int_hexagon_A2_addh_h16_lh>;
-def HEXAGON_A2_addh_h16_ll:
- si_ALU64_sisi_h16_ll <"add", int_hexagon_A2_addh_h16_ll>;
-
-def HEXAGON_A2_addh_h16_sat_hh:
- si_ALU64_sisi_h16_sat_hh <"add", int_hexagon_A2_addh_h16_sat_hh>;
-def HEXAGON_A2_addh_h16_sat_hl:
- si_ALU64_sisi_h16_sat_hl <"add", int_hexagon_A2_addh_h16_sat_hl>;
-def HEXAGON_A2_addh_h16_sat_lh:
- si_ALU64_sisi_h16_sat_lh <"add", int_hexagon_A2_addh_h16_sat_lh>;
-def HEXAGON_A2_addh_h16_sat_ll:
- si_ALU64_sisi_h16_sat_ll <"add", int_hexagon_A2_addh_h16_sat_ll>;
-
-// ALU64 / ALU / Compare.
-def HEXAGON_C2_cmpeqp:
- qi_ALU64_didi <"cmp.eq", int_hexagon_C2_cmpeqp>;
-def HEXAGON_C2_cmpgtp:
- qi_ALU64_didi <"cmp.gt", int_hexagon_C2_cmpgtp>;
-def HEXAGON_C2_cmpgtup:
- qi_ALU64_didi <"cmp.gtu", int_hexagon_C2_cmpgtup>;
-
-// ALU64 / ALU / Logical operations.
-def HEXAGON_A2_andp:
- di_ALU64_didi <"and", int_hexagon_A2_andp>;
-def HEXAGON_A2_orp:
- di_ALU64_didi <"or", int_hexagon_A2_orp>;
-def HEXAGON_A2_xorp:
- di_ALU64_didi <"xor", int_hexagon_A2_xorp>;
-
-// ALU64 / ALU / Maximum.
-def HEXAGON_A2_max:
- si_ALU64_sisi <"max", int_hexagon_A2_max>;
-def HEXAGON_A2_maxu:
- si_ALU64_sisi <"maxu", int_hexagon_A2_maxu>;
-
-// ALU64 / ALU / Minimum.
-def HEXAGON_A2_min:
- si_ALU64_sisi <"min", int_hexagon_A2_min>;
-def HEXAGON_A2_minu:
- si_ALU64_sisi <"minu", int_hexagon_A2_minu>;
-
-// ALU64 / ALU / Subtract.
-def HEXAGON_A2_subp:
- di_ALU64_didi <"sub", int_hexagon_A2_subp>;
-def HEXAGON_A2_subsat:
- si_ALU64_sisi_sat <"sub", int_hexagon_A2_subsat>;
-
-// ALU64 / ALU / Subtract halfword.
-// Even though the definition says hl, it should be lh -
-//so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits.
-def HEXAGON_A2_subh_l16_hl:
- si_ALU64_sisi_l16_lh <"sub", int_hexagon_A2_subh_l16_hl>;
-def HEXAGON_A2_subh_l16_ll:
- si_ALU64_sisi_l16_ll <"sub", int_hexagon_A2_subh_l16_ll>;
-
-def HEXAGON_A2_subh_l16_sat_hl:
- si_ALU64_sisi_l16_sat_lh <"sub", int_hexagon_A2_subh_l16_sat_hl>;
-def HEXAGON_A2_subh_l16_sat_ll:
- si_ALU64_sisi_l16_sat_ll <"sub", int_hexagon_A2_subh_l16_sat_ll>;
-
-def HEXAGON_A2_subh_h16_hh:
- si_ALU64_sisi_h16_hh <"sub", int_hexagon_A2_subh_h16_hh>;
-def HEXAGON_A2_subh_h16_hl:
- si_ALU64_sisi_h16_hl <"sub", int_hexagon_A2_subh_h16_hl>;
-def HEXAGON_A2_subh_h16_lh:
- si_ALU64_sisi_h16_lh <"sub", int_hexagon_A2_subh_h16_lh>;
-def HEXAGON_A2_subh_h16_ll:
- si_ALU64_sisi_h16_ll <"sub", int_hexagon_A2_subh_h16_ll>;
-
-def HEXAGON_A2_subh_h16_sat_hh:
- si_ALU64_sisi_h16_sat_hh <"sub", int_hexagon_A2_subh_h16_sat_hh>;
-def HEXAGON_A2_subh_h16_sat_hl:
- si_ALU64_sisi_h16_sat_hl <"sub", int_hexagon_A2_subh_h16_sat_hl>;
-def HEXAGON_A2_subh_h16_sat_lh:
- si_ALU64_sisi_h16_sat_lh <"sub", int_hexagon_A2_subh_h16_sat_lh>;
-def HEXAGON_A2_subh_h16_sat_ll:
- si_ALU64_sisi_h16_sat_ll <"sub", int_hexagon_A2_subh_h16_sat_ll>;
-
-// ALU64 / ALU / Transfer register.
-def HEXAGON_A2_tfrp:
- di_ALU64_di <"", int_hexagon_A2_tfrp>;
+class T_D_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID (F64:$Rs)),
+ (MI (F64:$Rs))>;
-/********************************************************************
-* ALU64/BIT *
-*********************************************************************/
+class T_DI_pat <InstHexagon MI, Intrinsic IntID,
+ PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+ : Pat<(IntID F64:$Rs, ImmPred:$It),
+ (MI F64:$Rs, ImmPred:$It)>;
-// ALU64 / BIT / Masked parity.
-def HEXAGON_S2_parityp:
- si_ALU64_didi <"parity", int_hexagon_S2_parityp>;
+class T_F_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F32:$Rs),
+ (MI F32:$Rs)>;
-/********************************************************************
-* ALU64/PERM *
-*********************************************************************/
+class T_FI_pat <InstHexagon MI, Intrinsic IntID,
+ PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+ : Pat<(IntID F32:$Rs, ImmPred:$It),
+ (MI F32:$Rs, ImmPred:$It)>;
-// ALU64 / PERM / Vector pack high and low halfwords.
-def HEXAGON_S2_packhl:
- di_ALU64_sisi <"packhl", int_hexagon_S2_packhl>;
+class T_FF_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F32:$Rs, F32:$Rt),
+ (MI F32:$Rs, F32:$Rt)>;
-/********************************************************************
-* ALU64/VB *
-*********************************************************************/
+class T_DD_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F64:$Rs, F64:$Rt),
+ (MI F64:$Rs, F64:$Rt)>;
-// ALU64 / VB / Vector add unsigned bytes.
-def HEXAGON_A2_vaddub:
- di_ALU64_didi <"vaddub", int_hexagon_A2_vaddub>;
-def HEXAGON_A2_vaddubs:
- di_ALU64_didi_sat <"vaddub", int_hexagon_A2_vaddubs>;
-
-// ALU64 / VB / Vector average unsigned bytes.
-def HEXAGON_A2_vavgub:
- di_ALU64_didi <"vavgub", int_hexagon_A2_vavgub>;
-def HEXAGON_A2_vavgubr:
- di_ALU64_didi_rnd <"vavgub", int_hexagon_A2_vavgubr>;
-
-// ALU64 / VB / Vector compare unsigned bytes.
-def HEXAGON_A2_vcmpbeq:
- qi_ALU64_didi <"vcmpb.eq", int_hexagon_A2_vcmpbeq>;
-def HEXAGON_A2_vcmpbgtu:
- qi_ALU64_didi <"vcmpb.gtu",int_hexagon_A2_vcmpbgtu>;
-
-// ALU64 / VB / Vector maximum/minimum unsigned bytes.
-def HEXAGON_A2_vmaxub:
- di_ALU64_didi <"vmaxub", int_hexagon_A2_vmaxub>;
-def HEXAGON_A2_vminub:
- di_ALU64_didi <"vminub", int_hexagon_A2_vminub>;
-
-// ALU64 / VB / Vector subtract unsigned bytes.
-def HEXAGON_A2_vsubub:
- di_ALU64_didi <"vsubub", int_hexagon_A2_vsubub>;
-def HEXAGON_A2_vsububs:
- di_ALU64_didi_sat <"vsubub", int_hexagon_A2_vsububs>;
+class T_FFF_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F32:$Rs, F32:$Rt, F32:$Ru),
+ (MI F32:$Rs, F32:$Rt, F32:$Ru)>;
-// ALU64 / VB / Vector mux.
-def HEXAGON_C2_vmux:
- di_ALU64_qididi <"vmux", int_hexagon_C2_vmux>;
+class T_FFFQ_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID F32:$Rs, F32:$Rt, F32:$Ru, (i32 PredRegs:$Rx)),
+ (MI F32:$Rs, F32:$Rt, F32:$Ru, PredRegs:$Rx)>;
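+
+// A note on the naming convention (an inference from the classes above; it is
+// not stated explicitly in this file): each letter after "T_" encodes one
+// operand, in order -- R: 32-bit IntRegs, P: 64-bit DoubleRegs pair,
+// I: immediate, F: 32-bit float, D: 64-bit double, Q: PredRegs predicate.
+// A hypothetical two-register-plus-immediate class in the same style:
+//   class T_RRI_pat <InstHexagon MI, Intrinsic IntID>
+//     : Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
+//            (MI I32:$Rs, I32:$Rt, imm:$Iu)>;
+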
+//===----------------------------------------------------------------------===//
+// MPYS / Multiply signed/unsigned halfwords
+//Rd=mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
+//===----------------------------------------------------------------------===//
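+
+// A minimal sketch of how the defs below work (T_RR_pat itself is defined
+// earlier in this file and is assumed to follow the same shape as the
+// classes above): a def such as
+//   def : T_RR_pat <M2_mpy_ll_s0, int_hexagon_M2_mpy_ll_s0>;
+// is shorthand for the selection pattern
+//   Pat <(int_hexagon_M2_mpy_ll_s0 I32:$Rs, I32:$Rt),
+//        (M2_mpy_ll_s0 I32:$Rs, I32:$Rt)>;
+// so a call to the intrinsic selects directly to "Rd = mpy(Rs.L, Rt.L)".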
-/********************************************************************
-* ALU64/VH *
-*********************************************************************/
+def : T_RR_pat <M2_mpy_ll_s1, int_hexagon_M2_mpy_ll_s1>;
+def : T_RR_pat <M2_mpy_ll_s0, int_hexagon_M2_mpy_ll_s0>;
+def : T_RR_pat <M2_mpy_lh_s1, int_hexagon_M2_mpy_lh_s1>;
+def : T_RR_pat <M2_mpy_lh_s0, int_hexagon_M2_mpy_lh_s0>;
+def : T_RR_pat <M2_mpy_hl_s1, int_hexagon_M2_mpy_hl_s1>;
+def : T_RR_pat <M2_mpy_hl_s0, int_hexagon_M2_mpy_hl_s0>;
+def : T_RR_pat <M2_mpy_hh_s1, int_hexagon_M2_mpy_hh_s1>;
+def : T_RR_pat <M2_mpy_hh_s0, int_hexagon_M2_mpy_hh_s0>;
+
+def : T_RR_pat <M2_mpyu_ll_s1, int_hexagon_M2_mpyu_ll_s1>;
+def : T_RR_pat <M2_mpyu_ll_s0, int_hexagon_M2_mpyu_ll_s0>;
+def : T_RR_pat <M2_mpyu_lh_s1, int_hexagon_M2_mpyu_lh_s1>;
+def : T_RR_pat <M2_mpyu_lh_s0, int_hexagon_M2_mpyu_lh_s0>;
+def : T_RR_pat <M2_mpyu_hl_s1, int_hexagon_M2_mpyu_hl_s1>;
+def : T_RR_pat <M2_mpyu_hl_s0, int_hexagon_M2_mpyu_hl_s0>;
+def : T_RR_pat <M2_mpyu_hh_s1, int_hexagon_M2_mpyu_hh_s1>;
+def : T_RR_pat <M2_mpyu_hh_s0, int_hexagon_M2_mpyu_hh_s0>;
+
+def : T_RR_pat <M2_mpy_sat_ll_s1, int_hexagon_M2_mpy_sat_ll_s1>;
+def : T_RR_pat <M2_mpy_sat_ll_s0, int_hexagon_M2_mpy_sat_ll_s0>;
+def : T_RR_pat <M2_mpy_sat_lh_s1, int_hexagon_M2_mpy_sat_lh_s1>;
+def : T_RR_pat <M2_mpy_sat_lh_s0, int_hexagon_M2_mpy_sat_lh_s0>;
+def : T_RR_pat <M2_mpy_sat_hl_s1, int_hexagon_M2_mpy_sat_hl_s1>;
+def : T_RR_pat <M2_mpy_sat_hl_s0, int_hexagon_M2_mpy_sat_hl_s0>;
+def : T_RR_pat <M2_mpy_sat_hh_s1, int_hexagon_M2_mpy_sat_hh_s1>;
+def : T_RR_pat <M2_mpy_sat_hh_s0, int_hexagon_M2_mpy_sat_hh_s0>;
+
+def : T_RR_pat <M2_mpy_rnd_ll_s1, int_hexagon_M2_mpy_rnd_ll_s1>;
+def : T_RR_pat <M2_mpy_rnd_ll_s0, int_hexagon_M2_mpy_rnd_ll_s0>;
+def : T_RR_pat <M2_mpy_rnd_lh_s1, int_hexagon_M2_mpy_rnd_lh_s1>;
+def : T_RR_pat <M2_mpy_rnd_lh_s0, int_hexagon_M2_mpy_rnd_lh_s0>;
+def : T_RR_pat <M2_mpy_rnd_hl_s1, int_hexagon_M2_mpy_rnd_hl_s1>;
+def : T_RR_pat <M2_mpy_rnd_hl_s0, int_hexagon_M2_mpy_rnd_hl_s0>;
+def : T_RR_pat <M2_mpy_rnd_hh_s1, int_hexagon_M2_mpy_rnd_hh_s1>;
+def : T_RR_pat <M2_mpy_rnd_hh_s0, int_hexagon_M2_mpy_rnd_hh_s0>;
+
+def : T_RR_pat <M2_mpy_sat_rnd_ll_s1, int_hexagon_M2_mpy_sat_rnd_ll_s1>;
+def : T_RR_pat <M2_mpy_sat_rnd_ll_s0, int_hexagon_M2_mpy_sat_rnd_ll_s0>;
+def : T_RR_pat <M2_mpy_sat_rnd_lh_s1, int_hexagon_M2_mpy_sat_rnd_lh_s1>;
+def : T_RR_pat <M2_mpy_sat_rnd_lh_s0, int_hexagon_M2_mpy_sat_rnd_lh_s0>;
+def : T_RR_pat <M2_mpy_sat_rnd_hl_s1, int_hexagon_M2_mpy_sat_rnd_hl_s1>;
+def : T_RR_pat <M2_mpy_sat_rnd_hl_s0, int_hexagon_M2_mpy_sat_rnd_hl_s0>;
+def : T_RR_pat <M2_mpy_sat_rnd_hh_s1, int_hexagon_M2_mpy_sat_rnd_hh_s1>;
+def : T_RR_pat <M2_mpy_sat_rnd_hh_s0, int_hexagon_M2_mpy_sat_rnd_hh_s0>;
-// ALU64 / VH / Vector add halfwords.
-// Rdd64=vadd[u]h(Rss64,Rtt64:sat]
-def HEXAGON_A2_vaddh:
- di_ALU64_didi <"vaddh", int_hexagon_A2_vaddh>;
-def HEXAGON_A2_vaddhs:
- di_ALU64_didi_sat <"vaddh", int_hexagon_A2_vaddhs>;
-def HEXAGON_A2_vadduhs:
- di_ALU64_didi_sat <"vadduh", int_hexagon_A2_vadduhs>;
-
-// ALU64 / VH / Vector average halfwords.
-// Rdd64=v[n]avg[u]h(Rss64,Rtt64:rnd/:crnd][:sat]
-def HEXAGON_A2_vavgh:
- di_ALU64_didi <"vavgh", int_hexagon_A2_vavgh>;
-def HEXAGON_A2_vavghcr:
- di_ALU64_didi_crnd <"vavgh", int_hexagon_A2_vavghcr>;
-def HEXAGON_A2_vavghr:
- di_ALU64_didi_rnd <"vavgh", int_hexagon_A2_vavghr>;
-def HEXAGON_A2_vavguh:
- di_ALU64_didi <"vavguh", int_hexagon_A2_vavguh>;
-def HEXAGON_A2_vavguhr:
- di_ALU64_didi_rnd <"vavguh", int_hexagon_A2_vavguhr>;
-def HEXAGON_A2_vnavgh:
- di_ALU64_didi <"vnavgh", int_hexagon_A2_vnavgh>;
-def HEXAGON_A2_vnavghcr:
- di_ALU64_didi_crnd_sat <"vnavgh", int_hexagon_A2_vnavghcr>;
-def HEXAGON_A2_vnavghr:
- di_ALU64_didi_rnd_sat <"vnavgh", int_hexagon_A2_vnavghr>;
-
-// ALU64 / VH / Vector compare halfwords.
-def HEXAGON_A2_vcmpheq:
- qi_ALU64_didi <"vcmph.eq", int_hexagon_A2_vcmpheq>;
-def HEXAGON_A2_vcmphgt:
- qi_ALU64_didi <"vcmph.gt", int_hexagon_A2_vcmphgt>;
-def HEXAGON_A2_vcmphgtu:
- qi_ALU64_didi <"vcmph.gtu",int_hexagon_A2_vcmphgtu>;
-
-// ALU64 / VH / Vector maximum halfwords.
-def HEXAGON_A2_vmaxh:
- di_ALU64_didi <"vmaxh", int_hexagon_A2_vmaxh>;
-def HEXAGON_A2_vmaxuh:
- di_ALU64_didi <"vmaxuh", int_hexagon_A2_vmaxuh>;
-
-// ALU64 / VH / Vector minimum halfwords.
-def HEXAGON_A2_vminh:
- di_ALU64_didi <"vminh", int_hexagon_A2_vminh>;
-def HEXAGON_A2_vminuh:
- di_ALU64_didi <"vminuh", int_hexagon_A2_vminuh>;
-
-// ALU64 / VH / Vector subtract halfwords.
-def HEXAGON_A2_vsubh:
- di_ALU64_didi <"vsubh", int_hexagon_A2_vsubh>;
-def HEXAGON_A2_vsubhs:
- di_ALU64_didi_sat <"vsubh", int_hexagon_A2_vsubhs>;
-def HEXAGON_A2_vsubuhs:
- di_ALU64_didi_sat <"vsubuh", int_hexagon_A2_vsubuhs>;
+//===----------------------------------------------------------------------===//
+// MPYS / Multiply signed/unsigned halfwords and add the result to, or
+// subtract it from, the accumulator.
+//Rx [-+]= mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:sat]
+//===----------------------------------------------------------------------===//
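+
+// Sketch: with T_RRR_pat as defined above, a def such as
+//   def : T_RRR_pat <M2_mpy_acc_ll_s0, int_hexagon_M2_mpy_acc_ll_s0>;
+// expands to
+//   Pat <(int_hexagon_M2_mpy_acc_ll_s0 I32:$Rx, I32:$Rs, I32:$Rt),
+//        (M2_mpy_acc_ll_s0 I32:$Rx, I32:$Rs, I32:$Rt)>;
+// where the first operand carries the incoming accumulator value, i.e.
+// "Rx += mpy(Rs.L, Rt.L)".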
-/********************************************************************
-* ALU64/VW *
-*********************************************************************/
+def : T_RRR_pat <M2_mpy_acc_ll_s1, int_hexagon_M2_mpy_acc_ll_s1>;
+def : T_RRR_pat <M2_mpy_acc_ll_s0, int_hexagon_M2_mpy_acc_ll_s0>;
+def : T_RRR_pat <M2_mpy_acc_lh_s1, int_hexagon_M2_mpy_acc_lh_s1>;
+def : T_RRR_pat <M2_mpy_acc_lh_s0, int_hexagon_M2_mpy_acc_lh_s0>;
+def : T_RRR_pat <M2_mpy_acc_hl_s1, int_hexagon_M2_mpy_acc_hl_s1>;
+def : T_RRR_pat <M2_mpy_acc_hl_s0, int_hexagon_M2_mpy_acc_hl_s0>;
+def : T_RRR_pat <M2_mpy_acc_hh_s1, int_hexagon_M2_mpy_acc_hh_s1>;
+def : T_RRR_pat <M2_mpy_acc_hh_s0, int_hexagon_M2_mpy_acc_hh_s0>;
+
+def : T_RRR_pat <M2_mpyu_acc_ll_s1, int_hexagon_M2_mpyu_acc_ll_s1>;
+def : T_RRR_pat <M2_mpyu_acc_ll_s0, int_hexagon_M2_mpyu_acc_ll_s0>;
+def : T_RRR_pat <M2_mpyu_acc_lh_s1, int_hexagon_M2_mpyu_acc_lh_s1>;
+def : T_RRR_pat <M2_mpyu_acc_lh_s0, int_hexagon_M2_mpyu_acc_lh_s0>;
+def : T_RRR_pat <M2_mpyu_acc_hl_s1, int_hexagon_M2_mpyu_acc_hl_s1>;
+def : T_RRR_pat <M2_mpyu_acc_hl_s0, int_hexagon_M2_mpyu_acc_hl_s0>;
+def : T_RRR_pat <M2_mpyu_acc_hh_s1, int_hexagon_M2_mpyu_acc_hh_s1>;
+def : T_RRR_pat <M2_mpyu_acc_hh_s0, int_hexagon_M2_mpyu_acc_hh_s0>;
+
+def : T_RRR_pat <M2_mpy_nac_ll_s1, int_hexagon_M2_mpy_nac_ll_s1>;
+def : T_RRR_pat <M2_mpy_nac_ll_s0, int_hexagon_M2_mpy_nac_ll_s0>;
+def : T_RRR_pat <M2_mpy_nac_lh_s1, int_hexagon_M2_mpy_nac_lh_s1>;
+def : T_RRR_pat <M2_mpy_nac_lh_s0, int_hexagon_M2_mpy_nac_lh_s0>;
+def : T_RRR_pat <M2_mpy_nac_hl_s1, int_hexagon_M2_mpy_nac_hl_s1>;
+def : T_RRR_pat <M2_mpy_nac_hl_s0, int_hexagon_M2_mpy_nac_hl_s0>;
+def : T_RRR_pat <M2_mpy_nac_hh_s1, int_hexagon_M2_mpy_nac_hh_s1>;
+def : T_RRR_pat <M2_mpy_nac_hh_s0, int_hexagon_M2_mpy_nac_hh_s0>;
+
+def : T_RRR_pat <M2_mpyu_nac_ll_s1, int_hexagon_M2_mpyu_nac_ll_s1>;
+def : T_RRR_pat <M2_mpyu_nac_ll_s0, int_hexagon_M2_mpyu_nac_ll_s0>;
+def : T_RRR_pat <M2_mpyu_nac_lh_s1, int_hexagon_M2_mpyu_nac_lh_s1>;
+def : T_RRR_pat <M2_mpyu_nac_lh_s0, int_hexagon_M2_mpyu_nac_lh_s0>;
+def : T_RRR_pat <M2_mpyu_nac_hl_s1, int_hexagon_M2_mpyu_nac_hl_s1>;
+def : T_RRR_pat <M2_mpyu_nac_hl_s0, int_hexagon_M2_mpyu_nac_hl_s0>;
+def : T_RRR_pat <M2_mpyu_nac_hh_s1, int_hexagon_M2_mpyu_nac_hh_s1>;
+def : T_RRR_pat <M2_mpyu_nac_hh_s0, int_hexagon_M2_mpyu_nac_hh_s0>;
+
+def : T_RRR_pat <M2_mpy_acc_sat_ll_s1, int_hexagon_M2_mpy_acc_sat_ll_s1>;
+def : T_RRR_pat <M2_mpy_acc_sat_ll_s0, int_hexagon_M2_mpy_acc_sat_ll_s0>;
+def : T_RRR_pat <M2_mpy_acc_sat_lh_s1, int_hexagon_M2_mpy_acc_sat_lh_s1>;
+def : T_RRR_pat <M2_mpy_acc_sat_lh_s0, int_hexagon_M2_mpy_acc_sat_lh_s0>;
+def : T_RRR_pat <M2_mpy_acc_sat_hl_s1, int_hexagon_M2_mpy_acc_sat_hl_s1>;
+def : T_RRR_pat <M2_mpy_acc_sat_hl_s0, int_hexagon_M2_mpy_acc_sat_hl_s0>;
+def : T_RRR_pat <M2_mpy_acc_sat_hh_s1, int_hexagon_M2_mpy_acc_sat_hh_s1>;
+def : T_RRR_pat <M2_mpy_acc_sat_hh_s0, int_hexagon_M2_mpy_acc_sat_hh_s0>;
+
+def : T_RRR_pat <M2_mpy_nac_sat_ll_s1, int_hexagon_M2_mpy_nac_sat_ll_s1>;
+def : T_RRR_pat <M2_mpy_nac_sat_ll_s0, int_hexagon_M2_mpy_nac_sat_ll_s0>;
+def : T_RRR_pat <M2_mpy_nac_sat_lh_s1, int_hexagon_M2_mpy_nac_sat_lh_s1>;
+def : T_RRR_pat <M2_mpy_nac_sat_lh_s0, int_hexagon_M2_mpy_nac_sat_lh_s0>;
+def : T_RRR_pat <M2_mpy_nac_sat_hl_s1, int_hexagon_M2_mpy_nac_sat_hl_s1>;
+def : T_RRR_pat <M2_mpy_nac_sat_hl_s0, int_hexagon_M2_mpy_nac_sat_hl_s0>;
+def : T_RRR_pat <M2_mpy_nac_sat_hh_s1, int_hexagon_M2_mpy_nac_sat_hh_s1>;
+def : T_RRR_pat <M2_mpy_nac_sat_hh_s0, int_hexagon_M2_mpy_nac_sat_hh_s0>;
+
+
+//===----------------------------------------------------------------------===//
+// Multiply signed/unsigned halfwords with and without saturation and rounding
+// into a 64-bit destination register.
+//===----------------------------------------------------------------------===//
+
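+// Sketch (assuming the usual intrinsic signatures): the mpyd defs below
+// return the full halfword product in a 64-bit register pair, e.g.
+//   def : T_RR_pat <M2_mpyd_ll_s0, int_hexagon_M2_mpyd_ll_s0>;
+// selects (int_hexagon_M2_mpyd_ll_s0 I32:$Rs, I32:$Rt) to
+// "Rdd = mpy(Rs.L, Rt.L)", the 32-bit product sign-extended into Rdd.
+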
+def : T_RR_pat <M2_mpyd_hh_s0, int_hexagon_M2_mpyd_hh_s0>;
+def : T_RR_pat <M2_mpyd_hl_s0, int_hexagon_M2_mpyd_hl_s0>;
+def : T_RR_pat <M2_mpyd_lh_s0, int_hexagon_M2_mpyd_lh_s0>;
+def : T_RR_pat <M2_mpyd_ll_s0, int_hexagon_M2_mpyd_ll_s0>;
+def : T_RR_pat <M2_mpyd_hh_s1, int_hexagon_M2_mpyd_hh_s1>;
+def : T_RR_pat <M2_mpyd_hl_s1, int_hexagon_M2_mpyd_hl_s1>;
+def : T_RR_pat <M2_mpyd_lh_s1, int_hexagon_M2_mpyd_lh_s1>;
+def : T_RR_pat <M2_mpyd_ll_s1, int_hexagon_M2_mpyd_ll_s1>;
+
+def : T_RR_pat <M2_mpyd_rnd_hh_s0, int_hexagon_M2_mpyd_rnd_hh_s0>;
+def : T_RR_pat <M2_mpyd_rnd_hl_s0, int_hexagon_M2_mpyd_rnd_hl_s0>;
+def : T_RR_pat <M2_mpyd_rnd_lh_s0, int_hexagon_M2_mpyd_rnd_lh_s0>;
+def : T_RR_pat <M2_mpyd_rnd_ll_s0, int_hexagon_M2_mpyd_rnd_ll_s0>;
+def : T_RR_pat <M2_mpyd_rnd_hh_s1, int_hexagon_M2_mpyd_rnd_hh_s1>;
+def : T_RR_pat <M2_mpyd_rnd_hl_s1, int_hexagon_M2_mpyd_rnd_hl_s1>;
+def : T_RR_pat <M2_mpyd_rnd_lh_s1, int_hexagon_M2_mpyd_rnd_lh_s1>;
+def : T_RR_pat <M2_mpyd_rnd_ll_s1, int_hexagon_M2_mpyd_rnd_ll_s1>;
+
+def : T_RR_pat <M2_mpyud_hh_s0, int_hexagon_M2_mpyud_hh_s0>;
+def : T_RR_pat <M2_mpyud_hl_s0, int_hexagon_M2_mpyud_hl_s0>;
+def : T_RR_pat <M2_mpyud_lh_s0, int_hexagon_M2_mpyud_lh_s0>;
+def : T_RR_pat <M2_mpyud_ll_s0, int_hexagon_M2_mpyud_ll_s0>;
+def : T_RR_pat <M2_mpyud_hh_s1, int_hexagon_M2_mpyud_hh_s1>;
+def : T_RR_pat <M2_mpyud_hl_s1, int_hexagon_M2_mpyud_hl_s1>;
+def : T_RR_pat <M2_mpyud_lh_s1, int_hexagon_M2_mpyud_lh_s1>;
+def : T_RR_pat <M2_mpyud_ll_s1, int_hexagon_M2_mpyud_ll_s1>;
+
+//===----------------------------------------------------------------------===//
+// MPYS / Multiply signed/unsigned halfwords and add the result to, or
+// subtract it from, the 64-bit destination register pair.
+//Rxx [-+]= mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:sat]
+//===----------------------------------------------------------------------===//
+
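+// Sketch: T_PRR_pat (defined above) matches these 64-bit accumulator forms:
+//   def : T_PRR_pat <M2_mpyd_acc_ll_s0, int_hexagon_M2_mpyd_acc_ll_s0>;
+// expands to
+//   Pat <(int_hexagon_M2_mpyd_acc_ll_s0 I64:$Rxx, I32:$Rs, I32:$Rt),
+//        (M2_mpyd_acc_ll_s0 DoubleRegs:$Rxx, I32:$Rs, I32:$Rt)>;
+// i.e. "Rxx += mpy(Rs.L, Rt.L)".
+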
+def : T_PRR_pat <M2_mpyd_acc_hh_s0, int_hexagon_M2_mpyd_acc_hh_s0>;
+def : T_PRR_pat <M2_mpyd_acc_hl_s0, int_hexagon_M2_mpyd_acc_hl_s0>;
+def : T_PRR_pat <M2_mpyd_acc_lh_s0, int_hexagon_M2_mpyd_acc_lh_s0>;
+def : T_PRR_pat <M2_mpyd_acc_ll_s0, int_hexagon_M2_mpyd_acc_ll_s0>;
+
+def : T_PRR_pat <M2_mpyd_acc_hh_s1, int_hexagon_M2_mpyd_acc_hh_s1>;
+def : T_PRR_pat <M2_mpyd_acc_hl_s1, int_hexagon_M2_mpyd_acc_hl_s1>;
+def : T_PRR_pat <M2_mpyd_acc_lh_s1, int_hexagon_M2_mpyd_acc_lh_s1>;
+def : T_PRR_pat <M2_mpyd_acc_ll_s1, int_hexagon_M2_mpyd_acc_ll_s1>;
+
+def : T_PRR_pat <M2_mpyd_nac_hh_s0, int_hexagon_M2_mpyd_nac_hh_s0>;
+def : T_PRR_pat <M2_mpyd_nac_hl_s0, int_hexagon_M2_mpyd_nac_hl_s0>;
+def : T_PRR_pat <M2_mpyd_nac_lh_s0, int_hexagon_M2_mpyd_nac_lh_s0>;
+def : T_PRR_pat <M2_mpyd_nac_ll_s0, int_hexagon_M2_mpyd_nac_ll_s0>;
+
+def : T_PRR_pat <M2_mpyd_nac_hh_s1, int_hexagon_M2_mpyd_nac_hh_s1>;
+def : T_PRR_pat <M2_mpyd_nac_hl_s1, int_hexagon_M2_mpyd_nac_hl_s1>;
+def : T_PRR_pat <M2_mpyd_nac_lh_s1, int_hexagon_M2_mpyd_nac_lh_s1>;
+def : T_PRR_pat <M2_mpyd_nac_ll_s1, int_hexagon_M2_mpyd_nac_ll_s1>;
+
+def : T_PRR_pat <M2_mpyud_acc_hh_s0, int_hexagon_M2_mpyud_acc_hh_s0>;
+def : T_PRR_pat <M2_mpyud_acc_hl_s0, int_hexagon_M2_mpyud_acc_hl_s0>;
+def : T_PRR_pat <M2_mpyud_acc_lh_s0, int_hexagon_M2_mpyud_acc_lh_s0>;
+def : T_PRR_pat <M2_mpyud_acc_ll_s0, int_hexagon_M2_mpyud_acc_ll_s0>;
+
+def : T_PRR_pat <M2_mpyud_acc_hh_s1, int_hexagon_M2_mpyud_acc_hh_s1>;
+def : T_PRR_pat <M2_mpyud_acc_hl_s1, int_hexagon_M2_mpyud_acc_hl_s1>;
+def : T_PRR_pat <M2_mpyud_acc_lh_s1, int_hexagon_M2_mpyud_acc_lh_s1>;
+def : T_PRR_pat <M2_mpyud_acc_ll_s1, int_hexagon_M2_mpyud_acc_ll_s1>;
+
+def : T_PRR_pat <M2_mpyud_nac_hh_s0, int_hexagon_M2_mpyud_nac_hh_s0>;
+def : T_PRR_pat <M2_mpyud_nac_hl_s0, int_hexagon_M2_mpyud_nac_hl_s0>;
+def : T_PRR_pat <M2_mpyud_nac_lh_s0, int_hexagon_M2_mpyud_nac_lh_s0>;
+def : T_PRR_pat <M2_mpyud_nac_ll_s0, int_hexagon_M2_mpyud_nac_ll_s0>;
+
+def : T_PRR_pat <M2_mpyud_nac_hh_s1, int_hexagon_M2_mpyud_nac_hh_s1>;
+def : T_PRR_pat <M2_mpyud_nac_hl_s1, int_hexagon_M2_mpyud_nac_hl_s1>;
+def : T_PRR_pat <M2_mpyud_nac_lh_s1, int_hexagon_M2_mpyud_nac_lh_s1>;
+def : T_PRR_pat <M2_mpyud_nac_ll_s1, int_hexagon_M2_mpyud_nac_ll_s1>;
+
+// Vector complex multiply imaginary: Rdd=vcmpyi(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vcmpy_s1_sat_i, int_hexagon_M2_vcmpy_s1_sat_i>;
+def : T_PP_pat <M2_vcmpy_s0_sat_i, int_hexagon_M2_vcmpy_s0_sat_i>;
+
+// Vector complex multiply real: Rdd=vcmpyr(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vcmpy_s1_sat_r, int_hexagon_M2_vcmpy_s1_sat_r>;
+def : T_PP_pat <M2_vcmpy_s0_sat_r, int_hexagon_M2_vcmpy_s0_sat_r>;
+
+// Vector dual multiply: Rdd=vdmpy(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vdmpys_s1, int_hexagon_M2_vdmpys_s1>;
+def : T_PP_pat <M2_vdmpys_s0, int_hexagon_M2_vdmpys_s0>;
+
+// Vector multiply even halfwords: Rdd=vmpyeh(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vmpy2es_s1, int_hexagon_M2_vmpy2es_s1>;
+def : T_PP_pat <M2_vmpy2es_s0, int_hexagon_M2_vmpy2es_s0>;
+
+//Rdd=vmpywoh(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PP_pat <M2_mmpyh_s0, int_hexagon_M2_mmpyh_s0>;
+def : T_PP_pat <M2_mmpyh_s1, int_hexagon_M2_mmpyh_s1>;
+def : T_PP_pat <M2_mmpyh_rs0, int_hexagon_M2_mmpyh_rs0>;
+def : T_PP_pat <M2_mmpyh_rs1, int_hexagon_M2_mmpyh_rs1>;
+
+//Rdd=vmpyweh(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PP_pat <M2_mmpyl_s0, int_hexagon_M2_mmpyl_s0>;
+def : T_PP_pat <M2_mmpyl_s1, int_hexagon_M2_mmpyl_s1>;
+def : T_PP_pat <M2_mmpyl_rs0, int_hexagon_M2_mmpyl_rs0>;
+def : T_PP_pat <M2_mmpyl_rs1, int_hexagon_M2_mmpyl_rs1>;
+
+//Rdd=vmpywouh(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PP_pat <M2_mmpyuh_s0, int_hexagon_M2_mmpyuh_s0>;
+def : T_PP_pat <M2_mmpyuh_s1, int_hexagon_M2_mmpyuh_s1>;
+def : T_PP_pat <M2_mmpyuh_rs0, int_hexagon_M2_mmpyuh_rs0>;
+def : T_PP_pat <M2_mmpyuh_rs1, int_hexagon_M2_mmpyuh_rs1>;
+
+//Rdd=vmpyweuh(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PP_pat <M2_mmpyul_s0, int_hexagon_M2_mmpyul_s0>;
+def : T_PP_pat <M2_mmpyul_s1, int_hexagon_M2_mmpyul_s1>;
+def : T_PP_pat <M2_mmpyul_rs0, int_hexagon_M2_mmpyul_rs0>;
+def : T_PP_pat <M2_mmpyul_rs1, int_hexagon_M2_mmpyul_rs1>;
+
+// Vector reduce add unsigned bytes: Rdd32[+]=vraddub(Rss32,Rtt32)
+def : T_PP_pat <A2_vraddub, int_hexagon_A2_vraddub>;
+def : T_PPP_pat <A2_vraddub_acc, int_hexagon_A2_vraddub_acc>;
+
+// Vector sum of absolute differences unsigned bytes: Rdd=vrsadub(Rss,Rtt)
+def : T_PP_pat <A2_vrsadub, int_hexagon_A2_vrsadub>;
+def : T_PPP_pat <A2_vrsadub_acc, int_hexagon_A2_vrsadub_acc>;
+
+// Vector absolute difference halfwords: Rdd=vabsdiffh(Rtt,Rss)
+def : T_PP_pat <M2_vabsdiffh, int_hexagon_M2_vabsdiffh>;
+
+// Vector absolute difference words: Rdd=vabsdiffw(Rtt,Rss)
+def : T_PP_pat <M2_vabsdiffw, int_hexagon_M2_vabsdiffw>;
+
+// Vector reduce complex multiply real or imaginary:
+// Rdd[+]=vrcmpy[ir](Rss,Rtt[*])
+def : T_PP_pat <M2_vrcmpyi_s0, int_hexagon_M2_vrcmpyi_s0>;
+def : T_PP_pat <M2_vrcmpyi_s0c, int_hexagon_M2_vrcmpyi_s0c>;
+def : T_PPP_pat <M2_vrcmaci_s0, int_hexagon_M2_vrcmaci_s0>;
+def : T_PPP_pat <M2_vrcmaci_s0c, int_hexagon_M2_vrcmaci_s0c>;
+
+def : T_PP_pat <M2_vrcmpyr_s0, int_hexagon_M2_vrcmpyr_s0>;
+def : T_PP_pat <M2_vrcmpyr_s0c, int_hexagon_M2_vrcmpyr_s0c>;
+def : T_PPP_pat <M2_vrcmacr_s0, int_hexagon_M2_vrcmacr_s0>;
+def : T_PPP_pat <M2_vrcmacr_s0c, int_hexagon_M2_vrcmacr_s0c>;
+
+// Vector reduce multiply halfwords
+// Rdd[+]=vrmpyh(Rss,Rtt)
+def : T_PP_pat <M2_vrmpy_s0, int_hexagon_M2_vrmpy_s0>;
+def : T_PPP_pat <M2_vrmac_s0, int_hexagon_M2_vrmac_s0>;
+
+//===----------------------------------------------------------------------===//
+// Vector multiply with accumulation
+//===----------------------------------------------------------------------===//
-// ALU64 / VW / Vector add words.
-// Rdd32=vaddw(Rss32,Rtt32)[:sat]
-def HEXAGON_A2_vaddw:
- di_ALU64_didi <"vaddw", int_hexagon_A2_vaddw>;
-def HEXAGON_A2_vaddws:
- di_ALU64_didi_sat <"vaddw", int_hexagon_A2_vaddws>;
-
-// ALU64 / VW / Vector average words.
-def HEXAGON_A2_vavguw:
- di_ALU64_didi <"vavguw", int_hexagon_A2_vavguw>;
-def HEXAGON_A2_vavguwr:
- di_ALU64_didi_rnd <"vavguw", int_hexagon_A2_vavguwr>;
-def HEXAGON_A2_vavgw:
- di_ALU64_didi <"vavgw", int_hexagon_A2_vavgw>;
-def HEXAGON_A2_vavgwcr:
- di_ALU64_didi_crnd <"vavgw", int_hexagon_A2_vavgwcr>;
-def HEXAGON_A2_vavgwr:
- di_ALU64_didi_rnd <"vavgw", int_hexagon_A2_vavgwr>;
-def HEXAGON_A2_vnavgw:
- di_ALU64_didi <"vnavgw", int_hexagon_A2_vnavgw>;
-def HEXAGON_A2_vnavgwcr:
- di_ALU64_didi_crnd_sat <"vnavgw", int_hexagon_A2_vnavgwcr>;
-def HEXAGON_A2_vnavgwr:
- di_ALU64_didi_rnd_sat <"vnavgw", int_hexagon_A2_vnavgwr>;
-
-// ALU64 / VW / Vector compare words.
-def HEXAGON_A2_vcmpweq:
- qi_ALU64_didi <"vcmpw.eq", int_hexagon_A2_vcmpweq>;
-def HEXAGON_A2_vcmpwgt:
- qi_ALU64_didi <"vcmpw.gt", int_hexagon_A2_vcmpwgt>;
-def HEXAGON_A2_vcmpwgtu:
- qi_ALU64_didi <"vcmpw.gtu",int_hexagon_A2_vcmpwgtu>;
-
-// ALU64 / VW / Vector maximum words.
-def HEXAGON_A2_vmaxw:
- di_ALU64_didi <"vmaxw", int_hexagon_A2_vmaxw>;
-def HEXAGON_A2_vmaxuw:
- di_ALU64_didi <"vmaxuw", int_hexagon_A2_vmaxuw>;
-
-// ALU64 / VW / Vector minimum words.
-def HEXAGON_A2_vminw:
- di_ALU64_didi <"vminw", int_hexagon_A2_vminw>;
-def HEXAGON_A2_vminuw:
- di_ALU64_didi <"vminuw", int_hexagon_A2_vminuw>;
-
-// ALU64 / VW / Vector subtract words.
-def HEXAGON_A2_vsubw:
- di_ALU64_didi <"vsubw", int_hexagon_A2_vsubw>;
-def HEXAGON_A2_vsubws:
- di_ALU64_didi_sat <"vsubw", int_hexagon_A2_vsubws>;
+// Vector multiply word by signed half with accumulation
+// Rxx+=vmpyw[eo]h(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PPP_pat <M2_mmacls_s1, int_hexagon_M2_mmacls_s1>;
+def : T_PPP_pat <M2_mmacls_s0, int_hexagon_M2_mmacls_s0>;
+def : T_PPP_pat <M2_mmacls_rs1, int_hexagon_M2_mmacls_rs1>;
+def : T_PPP_pat <M2_mmacls_rs0, int_hexagon_M2_mmacls_rs0>;
+def : T_PPP_pat <M2_mmachs_s1, int_hexagon_M2_mmachs_s1>;
+def : T_PPP_pat <M2_mmachs_s0, int_hexagon_M2_mmachs_s0>;
+def : T_PPP_pat <M2_mmachs_rs1, int_hexagon_M2_mmachs_rs1>;
+def : T_PPP_pat <M2_mmachs_rs0, int_hexagon_M2_mmachs_rs0>;
+
+// Vector multiply word by unsigned half with accumulation
+// Rxx+=vmpyw[eo]uh(Rss,Rtt)[:<<1][:rnd]:sat
+def : T_PPP_pat <M2_mmaculs_s1, int_hexagon_M2_mmaculs_s1>;
+def : T_PPP_pat <M2_mmaculs_s0, int_hexagon_M2_mmaculs_s0>;
+def : T_PPP_pat <M2_mmaculs_rs1, int_hexagon_M2_mmaculs_rs1>;
+def : T_PPP_pat <M2_mmaculs_rs0, int_hexagon_M2_mmaculs_rs0>;
+def : T_PPP_pat <M2_mmacuhs_s1, int_hexagon_M2_mmacuhs_s1>;
+def : T_PPP_pat <M2_mmacuhs_s0, int_hexagon_M2_mmacuhs_s0>;
+def : T_PPP_pat <M2_mmacuhs_rs1, int_hexagon_M2_mmacuhs_rs1>;
+def : T_PPP_pat <M2_mmacuhs_rs0, int_hexagon_M2_mmacuhs_rs0>;
+
+// Vector multiply even halfwords with accumulation
+// Rxx+=vmpyeh(Rss,Rtt)[:<<1][:sat]
+def : T_PPP_pat <M2_vmac2es, int_hexagon_M2_vmac2es>;
+def : T_PPP_pat <M2_vmac2es_s1, int_hexagon_M2_vmac2es_s1>;
+def : T_PPP_pat <M2_vmac2es_s0, int_hexagon_M2_vmac2es_s0>;
+
+// Vector dual multiply with accumulation
+// Rxx+=vdmpy(Rss,Rtt)[:sat]
+def : T_PPP_pat <M2_vdmacs_s1, int_hexagon_M2_vdmacs_s1>;
+def : T_PPP_pat <M2_vdmacs_s0, int_hexagon_M2_vdmacs_s0>;
+
+// Vector complex multiply real or imaginary with accumulation
+// Rxx+=vcmpy[ir](Rss,Rtt):sat
+def : T_PPP_pat <M2_vcmac_s0_sat_r, int_hexagon_M2_vcmac_s0_sat_r>;
+def : T_PPP_pat <M2_vcmac_s0_sat_i, int_hexagon_M2_vcmac_s0_sat_i>;
+//===----------------------------------------------------------------------===//
+// Add/Subtract halfword
+// Rd=add(Rt.L,Rs.[HL])[:sat]
+// Rd=sub(Rt.L,Rs.[HL])[:sat]
+// Rd=add(Rt.[LH],Rs.[HL])[:sat][:<16]
+// Rd=sub(Rt.[LH],Rs.[HL])[:sat][:<16]
+//===----------------------------------------------------------------------===//
+
+//Rd=add(Rt.L,Rs.[LH])
+def : T_RR_pat <A2_addh_l16_ll, int_hexagon_A2_addh_l16_ll>;
+def : T_RR_pat <A2_addh_l16_hl, int_hexagon_A2_addh_l16_hl>;
+
+//Rd=add(Rt.L,Rs.[LH]):sat
+def : T_RR_pat <A2_addh_l16_sat_ll, int_hexagon_A2_addh_l16_sat_ll>;
+def : T_RR_pat <A2_addh_l16_sat_hl, int_hexagon_A2_addh_l16_sat_hl>;
+
+//Rd=sub(Rt.L,Rs.[LH])
+def : T_RR_pat <A2_subh_l16_ll, int_hexagon_A2_subh_l16_ll>;
+def : T_RR_pat <A2_subh_l16_hl, int_hexagon_A2_subh_l16_hl>;
+
+//Rd=sub(Rt.L,Rs.[LH]):sat
+def : T_RR_pat <A2_subh_l16_sat_ll, int_hexagon_A2_subh_l16_sat_ll>;
+def : T_RR_pat <A2_subh_l16_sat_hl, int_hexagon_A2_subh_l16_sat_hl>;
+
+//Rd=add(Rt.[LH],Rs.[LH]):<<16
+def : T_RR_pat <A2_addh_h16_ll, int_hexagon_A2_addh_h16_ll>;
+def : T_RR_pat <A2_addh_h16_lh, int_hexagon_A2_addh_h16_lh>;
+def : T_RR_pat <A2_addh_h16_hl, int_hexagon_A2_addh_h16_hl>;
+def : T_RR_pat <A2_addh_h16_hh, int_hexagon_A2_addh_h16_hh>;
+
+//Rd=sub(Rt.[LH],Rs.[LH]):<<16
+def : T_RR_pat <A2_subh_h16_ll, int_hexagon_A2_subh_h16_ll>;
+def : T_RR_pat <A2_subh_h16_lh, int_hexagon_A2_subh_h16_lh>;
+def : T_RR_pat <A2_subh_h16_hl, int_hexagon_A2_subh_h16_hl>;
+def : T_RR_pat <A2_subh_h16_hh, int_hexagon_A2_subh_h16_hh>;
+
+//Rd=add(Rt.[LH],Rs.[LH]):sat:<<16
+def : T_RR_pat <A2_addh_h16_sat_ll, int_hexagon_A2_addh_h16_sat_ll>;
+def : T_RR_pat <A2_addh_h16_sat_lh, int_hexagon_A2_addh_h16_sat_lh>;
+def : T_RR_pat <A2_addh_h16_sat_hl, int_hexagon_A2_addh_h16_sat_hl>;
+def : T_RR_pat <A2_addh_h16_sat_hh, int_hexagon_A2_addh_h16_sat_hh>;
+
+//Rd=sub(Rt.[LH],Rs.[LH]):sat:<<16
+def : T_RR_pat <A2_subh_h16_sat_ll, int_hexagon_A2_subh_h16_sat_ll>;
+def : T_RR_pat <A2_subh_h16_sat_lh, int_hexagon_A2_subh_h16_sat_lh>;
+def : T_RR_pat <A2_subh_h16_sat_hl, int_hexagon_A2_subh_h16_sat_hl>;
+def : T_RR_pat <A2_subh_h16_sat_hh, int_hexagon_A2_subh_h16_sat_hh>;
+
+// ALU64 / ALU / min max
+def : T_RR_pat<A2_max, int_hexagon_A2_max>;
+def : T_RR_pat<A2_min, int_hexagon_A2_min>;
+def : T_RR_pat<A2_maxu, int_hexagon_A2_maxu>;
+def : T_RR_pat<A2_minu, int_hexagon_A2_minu>;
+
+// Shift and accumulate
+def : T_RRI_pat <S2_asr_i_r_nac, int_hexagon_S2_asr_i_r_nac>;
+def : T_RRI_pat <S2_lsr_i_r_nac, int_hexagon_S2_lsr_i_r_nac>;
+def : T_RRI_pat <S2_asl_i_r_nac, int_hexagon_S2_asl_i_r_nac>;
+def : T_RRI_pat <S2_asr_i_r_acc, int_hexagon_S2_asr_i_r_acc>;
+def : T_RRI_pat <S2_lsr_i_r_acc, int_hexagon_S2_lsr_i_r_acc>;
+def : T_RRI_pat <S2_asl_i_r_acc, int_hexagon_S2_asl_i_r_acc>;
+
+def : T_RRI_pat <S2_asr_i_r_and, int_hexagon_S2_asr_i_r_and>;
+def : T_RRI_pat <S2_lsr_i_r_and, int_hexagon_S2_lsr_i_r_and>;
+def : T_RRI_pat <S2_asl_i_r_and, int_hexagon_S2_asl_i_r_and>;
+def : T_RRI_pat <S2_asr_i_r_or, int_hexagon_S2_asr_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_or, int_hexagon_S2_lsr_i_r_or>;
+def : T_RRI_pat <S2_asl_i_r_or, int_hexagon_S2_asl_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_xacc, int_hexagon_S2_lsr_i_r_xacc>;
+def : T_RRI_pat <S2_asl_i_r_xacc, int_hexagon_S2_asl_i_r_xacc>;
+
+def : T_PPI_pat <S2_asr_i_p_nac, int_hexagon_S2_asr_i_p_nac>;
+def : T_PPI_pat <S2_lsr_i_p_nac, int_hexagon_S2_lsr_i_p_nac>;
+def : T_PPI_pat <S2_asl_i_p_nac, int_hexagon_S2_asl_i_p_nac>;
+def : T_PPI_pat <S2_asr_i_p_acc, int_hexagon_S2_asr_i_p_acc>;
+def : T_PPI_pat <S2_lsr_i_p_acc, int_hexagon_S2_lsr_i_p_acc>;
+def : T_PPI_pat <S2_asl_i_p_acc, int_hexagon_S2_asl_i_p_acc>;
+
+def : T_PPI_pat <S2_asr_i_p_and, int_hexagon_S2_asr_i_p_and>;
+def : T_PPI_pat <S2_lsr_i_p_and, int_hexagon_S2_lsr_i_p_and>;
+def : T_PPI_pat <S2_asl_i_p_and, int_hexagon_S2_asl_i_p_and>;
+def : T_PPI_pat <S2_asr_i_p_or, int_hexagon_S2_asr_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_or, int_hexagon_S2_lsr_i_p_or>;
+def : T_PPI_pat <S2_asl_i_p_or, int_hexagon_S2_asl_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_xacc, int_hexagon_S2_lsr_i_p_xacc>;
+def : T_PPI_pat <S2_asl_i_p_xacc, int_hexagon_S2_asl_i_p_xacc>;
+
+def : T_RRR_pat <S2_asr_r_r_nac, int_hexagon_S2_asr_r_r_nac>;
+def : T_RRR_pat <S2_lsr_r_r_nac, int_hexagon_S2_lsr_r_r_nac>;
+def : T_RRR_pat <S2_asl_r_r_nac, int_hexagon_S2_asl_r_r_nac>;
+def : T_RRR_pat <S2_lsl_r_r_nac, int_hexagon_S2_lsl_r_r_nac>;
+def : T_RRR_pat <S2_asr_r_r_acc, int_hexagon_S2_asr_r_r_acc>;
+def : T_RRR_pat <S2_lsr_r_r_acc, int_hexagon_S2_lsr_r_r_acc>;
+def : T_RRR_pat <S2_asl_r_r_acc, int_hexagon_S2_asl_r_r_acc>;
+def : T_RRR_pat <S2_lsl_r_r_acc, int_hexagon_S2_lsl_r_r_acc>;
+
+def : T_RRR_pat <S2_asr_r_r_and, int_hexagon_S2_asr_r_r_and>;
+def : T_RRR_pat <S2_lsr_r_r_and, int_hexagon_S2_lsr_r_r_and>;
+def : T_RRR_pat <S2_asl_r_r_and, int_hexagon_S2_asl_r_r_and>;
+def : T_RRR_pat <S2_lsl_r_r_and, int_hexagon_S2_lsl_r_r_and>;
+def : T_RRR_pat <S2_asr_r_r_or, int_hexagon_S2_asr_r_r_or>;
+def : T_RRR_pat <S2_lsr_r_r_or, int_hexagon_S2_lsr_r_r_or>;
+def : T_RRR_pat <S2_asl_r_r_or, int_hexagon_S2_asl_r_r_or>;
+def : T_RRR_pat <S2_lsl_r_r_or, int_hexagon_S2_lsl_r_r_or>;
+
+def : T_PPR_pat <S2_asr_r_p_nac, int_hexagon_S2_asr_r_p_nac>;
+def : T_PPR_pat <S2_lsr_r_p_nac, int_hexagon_S2_lsr_r_p_nac>;
+def : T_PPR_pat <S2_asl_r_p_nac, int_hexagon_S2_asl_r_p_nac>;
+def : T_PPR_pat <S2_lsl_r_p_nac, int_hexagon_S2_lsl_r_p_nac>;
+def : T_PPR_pat <S2_asr_r_p_acc, int_hexagon_S2_asr_r_p_acc>;
+def : T_PPR_pat <S2_lsr_r_p_acc, int_hexagon_S2_lsr_r_p_acc>;
+def : T_PPR_pat <S2_asl_r_p_acc, int_hexagon_S2_asl_r_p_acc>;
+def : T_PPR_pat <S2_lsl_r_p_acc, int_hexagon_S2_lsl_r_p_acc>;
+
+def : T_PPR_pat <S2_asr_r_p_and, int_hexagon_S2_asr_r_p_and>;
+def : T_PPR_pat <S2_lsr_r_p_and, int_hexagon_S2_lsr_r_p_and>;
+def : T_PPR_pat <S2_asl_r_p_and, int_hexagon_S2_asl_r_p_and>;
+def : T_PPR_pat <S2_lsl_r_p_and, int_hexagon_S2_lsl_r_p_and>;
+def : T_PPR_pat <S2_asr_r_p_or, int_hexagon_S2_asr_r_p_or>;
+def : T_PPR_pat <S2_lsr_r_p_or, int_hexagon_S2_lsr_r_p_or>;
+def : T_PPR_pat <S2_asl_r_p_or, int_hexagon_S2_asl_r_p_or>;
+def : T_PPR_pat <S2_lsl_r_p_or, int_hexagon_S2_lsl_r_p_or>;
+
/********************************************************************
-* CR *
+* ALU32/ALU *
*********************************************************************/
+def : T_RR_pat<A2_add, int_hexagon_A2_add>;
+def : T_RI_pat<A2_addi, int_hexagon_A2_addi>;
+def : T_RR_pat<A2_sub, int_hexagon_A2_sub>;
+def : T_IR_pat<A2_subri, int_hexagon_A2_subri>;
+def : T_RR_pat<A2_and, int_hexagon_A2_and>;
+def : T_RI_pat<A2_andir, int_hexagon_A2_andir>;
+def : T_RR_pat<A2_or, int_hexagon_A2_or>;
+def : T_RI_pat<A2_orir, int_hexagon_A2_orir>;
+def : T_RR_pat<A2_xor, int_hexagon_A2_xor>;
+def : T_RR_pat<A2_combinew, int_hexagon_A2_combinew>;
+
+// Assembler mapped from Rd32=not(Rs32) to Rd32=sub(#-1,Rs32)
+def : Pat <(int_hexagon_A2_not (I32:$Rs)),
+ (A2_subri -1, IntRegs:$Rs)>;
+
+// Assembler mapped from Rd32=neg(Rs32) to Rd32=sub(#0,Rs32)
+def : Pat <(int_hexagon_A2_neg IntRegs:$Rs),
+ (A2_subri 0, IntRegs:$Rs)>;
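+
+// Illustrative note (not part of the patch): both mappings rely on the
+// two's complement identities ~x == -1 - x and -x == 0 - x. A hedged C
+// sketch, with hypothetical helper names:
+//   uint32_t not_rs(uint32_t x) { return 0xFFFFFFFFu - x; }  // A2_subri -1
+//   uint32_t neg_rs(uint32_t x) { return 0u - x; }           // A2_subri 0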
+
+// Transfer immediate
+def : Pat <(int_hexagon_A2_tfril (I32:$Rs), u16_0ImmPred:$Is),
+ (A2_tfril IntRegs:$Rs, u16_0ImmPred:$Is)>;
+def : Pat <(int_hexagon_A2_tfrih (I32:$Rs), u16_0ImmPred:$Is),
+ (A2_tfrih IntRegs:$Rs, u16_0ImmPred:$Is)>;
+
+// Transfer Register/immediate.
+def : T_R_pat <A2_tfr, int_hexagon_A2_tfr>;
+def : T_I_pat <A2_tfrsi, int_hexagon_A2_tfrsi>;
+
+// Assembler mapped from Rdd32=Rss32 to Rdd32=combine(Rss.H32,Rss.L32)
+def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
+ (A2_combinew (HiReg DoubleRegs:$src), (LoReg DoubleRegs:$src))>;
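+
+// Illustrative note (not part of the patch): the 64-bit copy is synthesized
+// by splitting $src into its 32-bit halves with HiReg/LoReg and re-joining
+// them with A2_combinew. A hedged C sketch, with a hypothetical name:
+//   uint64_t tfrp(uint64_t x) {
+//     uint32_t hi = (uint32_t)(x >> 32), lo = (uint32_t)x;
+//     return ((uint64_t)hi << 32) | lo;  // combine(hi, lo) == x
+//   }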
-// CR / Logical reductions on predicates.
-def HEXAGON_C2_all8:
- qi_SInst_qi <"all8", int_hexagon_C2_all8>;
-def HEXAGON_C2_any8:
- qi_SInst_qi <"any8", int_hexagon_C2_any8>;
-
-// CR / Logical operations on predicates.
-def HEXAGON_C2_pxfer_map:
- qi_SInst_qi_pxfer <"", int_hexagon_C2_pxfer_map>;
-def HEXAGON_C2_and:
- qi_SInst_qiqi <"and", int_hexagon_C2_and>;
-def HEXAGON_C2_andn:
- qi_SInst_qiqi_neg <"and", int_hexagon_C2_andn>;
-def HEXAGON_C2_not:
- qi_SInst_qi <"not", int_hexagon_C2_not>;
-def HEXAGON_C2_or:
- qi_SInst_qiqi <"or", int_hexagon_C2_or>;
-def HEXAGON_C2_orn:
- qi_SInst_qiqi_neg <"or", int_hexagon_C2_orn>;
-def HEXAGON_C2_xor:
- qi_SInst_qiqi <"xor", int_hexagon_C2_xor>;
-
+/********************************************************************
+* ALU32/PERM *
+*********************************************************************/
+// Combine
+def: T_RR_pat<A2_combine_hh, int_hexagon_A2_combine_hh>;
+def: T_RR_pat<A2_combine_hl, int_hexagon_A2_combine_hl>;
+def: T_RR_pat<A2_combine_lh, int_hexagon_A2_combine_lh>;
+def: T_RR_pat<A2_combine_ll, int_hexagon_A2_combine_ll>;
+
+def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s8ExtPred, s8ImmPred>;
+
+def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs),
+ (I32:$Rt))),
+ (i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
+
+// Mux
+def : T_QRI_pat<C2_muxir, int_hexagon_C2_muxir, s8ExtPred>;
+def : T_QIR_pat<C2_muxri, int_hexagon_C2_muxri, s8ExtPred>;
+def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s8ExtPred, s8ImmPred>;
+
+// Shift halfword
+def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
+def : T_R_pat<A2_asrh, int_hexagon_A2_asrh>;
+def : T_R_pat<A2_asrh, int_hexagon_SI_to_SXTHI_asrh>;
+
+// Sign/zero extend
+def : T_R_pat<A2_sxth, int_hexagon_A2_sxth>;
+def : T_R_pat<A2_sxtb, int_hexagon_A2_sxtb>;
+def : T_R_pat<A2_zxth, int_hexagon_A2_zxth>;
+def : T_R_pat<A2_zxtb, int_hexagon_A2_zxtb>;
/********************************************************************
-* MTYPE/ALU *
+* ALU32/PRED *
*********************************************************************/
+// Compare
+def : T_RR_pat<C2_cmpeq, int_hexagon_C2_cmpeq>;
+def : T_RR_pat<C2_cmpgt, int_hexagon_C2_cmpgt>;
+def : T_RR_pat<C2_cmpgtu, int_hexagon_C2_cmpgtu>;
+
+def : T_RI_pat<C2_cmpeqi, int_hexagon_C2_cmpeqi, s10ExtPred>;
+def : T_RI_pat<C2_cmpgti, int_hexagon_C2_cmpgti, s10ExtPred>;
+def : T_RI_pat<C2_cmpgtui, int_hexagon_C2_cmpgtui, u9ExtPred>;
-// MTYPE / ALU / Add and accumulate.
-def HEXAGON_M2_acci:
- si_MInst_sisisi_acc <"add", int_hexagon_M2_acci>;
-def HEXAGON_M2_accii:
- si_MInst_sisis8_acc <"add", int_hexagon_M2_accii>;
-def HEXAGON_M2_nacci:
- si_MInst_sisisi_nac <"add", int_hexagon_M2_nacci>;
-def HEXAGON_M2_naccii:
- si_MInst_sisis8_nac <"add", int_hexagon_M2_naccii>;
+def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s8ExtPred:$src2)),
+ (i32 (C2_cmpgti (I32:$src1),
+ (DEC_CONST_SIGNED s8ExtPred:$src2)))>;
-// MTYPE / ALU / Subtract and accumulate.
-def HEXAGON_M2_subacc:
- si_MInst_sisisi_acc <"sub", int_hexagon_M2_subacc>;
+def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u8ExtPred:$src2)),
+ (i32 (C2_cmpgtui (I32:$src1),
+ (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>;
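+
+// Illustrative note (not part of the patch): both mappings use the identity
+// x >= c  <=>  x > c - 1; the unsigned wrap case c == 0 is caught by the
+// dedicated pattern below. A hedged C sketch, with hypothetical names:
+//   bool cmpgei (int32_t x, int32_t c)   { return x > c - 1; } // c > INT32_MIN
+//   bool cmpgeui(uint32_t x, uint32_t c) { return c == 0 || x > c - 1u; }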
-// MTYPE / ALU / Vector absolute difference.
-def HEXAGON_M2_vabsdiffh:
- di_MInst_didi <"vabsdiffh",int_hexagon_M2_vabsdiffh>;
-def HEXAGON_M2_vabsdiffw:
- di_MInst_didi <"vabsdiffw",int_hexagon_M2_vabsdiffw>;
+// Map Pd=cmp.geu(Rs,#u8) to Pd=cmp.eq(Rs,Rs) when #u8 == 0: any unsigned
+// value satisfies x >= 0, and cmp.eq(Rs,Rs) is always true.
+def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), 0)),
+ (i32 (C2_cmpeq (I32:$src1), (I32:$src1)))>;
-// MTYPE / ALU / XOR and xor with destination.
-def HEXAGON_M2_xor_xacc:
- si_MInst_sisisi_xacc <"xor", int_hexagon_M2_xor_xacc>;
+def : Pat <(i32 (int_hexagon_C2_cmplt (I32:$src1),
+ (I32:$src2))),
+ (i32 (C2_cmpgt (I32:$src2), (I32:$src1)))>;
+def : Pat <(i32 (int_hexagon_C2_cmpltu (I32:$src1),
+ (I32:$src2))),
+ (i32 (C2_cmpgtu (I32:$src2), (I32:$src1)))>;
/********************************************************************
-* MTYPE/COMPLEX *
+* ALU32/VH *
*********************************************************************/
+// Vector add, subtract, average halfwords
+def: T_RR_pat<A2_svaddh, int_hexagon_A2_svaddh>;
+def: T_RR_pat<A2_svaddhs, int_hexagon_A2_svaddhs>;
+def: T_RR_pat<A2_svadduhs, int_hexagon_A2_svadduhs>;
-// MTYPE / COMPLEX / Complex multiply.
-// Rdd[-+]=cmpy(Rs, Rt:<<1]:sat
-def HEXAGON_M2_cmpys_s1:
- di_MInst_sisi_s1_sat <"cmpy", int_hexagon_M2_cmpys_s1>;
-def HEXAGON_M2_cmpys_s0:
- di_MInst_sisi_sat <"cmpy", int_hexagon_M2_cmpys_s0>;
-def HEXAGON_M2_cmpysc_s1:
- di_MInst_sisi_s1_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s1>;
-def HEXAGON_M2_cmpysc_s0:
- di_MInst_sisi_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s0>;
-
-def HEXAGON_M2_cmacs_s1:
- di_MInst_disisi_acc_s1_sat <"cmpy", int_hexagon_M2_cmacs_s1>;
-def HEXAGON_M2_cmacs_s0:
- di_MInst_disisi_acc_sat <"cmpy", int_hexagon_M2_cmacs_s0>;
-def HEXAGON_M2_cmacsc_s1:
- di_MInst_disisi_acc_s1_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s1>;
-def HEXAGON_M2_cmacsc_s0:
- di_MInst_disisi_acc_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s0>;
-
-def HEXAGON_M2_cnacs_s1:
- di_MInst_disisi_nac_s1_sat <"cmpy", int_hexagon_M2_cnacs_s1>;
-def HEXAGON_M2_cnacs_s0:
- di_MInst_disisi_nac_sat <"cmpy", int_hexagon_M2_cnacs_s0>;
-def HEXAGON_M2_cnacsc_s1:
- di_MInst_disisi_nac_s1_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s1>;
-def HEXAGON_M2_cnacsc_s0:
- di_MInst_disisi_nac_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s0>;
-
-// MTYPE / COMPLEX / Complex multiply real or imaginary.
-def HEXAGON_M2_cmpyr_s0:
- di_MInst_sisi <"cmpyr", int_hexagon_M2_cmpyr_s0>;
-def HEXAGON_M2_cmacr_s0:
- di_MInst_disisi_acc <"cmpyr", int_hexagon_M2_cmacr_s0>;
-
-def HEXAGON_M2_cmpyi_s0:
- di_MInst_sisi <"cmpyi", int_hexagon_M2_cmpyi_s0>;
-def HEXAGON_M2_cmaci_s0:
- di_MInst_disisi_acc <"cmpyi", int_hexagon_M2_cmaci_s0>;
-
-// MTYPE / COMPLEX / Complex multiply with round and pack.
-// Rxx32+=cmpy(Rs32,[*]Rt32:<<1]:rnd:sat
-def HEXAGON_M2_cmpyrs_s0:
- si_MInst_sisi_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s0>;
-def HEXAGON_M2_cmpyrs_s1:
- si_MInst_sisi_s1_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s1>;
-
-def HEXAGON_M2_cmpyrsc_s0:
- si_MInst_sisi_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s0>;
-def HEXAGON_M2_cmpyrsc_s1:
- si_MInst_sisi_s1_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s1>;
-
-//MTYPE / COMPLEX / Vector complex multiply real or imaginary.
-def HEXAGON_M2_vcmpy_s0_sat_i:
- di_MInst_didi_sat <"vcmpyi", int_hexagon_M2_vcmpy_s0_sat_i>;
-def HEXAGON_M2_vcmpy_s1_sat_i:
- di_MInst_didi_s1_sat <"vcmpyi", int_hexagon_M2_vcmpy_s1_sat_i>;
-
-def HEXAGON_M2_vcmpy_s0_sat_r:
- di_MInst_didi_sat <"vcmpyr", int_hexagon_M2_vcmpy_s0_sat_r>;
-def HEXAGON_M2_vcmpy_s1_sat_r:
- di_MInst_didi_s1_sat <"vcmpyr", int_hexagon_M2_vcmpy_s1_sat_r>;
-
-def HEXAGON_M2_vcmac_s0_sat_i:
- di_MInst_dididi_acc_sat <"vcmpyi", int_hexagon_M2_vcmac_s0_sat_i>;
-def HEXAGON_M2_vcmac_s0_sat_r:
- di_MInst_dididi_acc_sat <"vcmpyr", int_hexagon_M2_vcmac_s0_sat_r>;
-
-//MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
-def HEXAGON_M2_vrcmpyi_s0:
- di_MInst_didi <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0>;
-def HEXAGON_M2_vrcmpyr_s0:
- di_MInst_didi <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0>;
-
-def HEXAGON_M2_vrcmpyi_s0c:
- di_MInst_didi_conj <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0c>;
-def HEXAGON_M2_vrcmpyr_s0c:
- di_MInst_didi_conj <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0c>;
-
-def HEXAGON_M2_vrcmaci_s0:
- di_MInst_dididi_acc <"vrcmpyi", int_hexagon_M2_vrcmaci_s0>;
-def HEXAGON_M2_vrcmacr_s0:
- di_MInst_dididi_acc <"vrcmpyr", int_hexagon_M2_vrcmacr_s0>;
-
-def HEXAGON_M2_vrcmaci_s0c:
- di_MInst_dididi_acc_conj <"vrcmpyi", int_hexagon_M2_vrcmaci_s0c>;
-def HEXAGON_M2_vrcmacr_s0c:
- di_MInst_dididi_acc_conj <"vrcmpyr", int_hexagon_M2_vrcmacr_s0c>;
+def: T_RR_pat<A2_svsubh, int_hexagon_A2_svsubh>;
+def: T_RR_pat<A2_svsubhs, int_hexagon_A2_svsubhs>;
+def: T_RR_pat<A2_svsubuhs, int_hexagon_A2_svsubuhs>;
+def: T_RR_pat<A2_svavgh, int_hexagon_A2_svavgh>;
+def: T_RR_pat<A2_svavghs, int_hexagon_A2_svavghs>;
+def: T_RR_pat<A2_svnavgh, int_hexagon_A2_svnavgh>;
/********************************************************************
-* MTYPE/MPYH *
+* ALU64/ALU *
*********************************************************************/
+def: T_RR_pat<A2_addsat, int_hexagon_A2_addsat>;
+def: T_RR_pat<A2_subsat, int_hexagon_A2_subsat>;
+def: T_PP_pat<A2_addp, int_hexagon_A2_addp>;
+def: T_PP_pat<A2_subp, int_hexagon_A2_subp>;
+
+def: T_PP_pat<A2_andp, int_hexagon_A2_andp>;
+def: T_PP_pat<A2_orp, int_hexagon_A2_orp>;
+def: T_PP_pat<A2_xorp, int_hexagon_A2_xorp>;
-// MTYPE / MPYH / Multiply and use lower result.
-//def HEXAGON_M2_mpysmi:
-//FIXME: Hexagon_M2_mpysmi should really by of the type si_MInst_sim9,
-// not si_MInst_sis9 - but for now, we will use s9.
-// def Hexagon_M2_mpysmi:
-// si_MInst_sim9 <"mpyi", int_hexagon_M2_mpysmi>;
-def Hexagon_M2_mpysmi:
- si_MInst_sis9 <"mpyi", int_hexagon_M2_mpysmi>;
-def HEXAGON_M2_mpyi:
- si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>;
-def HEXAGON_M2_mpyui:
- si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>;
-def HEXAGON_M2_macsip:
- si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>;
-def HEXAGON_M2_maci:
- si_MInst_sisisi_acc <"mpyi", int_hexagon_M2_maci>;
-def HEXAGON_M2_macsin:
- si_MInst_sisiu8_nac <"mpyi", int_hexagon_M2_macsin>;
-
-// MTYPE / MPYH / Multiply word by half (32x16).
-//Rdd[+]=vmpywoh(Rss,Rtt)[:<<1][:rnd][:sat]
-//Rdd[+]=vmpyweh(Rss,Rtt)[:<<1][:rnd][:sat]
-def HEXAGON_M2_mmpyl_rs1:
- di_MInst_didi_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs1>;
-def HEXAGON_M2_mmpyl_s1:
- di_MInst_didi_s1_sat <"vmpyweh", int_hexagon_M2_mmpyl_s1>;
-def HEXAGON_M2_mmpyl_rs0:
- di_MInst_didi_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs0>;
-def HEXAGON_M2_mmpyl_s0:
- di_MInst_didi_sat <"vmpyweh", int_hexagon_M2_mmpyl_s0>;
-def HEXAGON_M2_mmpyh_rs1:
- di_MInst_didi_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs1>;
-def HEXAGON_M2_mmpyh_s1:
- di_MInst_didi_s1_sat <"vmpywoh", int_hexagon_M2_mmpyh_s1>;
-def HEXAGON_M2_mmpyh_rs0:
- di_MInst_didi_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs0>;
-def HEXAGON_M2_mmpyh_s0:
- di_MInst_didi_sat <"vmpywoh", int_hexagon_M2_mmpyh_s0>;
-def HEXAGON_M2_mmacls_rs1:
- di_MInst_dididi_acc_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs1>;
-def HEXAGON_M2_mmacls_s1:
- di_MInst_dididi_acc_s1_sat <"vmpyweh", int_hexagon_M2_mmacls_s1>;
-def HEXAGON_M2_mmacls_rs0:
- di_MInst_dididi_acc_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs0>;
-def HEXAGON_M2_mmacls_s0:
- di_MInst_dididi_acc_sat <"vmpyweh", int_hexagon_M2_mmacls_s0>;
-def HEXAGON_M2_mmachs_rs1:
- di_MInst_dididi_acc_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs1>;
-def HEXAGON_M2_mmachs_s1:
- di_MInst_dididi_acc_s1_sat <"vmpywoh", int_hexagon_M2_mmachs_s1>;
-def HEXAGON_M2_mmachs_rs0:
- di_MInst_dididi_acc_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs0>;
-def HEXAGON_M2_mmachs_s0:
- di_MInst_dididi_acc_sat <"vmpywoh", int_hexagon_M2_mmachs_s0>;
-
-// MTYPE / MPYH / Multiply word by unsigned half (32x16).
-//Rdd[+]=vmpywouh(Rss,Rtt)[:<<1][:rnd][:sat]
-//Rdd[+]=vmpyweuh(Rss,Rtt)[:<<1][:rnd][:sat]
-def HEXAGON_M2_mmpyul_rs1:
- di_MInst_didi_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs1>;
-def HEXAGON_M2_mmpyul_s1:
- di_MInst_didi_s1_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s1>;
-def HEXAGON_M2_mmpyul_rs0:
- di_MInst_didi_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs0>;
-def HEXAGON_M2_mmpyul_s0:
- di_MInst_didi_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s0>;
-def HEXAGON_M2_mmpyuh_rs1:
- di_MInst_didi_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs1>;
-def HEXAGON_M2_mmpyuh_s1:
- di_MInst_didi_s1_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s1>;
-def HEXAGON_M2_mmpyuh_rs0:
- di_MInst_didi_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs0>;
-def HEXAGON_M2_mmpyuh_s0:
- di_MInst_didi_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s0>;
-def HEXAGON_M2_mmaculs_rs1:
- di_MInst_dididi_acc_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs1>;
-def HEXAGON_M2_mmaculs_s1:
- di_MInst_dididi_acc_s1_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s1>;
-def HEXAGON_M2_mmaculs_rs0:
- di_MInst_dididi_acc_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs0>;
-def HEXAGON_M2_mmaculs_s0:
- di_MInst_dididi_acc_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s0>;
-def HEXAGON_M2_mmacuhs_rs1:
- di_MInst_dididi_acc_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs1>;
-def HEXAGON_M2_mmacuhs_s1:
- di_MInst_dididi_acc_s1_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s1>;
-def HEXAGON_M2_mmacuhs_rs0:
- di_MInst_dididi_acc_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs0>;
-def HEXAGON_M2_mmacuhs_s0:
- di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>;
-
-// MTYPE / MPYH / Multiply and use upper result.
-def HEXAGON_M2_hmmpyh_rs1:
- si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>;
-def HEXAGON_M2_hmmpyl_rs1:
- si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>;
-def HEXAGON_M2_mpy_up:
- si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>;
-def HEXAGON_M2_dpmpyss_rnd_s0:
- si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>;
-def HEXAGON_M2_mpyu_up:
- si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>;
-
-// MTYPE / MPYH / Multiply and use full result.
-def HEXAGON_M2_dpmpyuu_s0:
- di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>;
-def HEXAGON_M2_dpmpyuu_acc_s0:
- di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>;
-def HEXAGON_M2_dpmpyuu_nac_s0:
- di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>;
-def HEXAGON_M2_dpmpyss_s0:
- di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>;
-def HEXAGON_M2_dpmpyss_acc_s0:
- di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>;
-def HEXAGON_M2_dpmpyss_nac_s0:
- di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>;
+def: T_PP_pat<C2_cmpeqp, int_hexagon_C2_cmpeqp>;
+def: T_PP_pat<C2_cmpgtp, int_hexagon_C2_cmpgtp>;
+def: T_PP_pat<C2_cmpgtup, int_hexagon_C2_cmpgtup>;
+def: T_PP_pat<S2_parityp, int_hexagon_S2_parityp>;
+def: T_RR_pat<S2_packhl, int_hexagon_S2_packhl>;
/********************************************************************
-* MTYPE/MPYS *
+* ALU64/VB *
*********************************************************************/
+// ALU64 - Vector add
+def : T_PP_pat <A2_vaddub, int_hexagon_A2_vaddub>;
+def : T_PP_pat <A2_vaddubs, int_hexagon_A2_vaddubs>;
+def : T_PP_pat <A2_vaddh, int_hexagon_A2_vaddh>;
+def : T_PP_pat <A2_vaddhs, int_hexagon_A2_vaddhs>;
+def : T_PP_pat <A2_vadduhs, int_hexagon_A2_vadduhs>;
+def : T_PP_pat <A2_vaddw, int_hexagon_A2_vaddw>;
+def : T_PP_pat <A2_vaddws, int_hexagon_A2_vaddws>;
+
+// ALU64 - Vector average
+def : T_PP_pat <A2_vavgub, int_hexagon_A2_vavgub>;
+def : T_PP_pat <A2_vavgubr, int_hexagon_A2_vavgubr>;
+def : T_PP_pat <A2_vavgh, int_hexagon_A2_vavgh>;
+def : T_PP_pat <A2_vavghr, int_hexagon_A2_vavghr>;
+def : T_PP_pat <A2_vavghcr, int_hexagon_A2_vavghcr>;
+def : T_PP_pat <A2_vavguh, int_hexagon_A2_vavguh>;
+def : T_PP_pat <A2_vavguhr, int_hexagon_A2_vavguhr>;
+
+def : T_PP_pat <A2_vavgw, int_hexagon_A2_vavgw>;
+def : T_PP_pat <A2_vavgwr, int_hexagon_A2_vavgwr>;
+def : T_PP_pat <A2_vavgwcr, int_hexagon_A2_vavgwcr>;
+def : T_PP_pat <A2_vavguw, int_hexagon_A2_vavguw>;
+def : T_PP_pat <A2_vavguwr, int_hexagon_A2_vavguwr>;
+
+// ALU64 - Vector negative average
+def : T_PP_pat <A2_vnavgh, int_hexagon_A2_vnavgh>;
+def : T_PP_pat <A2_vnavghr, int_hexagon_A2_vnavghr>;
+def : T_PP_pat <A2_vnavghcr, int_hexagon_A2_vnavghcr>;
+def : T_PP_pat <A2_vnavgw, int_hexagon_A2_vnavgw>;
+def : T_PP_pat <A2_vnavgwr, int_hexagon_A2_vnavgwr>;
+def : T_PP_pat <A2_vnavgwcr, int_hexagon_A2_vnavgwcr>;
+
+// ALU64 - Vector max
+def : T_PP_pat <A2_vmaxh, int_hexagon_A2_vmaxh>;
+def : T_PP_pat <A2_vmaxw, int_hexagon_A2_vmaxw>;
+def : T_PP_pat <A2_vmaxub, int_hexagon_A2_vmaxub>;
+def : T_PP_pat <A2_vmaxuh, int_hexagon_A2_vmaxuh>;
+def : T_PP_pat <A2_vmaxuw, int_hexagon_A2_vmaxuw>;
+
+// ALU64 - Vector min
+def : T_PP_pat <A2_vminh, int_hexagon_A2_vminh>;
+def : T_PP_pat <A2_vminw, int_hexagon_A2_vminw>;
+def : T_PP_pat <A2_vminub, int_hexagon_A2_vminub>;
+def : T_PP_pat <A2_vminuh, int_hexagon_A2_vminuh>;
+def : T_PP_pat <A2_vminuw, int_hexagon_A2_vminuw>;
+
+// ALU64 - Vector sub
+def : T_PP_pat <A2_vsubub, int_hexagon_A2_vsubub>;
+def : T_PP_pat <A2_vsububs, int_hexagon_A2_vsububs>;
+def : T_PP_pat <A2_vsubh, int_hexagon_A2_vsubh>;
+def : T_PP_pat <A2_vsubhs, int_hexagon_A2_vsubhs>;
+def : T_PP_pat <A2_vsubuhs, int_hexagon_A2_vsubuhs>;
+def : T_PP_pat <A2_vsubw, int_hexagon_A2_vsubw>;
+def : T_PP_pat <A2_vsubws, int_hexagon_A2_vsubws>;
+
+// ALU64 - Vector compare bytes
+def : T_PP_pat <A2_vcmpbeq, int_hexagon_A2_vcmpbeq>;
+def : T_PP_pat <A4_vcmpbgt, int_hexagon_A4_vcmpbgt>;
+def : T_PP_pat <A2_vcmpbgtu, int_hexagon_A2_vcmpbgtu>;
+
+// ALU64 - Vector compare halfwords
+def : T_PP_pat <A2_vcmpheq, int_hexagon_A2_vcmpheq>;
+def : T_PP_pat <A2_vcmphgt, int_hexagon_A2_vcmphgt>;
+def : T_PP_pat <A2_vcmphgtu, int_hexagon_A2_vcmphgtu>;
+
+// ALU64 - Vector compare words
+def : T_PP_pat <A2_vcmpweq, int_hexagon_A2_vcmpweq>;
+def : T_PP_pat <A2_vcmpwgt, int_hexagon_A2_vcmpwgt>;
+def : T_PP_pat <A2_vcmpwgtu, int_hexagon_A2_vcmpwgtu>;
-// MTYPE / MPYS / Scalar 16x16 multiply signed.
-//Rd=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1]|
-// [:<<0[:rnd|:sat|:rnd:sat]|:<<1[:rnd|:sat|:rnd:sat]]]
-def HEXAGON_M2_mpy_hh_s0:
- si_MInst_sisi_hh <"mpy", int_hexagon_M2_mpy_hh_s0>;
-def HEXAGON_M2_mpy_hh_s1:
- si_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpy_hh_s1>;
-def HEXAGON_M2_mpy_rnd_hh_s1:
- si_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_rnd_hh_s1>;
-def HEXAGON_M2_mpy_sat_rnd_hh_s1:
- si_MInst_sisi_sat_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s1>;
-def HEXAGON_M2_mpy_sat_hh_s1:
- si_MInst_sisi_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_hh_s1>;
-def HEXAGON_M2_mpy_rnd_hh_s0:
- si_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpy_rnd_hh_s0>;
-def HEXAGON_M2_mpy_sat_rnd_hh_s0:
- si_MInst_sisi_sat_rnd_hh <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s0>;
-def HEXAGON_M2_mpy_sat_hh_s0:
- si_MInst_sisi_sat_hh <"mpy", int_hexagon_M2_mpy_sat_hh_s0>;
-
-def HEXAGON_M2_mpy_hl_s0:
- si_MInst_sisi_hl <"mpy", int_hexagon_M2_mpy_hl_s0>;
-def HEXAGON_M2_mpy_hl_s1:
- si_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpy_hl_s1>;
-def HEXAGON_M2_mpy_rnd_hl_s1:
- si_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_rnd_hl_s1>;
-def HEXAGON_M2_mpy_sat_rnd_hl_s1:
- si_MInst_sisi_sat_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s1>;
-def HEXAGON_M2_mpy_sat_hl_s1:
- si_MInst_sisi_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_hl_s1>;
-def HEXAGON_M2_mpy_rnd_hl_s0:
- si_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpy_rnd_hl_s0>;
-def HEXAGON_M2_mpy_sat_rnd_hl_s0:
- si_MInst_sisi_sat_rnd_hl <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s0>;
-def HEXAGON_M2_mpy_sat_hl_s0:
- si_MInst_sisi_sat_hl <"mpy", int_hexagon_M2_mpy_sat_hl_s0>;
-
-def HEXAGON_M2_mpy_lh_s0:
- si_MInst_sisi_lh <"mpy", int_hexagon_M2_mpy_lh_s0>;
-def HEXAGON_M2_mpy_lh_s1:
- si_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpy_lh_s1>;
-def HEXAGON_M2_mpy_rnd_lh_s1:
- si_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_rnd_lh_s1>;
-def HEXAGON_M2_mpy_sat_rnd_lh_s1:
- si_MInst_sisi_sat_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s1>;
-def HEXAGON_M2_mpy_sat_lh_s1:
- si_MInst_sisi_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_lh_s1>;
-def HEXAGON_M2_mpy_rnd_lh_s0:
- si_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpy_rnd_lh_s0>;
-def HEXAGON_M2_mpy_sat_rnd_lh_s0:
- si_MInst_sisi_sat_rnd_lh <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s0>;
-def HEXAGON_M2_mpy_sat_lh_s0:
- si_MInst_sisi_sat_lh <"mpy", int_hexagon_M2_mpy_sat_lh_s0>;
-
-def HEXAGON_M2_mpy_ll_s0:
- si_MInst_sisi_ll <"mpy", int_hexagon_M2_mpy_ll_s0>;
-def HEXAGON_M2_mpy_ll_s1:
- si_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpy_ll_s1>;
-def HEXAGON_M2_mpy_rnd_ll_s1:
- si_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_rnd_ll_s1>;
-def HEXAGON_M2_mpy_sat_rnd_ll_s1:
- si_MInst_sisi_sat_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s1>;
-def HEXAGON_M2_mpy_sat_ll_s1:
- si_MInst_sisi_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_ll_s1>;
-def HEXAGON_M2_mpy_rnd_ll_s0:
- si_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpy_rnd_ll_s0>;
-def HEXAGON_M2_mpy_sat_rnd_ll_s0:
- si_MInst_sisi_sat_rnd_ll <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s0>;
-def HEXAGON_M2_mpy_sat_ll_s0:
- si_MInst_sisi_sat_ll <"mpy", int_hexagon_M2_mpy_sat_ll_s0>;
-
-//Rdd=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:rnd|:<<1:rnd]]
-def HEXAGON_M2_mpyd_hh_s0:
- di_MInst_sisi_hh <"mpy", int_hexagon_M2_mpyd_hh_s0>;
-def HEXAGON_M2_mpyd_hh_s1:
- di_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpyd_hh_s1>;
-def HEXAGON_M2_mpyd_rnd_hh_s1:
- di_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hh_s1>;
-def HEXAGON_M2_mpyd_rnd_hh_s0:
- di_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpyd_rnd_hh_s0>;
-
-def HEXAGON_M2_mpyd_hl_s0:
- di_MInst_sisi_hl <"mpy", int_hexagon_M2_mpyd_hl_s0>;
-def HEXAGON_M2_mpyd_hl_s1:
- di_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpyd_hl_s1>;
-def HEXAGON_M2_mpyd_rnd_hl_s1:
- di_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hl_s1>;
-def HEXAGON_M2_mpyd_rnd_hl_s0:
- di_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpyd_rnd_hl_s0>;
-
-def HEXAGON_M2_mpyd_lh_s0:
- di_MInst_sisi_lh <"mpy", int_hexagon_M2_mpyd_lh_s0>;
-def HEXAGON_M2_mpyd_lh_s1:
- di_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpyd_lh_s1>;
-def HEXAGON_M2_mpyd_rnd_lh_s1:
- di_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_lh_s1>;
-def HEXAGON_M2_mpyd_rnd_lh_s0:
- di_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpyd_rnd_lh_s0>;
-
-def HEXAGON_M2_mpyd_ll_s0:
- di_MInst_sisi_ll <"mpy", int_hexagon_M2_mpyd_ll_s0>;
-def HEXAGON_M2_mpyd_ll_s1:
- di_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpyd_ll_s1>;
-def HEXAGON_M2_mpyd_rnd_ll_s1:
- di_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpyd_rnd_ll_s1>;
-def HEXAGON_M2_mpyd_rnd_ll_s0:
- di_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpyd_rnd_ll_s0>;
-
-//Rx+=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]]
-def HEXAGON_M2_mpy_acc_hh_s0:
- si_MInst_sisisi_acc_hh <"mpy", int_hexagon_M2_mpy_acc_hh_s0>;
-def HEXAGON_M2_mpy_acc_hh_s1:
- si_MInst_sisisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_hh_s1>;
-def HEXAGON_M2_mpy_acc_sat_hh_s1:
- si_MInst_sisisi_acc_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s1>;
-def HEXAGON_M2_mpy_acc_sat_hh_s0:
- si_MInst_sisisi_acc_sat_hh <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s0>;
-
-def HEXAGON_M2_mpy_acc_hl_s0:
- si_MInst_sisisi_acc_hl <"mpy", int_hexagon_M2_mpy_acc_hl_s0>;
-def HEXAGON_M2_mpy_acc_hl_s1:
- si_MInst_sisisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_hl_s1>;
-def HEXAGON_M2_mpy_acc_sat_hl_s1:
- si_MInst_sisisi_acc_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s1>;
-def HEXAGON_M2_mpy_acc_sat_hl_s0:
- si_MInst_sisisi_acc_sat_hl <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s0>;
-
-def HEXAGON_M2_mpy_acc_lh_s0:
- si_MInst_sisisi_acc_lh <"mpy", int_hexagon_M2_mpy_acc_lh_s0>;
-def HEXAGON_M2_mpy_acc_lh_s1:
- si_MInst_sisisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_lh_s1>;
-def HEXAGON_M2_mpy_acc_sat_lh_s1:
- si_MInst_sisisi_acc_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s1>;
-def HEXAGON_M2_mpy_acc_sat_lh_s0:
- si_MInst_sisisi_acc_sat_lh <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s0>;
-
-def HEXAGON_M2_mpy_acc_ll_s0:
- si_MInst_sisisi_acc_ll <"mpy", int_hexagon_M2_mpy_acc_ll_s0>;
-def HEXAGON_M2_mpy_acc_ll_s1:
- si_MInst_sisisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_ll_s1>;
-def HEXAGON_M2_mpy_acc_sat_ll_s1:
- si_MInst_sisisi_acc_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s1>;
-def HEXAGON_M2_mpy_acc_sat_ll_s0:
- si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>;
-
-//Rx-=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]]
-def HEXAGON_M2_mpy_nac_hh_s0:
- si_MInst_sisisi_nac_hh <"mpy", int_hexagon_M2_mpy_nac_hh_s0>;
-def HEXAGON_M2_mpy_nac_hh_s1:
- si_MInst_sisisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_hh_s1>;
-def HEXAGON_M2_mpy_nac_sat_hh_s1:
- si_MInst_sisisi_nac_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s1>;
-def HEXAGON_M2_mpy_nac_sat_hh_s0:
- si_MInst_sisisi_nac_sat_hh <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s0>;
-
-def HEXAGON_M2_mpy_nac_hl_s0:
- si_MInst_sisisi_nac_hl <"mpy", int_hexagon_M2_mpy_nac_hl_s0>;
-def HEXAGON_M2_mpy_nac_hl_s1:
- si_MInst_sisisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_hl_s1>;
-def HEXAGON_M2_mpy_nac_sat_hl_s1:
- si_MInst_sisisi_nac_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s1>;
-def HEXAGON_M2_mpy_nac_sat_hl_s0:
- si_MInst_sisisi_nac_sat_hl <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s0>;
-
-def HEXAGON_M2_mpy_nac_lh_s0:
- si_MInst_sisisi_nac_lh <"mpy", int_hexagon_M2_mpy_nac_lh_s0>;
-def HEXAGON_M2_mpy_nac_lh_s1:
- si_MInst_sisisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_lh_s1>;
-def HEXAGON_M2_mpy_nac_sat_lh_s1:
- si_MInst_sisisi_nac_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s1>;
-def HEXAGON_M2_mpy_nac_sat_lh_s0:
- si_MInst_sisisi_nac_sat_lh <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s0>;
-
-def HEXAGON_M2_mpy_nac_ll_s0:
- si_MInst_sisisi_nac_ll <"mpy", int_hexagon_M2_mpy_nac_ll_s0>;
-def HEXAGON_M2_mpy_nac_ll_s1:
- si_MInst_sisisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_ll_s1>;
-def HEXAGON_M2_mpy_nac_sat_ll_s1:
- si_MInst_sisisi_nac_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s1>;
-def HEXAGON_M2_mpy_nac_sat_ll_s0:
- si_MInst_sisisi_nac_sat_ll <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s0>;
-
-//Rx+=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1]
-def HEXAGON_M2_mpyd_acc_hh_s0:
- di_MInst_disisi_acc_hh <"mpy", int_hexagon_M2_mpyd_acc_hh_s0>;
-def HEXAGON_M2_mpyd_acc_hh_s1:
- di_MInst_disisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpyd_acc_hh_s1>;
-
-def HEXAGON_M2_mpyd_acc_hl_s0:
- di_MInst_disisi_acc_hl <"mpy", int_hexagon_M2_mpyd_acc_hl_s0>;
-def HEXAGON_M2_mpyd_acc_hl_s1:
- di_MInst_disisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpyd_acc_hl_s1>;
-
-def HEXAGON_M2_mpyd_acc_lh_s0:
- di_MInst_disisi_acc_lh <"mpy", int_hexagon_M2_mpyd_acc_lh_s0>;
-def HEXAGON_M2_mpyd_acc_lh_s1:
- di_MInst_disisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpyd_acc_lh_s1>;
-
-def HEXAGON_M2_mpyd_acc_ll_s0:
- di_MInst_disisi_acc_ll <"mpy", int_hexagon_M2_mpyd_acc_ll_s0>;
-def HEXAGON_M2_mpyd_acc_ll_s1:
- di_MInst_disisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpyd_acc_ll_s1>;
-
-//Rx-=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1]
-def HEXAGON_M2_mpyd_nac_hh_s0:
- di_MInst_disisi_nac_hh <"mpy", int_hexagon_M2_mpyd_nac_hh_s0>;
-def HEXAGON_M2_mpyd_nac_hh_s1:
- di_MInst_disisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpyd_nac_hh_s1>;
-
-def HEXAGON_M2_mpyd_nac_hl_s0:
- di_MInst_disisi_nac_hl <"mpy", int_hexagon_M2_mpyd_nac_hl_s0>;
-def HEXAGON_M2_mpyd_nac_hl_s1:
- di_MInst_disisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpyd_nac_hl_s1>;
-
-def HEXAGON_M2_mpyd_nac_lh_s0:
- di_MInst_disisi_nac_lh <"mpy", int_hexagon_M2_mpyd_nac_lh_s0>;
-def HEXAGON_M2_mpyd_nac_lh_s1:
- di_MInst_disisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpyd_nac_lh_s1>;
-
-def HEXAGON_M2_mpyd_nac_ll_s0:
- di_MInst_disisi_nac_ll <"mpy", int_hexagon_M2_mpyd_nac_ll_s0>;
-def HEXAGON_M2_mpyd_nac_ll_s1:
- di_MInst_disisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpyd_nac_ll_s1>;
-
-// MTYPE / MPYS / Scalar 16x16 multiply unsigned.
-//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyu_hh_s0:
- si_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyu_hh_s0>;
-def HEXAGON_M2_mpyu_hh_s1:
- si_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyu_hh_s1>;
-def HEXAGON_M2_mpyu_hl_s0:
- si_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyu_hl_s0>;
-def HEXAGON_M2_mpyu_hl_s1:
- si_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyu_hl_s1>;
-def HEXAGON_M2_mpyu_lh_s0:
- si_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyu_lh_s0>;
-def HEXAGON_M2_mpyu_lh_s1:
- si_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyu_lh_s1>;
-def HEXAGON_M2_mpyu_ll_s0:
- si_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyu_ll_s0>;
-def HEXAGON_M2_mpyu_ll_s1:
- si_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyu_ll_s1>;
-
-//Rdd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyud_hh_s0:
- di_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyud_hh_s0>;
-def HEXAGON_M2_mpyud_hh_s1:
- di_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyud_hh_s1>;
-def HEXAGON_M2_mpyud_hl_s0:
- di_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyud_hl_s0>;
-def HEXAGON_M2_mpyud_hl_s1:
- di_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyud_hl_s1>;
-def HEXAGON_M2_mpyud_lh_s0:
- di_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyud_lh_s0>;
-def HEXAGON_M2_mpyud_lh_s1:
- di_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyud_lh_s1>;
-def HEXAGON_M2_mpyud_ll_s0:
- di_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyud_ll_s0>;
-def HEXAGON_M2_mpyud_ll_s1:
- di_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyud_ll_s1>;
-
-//Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyu_acc_hh_s0:
- si_MInst_sisisi_acc_hh <"mpyu", int_hexagon_M2_mpyu_acc_hh_s0>;
-def HEXAGON_M2_mpyu_acc_hh_s1:
- si_MInst_sisisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hh_s1>;
-def HEXAGON_M2_mpyu_acc_hl_s0:
- si_MInst_sisisi_acc_hl <"mpyu", int_hexagon_M2_mpyu_acc_hl_s0>;
-def HEXAGON_M2_mpyu_acc_hl_s1:
- si_MInst_sisisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hl_s1>;
-def HEXAGON_M2_mpyu_acc_lh_s0:
- si_MInst_sisisi_acc_lh <"mpyu", int_hexagon_M2_mpyu_acc_lh_s0>;
-def HEXAGON_M2_mpyu_acc_lh_s1:
- si_MInst_sisisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_lh_s1>;
-def HEXAGON_M2_mpyu_acc_ll_s0:
- si_MInst_sisisi_acc_ll <"mpyu", int_hexagon_M2_mpyu_acc_ll_s0>;
-def HEXAGON_M2_mpyu_acc_ll_s1:
- si_MInst_sisisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyu_acc_ll_s1>;
-
-//Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyu_nac_hh_s0:
- si_MInst_sisisi_nac_hh <"mpyu", int_hexagon_M2_mpyu_nac_hh_s0>;
-def HEXAGON_M2_mpyu_nac_hh_s1:
- si_MInst_sisisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hh_s1>;
-def HEXAGON_M2_mpyu_nac_hl_s0:
- si_MInst_sisisi_nac_hl <"mpyu", int_hexagon_M2_mpyu_nac_hl_s0>;
-def HEXAGON_M2_mpyu_nac_hl_s1:
- si_MInst_sisisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hl_s1>;
-def HEXAGON_M2_mpyu_nac_lh_s0:
- si_MInst_sisisi_nac_lh <"mpyu", int_hexagon_M2_mpyu_nac_lh_s0>;
-def HEXAGON_M2_mpyu_nac_lh_s1:
- si_MInst_sisisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_lh_s1>;
-def HEXAGON_M2_mpyu_nac_ll_s0:
- si_MInst_sisisi_nac_ll <"mpyu", int_hexagon_M2_mpyu_nac_ll_s0>;
-def HEXAGON_M2_mpyu_nac_ll_s1:
- si_MInst_sisisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyu_nac_ll_s1>;
-
-//Rdd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyud_acc_hh_s0:
- di_MInst_disisi_acc_hh <"mpyu", int_hexagon_M2_mpyud_acc_hh_s0>;
-def HEXAGON_M2_mpyud_acc_hh_s1:
- di_MInst_disisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hh_s1>;
-def HEXAGON_M2_mpyud_acc_hl_s0:
- di_MInst_disisi_acc_hl <"mpyu", int_hexagon_M2_mpyud_acc_hl_s0>;
-def HEXAGON_M2_mpyud_acc_hl_s1:
- di_MInst_disisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hl_s1>;
-def HEXAGON_M2_mpyud_acc_lh_s0:
- di_MInst_disisi_acc_lh <"mpyu", int_hexagon_M2_mpyud_acc_lh_s0>;
-def HEXAGON_M2_mpyud_acc_lh_s1:
- di_MInst_disisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_lh_s1>;
-def HEXAGON_M2_mpyud_acc_ll_s0:
- di_MInst_disisi_acc_ll <"mpyu", int_hexagon_M2_mpyud_acc_ll_s0>;
-def HEXAGON_M2_mpyud_acc_ll_s1:
- di_MInst_disisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyud_acc_ll_s1>;
-
-//Rdd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def HEXAGON_M2_mpyud_nac_hh_s0:
- di_MInst_disisi_nac_hh <"mpyu", int_hexagon_M2_mpyud_nac_hh_s0>;
-def HEXAGON_M2_mpyud_nac_hh_s1:
- di_MInst_disisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hh_s1>;
-def HEXAGON_M2_mpyud_nac_hl_s0:
- di_MInst_disisi_nac_hl <"mpyu", int_hexagon_M2_mpyud_nac_hl_s0>;
-def HEXAGON_M2_mpyud_nac_hl_s1:
- di_MInst_disisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hl_s1>;
-def HEXAGON_M2_mpyud_nac_lh_s0:
- di_MInst_disisi_nac_lh <"mpyu", int_hexagon_M2_mpyud_nac_lh_s0>;
-def HEXAGON_M2_mpyud_nac_lh_s1:
- di_MInst_disisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_lh_s1>;
-def HEXAGON_M2_mpyud_nac_ll_s0:
- di_MInst_disisi_nac_ll <"mpyu", int_hexagon_M2_mpyud_nac_ll_s0>;
-def HEXAGON_M2_mpyud_nac_ll_s1:
- di_MInst_disisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyud_nac_ll_s1>;
-
+// ALU64 / VB / Vector mux.
+def : Pat<(int_hexagon_C2_vmux PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
+ (C2_vmux PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt)>;
+
+// MPY - Multiply and use full result
+// Rdd = mpy[u](Rs, Rt)
+def : T_RR_pat <M2_dpmpyss_s0, int_hexagon_M2_dpmpyss_s0>;
+def : T_RR_pat <M2_dpmpyuu_s0, int_hexagon_M2_dpmpyuu_s0>;
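+
+// Illustrative note (not part of the patch): these return the full 64-bit
+// product of two 32-bit sources. A hedged C sketch of the semantics:
+//   int64_t  mpy (int32_t a, int32_t b)   { return (int64_t)a * b; }
+//   uint64_t mpyu(uint32_t a, uint32_t b) { return (uint64_t)a * b; }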
+
+// Complex multiply real or imaginary
+def : T_RR_pat <M2_cmpyi_s0, int_hexagon_M2_cmpyi_s0>;
+def : T_RR_pat <M2_cmpyr_s0, int_hexagon_M2_cmpyr_s0>;
+
+// Complex multiply
+def : T_RR_pat <M2_cmpys_s0, int_hexagon_M2_cmpys_s0>;
+def : T_RR_pat <M2_cmpysc_s0, int_hexagon_M2_cmpysc_s0>;
+def : T_RR_pat <M2_cmpys_s1, int_hexagon_M2_cmpys_s1>;
+def : T_RR_pat <M2_cmpysc_s1, int_hexagon_M2_cmpysc_s1>;
+
+// Vector multiply halfwords
+// Rdd=vmpyh(Rs,Rt)[:<<1]:sat
+def : T_RR_pat <M2_vmpy2s_s0, int_hexagon_M2_vmpy2s_s0>;
+def : T_RR_pat <M2_vmpy2s_s1, int_hexagon_M2_vmpy2s_s1>;
+
+// Rxx[+-]= mpy[u](Rs,Rt)
+def : T_PRR_pat <M2_dpmpyss_acc_s0, int_hexagon_M2_dpmpyss_acc_s0>;
+def : T_PRR_pat <M2_dpmpyss_nac_s0, int_hexagon_M2_dpmpyss_nac_s0>;
+def : T_PRR_pat <M2_dpmpyuu_acc_s0, int_hexagon_M2_dpmpyuu_acc_s0>;
+def : T_PRR_pat <M2_dpmpyuu_nac_s0, int_hexagon_M2_dpmpyuu_nac_s0>;
+
+// Rxx[-+]=cmpy(Rs,Rt)[:<<1]:sat
+def : T_PRR_pat <M2_cmacs_s0, int_hexagon_M2_cmacs_s0>;
+def : T_PRR_pat <M2_cnacs_s0, int_hexagon_M2_cnacs_s0>;
+def : T_PRR_pat <M2_cmacs_s1, int_hexagon_M2_cmacs_s1>;
+def : T_PRR_pat <M2_cnacs_s1, int_hexagon_M2_cnacs_s1>;
+
+// Rxx[-+]=cmpy(Rs,Rt*)[:<<1]:sat
+def : T_PRR_pat <M2_cmacsc_s0, int_hexagon_M2_cmacsc_s0>;
+def : T_PRR_pat <M2_cnacsc_s0, int_hexagon_M2_cnacsc_s0>;
+def : T_PRR_pat <M2_cmacsc_s1, int_hexagon_M2_cmacsc_s1>;
+def : T_PRR_pat <M2_cnacsc_s1, int_hexagon_M2_cnacsc_s1>;
+
+// Rxx+=cmpy[ir](Rs,Rt)
+def : T_PRR_pat <M2_cmaci_s0, int_hexagon_M2_cmaci_s0>;
+def : T_PRR_pat <M2_cmacr_s0, int_hexagon_M2_cmacr_s0>;
+
+// Rxx+=vmpyh(Rs,Rt)[:<<1][:sat]
+def : T_PRR_pat <M2_vmac2, int_hexagon_M2_vmac2>;
+def : T_PRR_pat <M2_vmac2s_s0, int_hexagon_M2_vmac2s_s0>;
+def : T_PRR_pat <M2_vmac2s_s1, int_hexagon_M2_vmac2s_s1>;
/********************************************************************
-* MTYPE/VB *
+* CR *
*********************************************************************/
+class qi_CRInst_qi_pat<InstHexagon Inst, Intrinsic IntID> :
+ Pat<(i32 (IntID IntRegs:$Rs)),
+ (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs))))>;
-// MTYPE / VB / Vector reduce add unsigned bytes.
-def HEXAGON_A2_vraddub:
- di_MInst_didi <"vraddub", int_hexagon_A2_vraddub>;
-def HEXAGON_A2_vraddub_acc:
- di_MInst_dididi_acc <"vraddub", int_hexagon_A2_vraddub_acc>;
+class qi_CRInst_qiqi_pat<InstHexagon Inst, Intrinsic IntID> :
+ Pat<(i32 (IntID IntRegs:$Rs, IntRegs:$Rt)),
+ (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs), (C2_tfrrp IntRegs:$Rt))))>;
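+
+// Illustrative note (not part of the patch): the intrinsics take and return
+// i32 values while the C2_* instructions operate on predicate registers, so
+// each class moves the operands into predicates with C2_tfrrp and moves the
+// result back out with C2_tfrpr, e.g. for the defs below:
+//   int_hexagon_C2_and(a, b) ==> C2_tfrpr(C2_and(C2_tfrrp(a), C2_tfrrp(b)))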
-// MTYPE / VB / Vector sum of absolute differences unsigned bytes.
-def HEXAGON_A2_vrsadub:
- di_MInst_didi <"vrsadub", int_hexagon_A2_vrsadub>;
-def HEXAGON_A2_vrsadub_acc:
- di_MInst_dididi_acc <"vrsadub", int_hexagon_A2_vrsadub_acc>;
+def: qi_CRInst_qi_pat<C2_not, int_hexagon_C2_not>;
+def: qi_CRInst_qi_pat<C2_all8, int_hexagon_C2_all8>;
+def: qi_CRInst_qi_pat<C2_any8, int_hexagon_C2_any8>;
-/********************************************************************
-* MTYPE/VH *
-*********************************************************************/
+def: qi_CRInst_qiqi_pat<C2_and, int_hexagon_C2_and>;
+def: qi_CRInst_qiqi_pat<C2_andn, int_hexagon_C2_andn>;
+def: qi_CRInst_qiqi_pat<C2_or, int_hexagon_C2_or>;
+def: qi_CRInst_qiqi_pat<C2_orn, int_hexagon_C2_orn>;
+def: qi_CRInst_qiqi_pat<C2_xor, int_hexagon_C2_xor>;
-// MTYPE / VH / Vector dual multiply.
-def HEXAGON_M2_vdmpys_s1:
- di_MInst_didi_s1_sat <"vdmpy", int_hexagon_M2_vdmpys_s1>;
-def HEXAGON_M2_vdmpys_s0:
- di_MInst_didi_sat <"vdmpy", int_hexagon_M2_vdmpys_s0>;
-def HEXAGON_M2_vdmacs_s1:
- di_MInst_dididi_acc_s1_sat <"vdmpy", int_hexagon_M2_vdmacs_s1>;
-def HEXAGON_M2_vdmacs_s0:
- di_MInst_dididi_acc_sat <"vdmpy", int_hexagon_M2_vdmacs_s0>;
-
-// MTYPE / VH / Vector dual multiply with round and pack.
-def HEXAGON_M2_vdmpyrs_s0:
- si_MInst_didi_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s0>;
-def HEXAGON_M2_vdmpyrs_s1:
- si_MInst_didi_s1_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s1>;
-
-// MTYPE / VH / Vector multiply even halfwords.
-def HEXAGON_M2_vmpy2es_s1:
- di_MInst_didi_s1_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s1>;
-def HEXAGON_M2_vmpy2es_s0:
- di_MInst_didi_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s0>;
-def HEXAGON_M2_vmac2es:
- di_MInst_dididi_acc <"vmpyeh", int_hexagon_M2_vmac2es>;
-def HEXAGON_M2_vmac2es_s1:
- di_MInst_dididi_acc_s1_sat <"vmpyeh", int_hexagon_M2_vmac2es_s1>;
-def HEXAGON_M2_vmac2es_s0:
- di_MInst_dididi_acc_sat <"vmpyeh", int_hexagon_M2_vmac2es_s0>;
-
-// MTYPE / VH / Vector multiply halfwords.
-def HEXAGON_M2_vmpy2s_s0:
- di_MInst_sisi_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0>;
-def HEXAGON_M2_vmpy2s_s1:
- di_MInst_sisi_s1_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1>;
-def HEXAGON_M2_vmac2:
- di_MInst_disisi_acc <"vmpyh", int_hexagon_M2_vmac2>;
-def HEXAGON_M2_vmac2s_s0:
- di_MInst_disisi_acc_sat <"vmpyh", int_hexagon_M2_vmac2s_s0>;
-def HEXAGON_M2_vmac2s_s1:
- di_MInst_disisi_acc_s1_sat <"vmpyh", int_hexagon_M2_vmac2s_s1>;
-
-// MTYPE / VH / Vector multiply halfwords with round and pack.
-def HEXAGON_M2_vmpy2s_s0pack:
- si_MInst_sisi_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0pack>;
-def HEXAGON_M2_vmpy2s_s1pack:
- si_MInst_sisi_s1_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1pack>;
-
-// MTYPE / VH / Vector reduce multiply halfwords.
-// Rxx32+=vrmpyh(Rss32,Rtt32)
-def HEXAGON_M2_vrmpy_s0:
- di_MInst_didi <"vrmpyh", int_hexagon_M2_vrmpy_s0>;
-def HEXAGON_M2_vrmac_s0:
- di_MInst_dididi_acc <"vrmpyh", int_hexagon_M2_vrmac_s0>;
+// Multiply 32x32 and use lower result
+def : T_RRI_pat <M2_macsip, int_hexagon_M2_macsip>;
+def : T_RRI_pat <M2_macsin, int_hexagon_M2_macsin>;
+def : T_RRR_pat <M2_maci, int_hexagon_M2_maci>;
+// Subtract and accumulate
+def : T_RRR_pat <M2_subacc, int_hexagon_M2_subacc>;
-/********************************************************************
-* STYPE/ALU *
-*********************************************************************/
+// Add and accumulate
+def : T_RRR_pat <M2_acci, int_hexagon_M2_acci>;
+def : T_RRR_pat <M2_nacci, int_hexagon_M2_nacci>;
+def : T_RRI_pat <M2_accii, int_hexagon_M2_accii>;
+def : T_RRI_pat <M2_naccii, int_hexagon_M2_naccii>;
-// STYPE / ALU / Absolute value.
-def HEXAGON_A2_abs:
- si_SInst_si <"abs", int_hexagon_A2_abs>;
-def HEXAGON_A2_absp:
- di_SInst_di <"abs", int_hexagon_A2_absp>;
-def HEXAGON_A2_abssat:
- si_SInst_si_sat <"abs", int_hexagon_A2_abssat>;
+// XOR and XOR with destination
+def : T_RRR_pat <M2_xor_xacc, int_hexagon_M2_xor_xacc>;
-// STYPE / ALU / Negate.
-def HEXAGON_A2_negp:
- di_SInst_di <"neg", int_hexagon_A2_negp>;
-def HEXAGON_A2_negsat:
- si_SInst_si_sat <"neg", int_hexagon_A2_negsat>;
+class MType_R32_pat <Intrinsic IntID, InstHexagon OutputInst> :
+ Pat <(IntID IntRegs:$src1, IntRegs:$src2),
+ (OutputInst IntRegs:$src1, IntRegs:$src2)>;
-// STYPE / ALU / Logical Not.
-def HEXAGON_A2_notp:
- di_SInst_di <"not", int_hexagon_A2_notp>;
+// Vector dual multiply with round and pack
-// STYPE / ALU / Sign extend word to doubleword.
-def HEXAGON_A2_sxtw:
- di_SInst_si <"sxtw", int_hexagon_A2_sxtw>;
+def : Pat <(int_hexagon_M2_vdmpyrs_s0 DoubleRegs:$src1, DoubleRegs:$src2),
+ (M2_vdmpyrs_s0 DoubleRegs:$src1, DoubleRegs:$src2)>;
+def : Pat <(int_hexagon_M2_vdmpyrs_s1 DoubleRegs:$src1, DoubleRegs:$src2),
+ (M2_vdmpyrs_s1 DoubleRegs:$src1, DoubleRegs:$src2)>;
+
+// Vector multiply halfwords with round and pack
+
+def : MType_R32_pat <int_hexagon_M2_vmpy2s_s0pack, M2_vmpy2s_s0pack>;
+def : MType_R32_pat <int_hexagon_M2_vmpy2s_s1pack, M2_vmpy2s_s1pack>;
+
+// Multiply and use lower result
+def : MType_R32_pat <int_hexagon_M2_mpyi, M2_mpyi>;
+def : T_RI_pat<M2_mpysmi, int_hexagon_M2_mpysmi>;
+
+// Assembler mapped from Rd32=mpyui(Rs32,Rt32) to Rd32=mpyi(Rs32,Rt32)
+def : MType_R32_pat <int_hexagon_M2_mpyui, M2_mpyi>;
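+// This mapping is sound because the low 32 bits of a 32x32 multiply are
+// identical for signed and unsigned operands; only the upper half differs.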
+
+// Multiply and use upper result
+def : MType_R32_pat <int_hexagon_M2_mpy_up, M2_mpy_up>;
+def : MType_R32_pat <int_hexagon_M2_mpyu_up, M2_mpyu_up>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyh_rs1, M2_hmmpyh_rs1>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyl_rs1, M2_hmmpyl_rs1>;
+def : MType_R32_pat <int_hexagon_M2_dpmpyss_rnd_s0, M2_dpmpyss_rnd_s0>;
+
+// Complex multiply with round and pack
+// Rd32=cmpy(Rs32,[*]Rt32)[:<<1]:rnd:sat
+def : MType_R32_pat <int_hexagon_M2_cmpyrs_s0, M2_cmpyrs_s0>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrs_s1, M2_cmpyrs_s1>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s0, M2_cmpyrsc_s0>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s1, M2_cmpyrsc_s1>;
/********************************************************************
-* STYPE/BIT *
+* STYPE/ALU *
*********************************************************************/
+def : T_P_pat <A2_absp, int_hexagon_A2_absp>;
+def : T_P_pat <A2_negp, int_hexagon_A2_negp>;
+def : T_P_pat <A2_notp, int_hexagon_A2_notp>;
-// STYPE / BIT / Count leading.
-def HEXAGON_S2_cl0:
- si_SInst_si <"cl0", int_hexagon_S2_cl0>;
-def HEXAGON_S2_cl0p:
- si_SInst_di <"cl0", int_hexagon_S2_cl0p>;
-def HEXAGON_S2_cl1:
- si_SInst_si <"cl1", int_hexagon_S2_cl1>;
-def HEXAGON_S2_cl1p:
- si_SInst_di <"cl1", int_hexagon_S2_cl1p>;
-def HEXAGON_S2_clb:
- si_SInst_si <"clb", int_hexagon_S2_clb>;
-def HEXAGON_S2_clbp:
- si_SInst_di <"clb", int_hexagon_S2_clbp>;
-def HEXAGON_S2_clbnorm:
- si_SInst_si <"normamt", int_hexagon_S2_clbnorm>;
-
-// STYPE / BIT / Count trailing.
-def HEXAGON_S2_ct0:
- si_SInst_si <"ct0", int_hexagon_S2_ct0>;
-def HEXAGON_S2_ct1:
- si_SInst_si <"ct1", int_hexagon_S2_ct1>;
-
-// STYPE / BIT / Compare bit mask.
-def Hexagon_C2_bitsclr:
- qi_SInst_sisi <"bitsclr", int_hexagon_C2_bitsclr>;
-def Hexagon_C2_bitsclri:
- qi_SInst_siu6 <"bitsclr", int_hexagon_C2_bitsclri>;
-def Hexagon_C2_bitsset:
- qi_SInst_sisi <"bitsset", int_hexagon_C2_bitsset>;
-
-// STYPE / BIT / Extract unsigned.
-// Rd[d][32/64]=extractu(Rs[s],Rt[t],[imm])
-def HEXAGON_S2_extractu:
- si_SInst_siu5u5 <"extractu",int_hexagon_S2_extractu>;
-def HEXAGON_S2_extractu_rp:
- si_SInst_sidi <"extractu",int_hexagon_S2_extractu_rp>;
-def HEXAGON_S2_extractup:
- di_SInst_diu6u6 <"extractu",int_hexagon_S2_extractup>;
-def HEXAGON_S2_extractup_rp:
- di_SInst_didi <"extractu",int_hexagon_S2_extractup_rp>;
-
-// STYPE / BIT / Insert bitfield.
-def Hexagon_S2_insert:
- si_SInst_sisiu5u5 <"insert", int_hexagon_S2_insert>;
-def Hexagon_S2_insert_rp:
- si_SInst_sisidi <"insert", int_hexagon_S2_insert_rp>;
-def Hexagon_S2_insertp:
- di_SInst_didiu6u6 <"insert", int_hexagon_S2_insertp>;
-def Hexagon_S2_insertp_rp:
- di_SInst_dididi <"insert", int_hexagon_S2_insertp_rp>;
-
-// STYPE / BIT / Innterleave/deinterleave.
-def Hexagon_S2_interleave:
- di_SInst_di <"interleave", int_hexagon_S2_interleave>;
-def Hexagon_S2_deinterleave:
- di_SInst_di <"deinterleave", int_hexagon_S2_deinterleave>;
-
-// STYPE / BIT / Linear feedback-shift Iteration.
-def Hexagon_S2_lfsp:
- di_SInst_didi <"lfs", int_hexagon_S2_lfsp>;
-
-// STYPE / BIT / Bit reverse.
-def Hexagon_S2_brev:
- si_SInst_si <"brev", int_hexagon_S2_brev>;
-
-// STYPE / BIT / Set/Clear/Toggle Bit.
-def HEXAGON_S2_setbit_i:
- si_SInst_siu5 <"setbit", int_hexagon_S2_setbit_i>;
-def HEXAGON_S2_togglebit_i:
- si_SInst_siu5 <"togglebit", int_hexagon_S2_togglebit_i>;
-def HEXAGON_S2_clrbit_i:
- si_SInst_siu5 <"clrbit", int_hexagon_S2_clrbit_i>;
-def HEXAGON_S2_setbit_r:
- si_SInst_sisi <"setbit", int_hexagon_S2_setbit_r>;
-def HEXAGON_S2_togglebit_r:
- si_SInst_sisi <"togglebit", int_hexagon_S2_togglebit_r>;
-def HEXAGON_S2_clrbit_r:
- si_SInst_sisi <"clrbit", int_hexagon_S2_clrbit_r>;
-
-// STYPE / BIT / Test Bit.
-def HEXAGON_S2_tstbit_i:
- qi_SInst_siu5 <"tstbit", int_hexagon_S2_tstbit_i>;
-def HEXAGON_S2_tstbit_r:
- qi_SInst_sisi <"tstbit", int_hexagon_S2_tstbit_r>;
+/********************************************************************
+* STYPE/BIT *
+*********************************************************************/
+// Count leading/trailing
+def: T_R_pat<S2_cl0, int_hexagon_S2_cl0>;
+def: T_P_pat<S2_cl0p, int_hexagon_S2_cl0p>;
+def: T_R_pat<S2_cl1, int_hexagon_S2_cl1>;
+def: T_P_pat<S2_cl1p, int_hexagon_S2_cl1p>;
+def: T_R_pat<S2_clb, int_hexagon_S2_clb>;
+def: T_P_pat<S2_clbp, int_hexagon_S2_clbp>;
+def: T_R_pat<S2_clbnorm, int_hexagon_S2_clbnorm>;
+def: T_R_pat<S2_ct0, int_hexagon_S2_ct0>;
+def: T_R_pat<S2_ct1, int_hexagon_S2_ct1>;
+
+// Compare bit mask
+def: T_RR_pat<C2_bitsclr, int_hexagon_C2_bitsclr>;
+def: T_RI_pat<C2_bitsclri, int_hexagon_C2_bitsclri>;
+def: T_RR_pat<C2_bitsset, int_hexagon_C2_bitsset>;
+
+// Vector shuffle
+def : T_PP_pat <S2_shuffeb, int_hexagon_S2_shuffeb>;
+def : T_PP_pat <S2_shuffob, int_hexagon_S2_shuffob>;
+def : T_PP_pat <S2_shuffeh, int_hexagon_S2_shuffeh>;
+def : T_PP_pat <S2_shuffoh, int_hexagon_S2_shuffoh>;
+
+// Vector truncate
+def : T_PP_pat <S2_vtrunewh, int_hexagon_S2_vtrunewh>;
+def : T_PP_pat <S2_vtrunowh, int_hexagon_S2_vtrunowh>;
+
+// Linear feedback-shift iteration.
+def : T_PP_pat <S2_lfsp, int_hexagon_S2_lfsp>;
+
+// Vector splice
+def : T_PPQ_pat <S2_vsplicerb, int_hexagon_S2_vsplicerb>;
+def : T_PPI_pat <S2_vspliceib, int_hexagon_S2_vspliceib>;
+
+// Shift by immediate and add
+def : T_RRI_pat<S2_addasl_rrri, int_hexagon_S2_addasl_rrri>;
+
+// Extract bitfield
+def : T_PII_pat<S2_extractup, int_hexagon_S2_extractup>;
+def : T_RII_pat<S2_extractu, int_hexagon_S2_extractu>;
+def : T_RP_pat <S2_extractu_rp, int_hexagon_S2_extractu_rp>;
+def : T_PP_pat <S2_extractup_rp, int_hexagon_S2_extractup_rp>;
+
+// Insert bitfield
+def : Pat <(int_hexagon_S2_insert_rp IntRegs:$src1, IntRegs:$src2,
+ DoubleRegs:$src3),
+ (S2_insert_rp IntRegs:$src1, IntRegs:$src2, DoubleRegs:$src3)>;
+
+def : Pat<(i64 (int_hexagon_S2_insertp_rp (I64:$src1),
+ (I64:$src2), (I64:$src3))),
+ (i64 (S2_insertp_rp (I64:$src1), (I64:$src2),
+ (I64:$src3)))>;
+
+def : Pat<(int_hexagon_S2_insert IntRegs:$src1, IntRegs:$src2,
+ u5ImmPred:$src3, u5ImmPred:$src4),
+ (S2_insert IntRegs:$src1, IntRegs:$src2,
+ u5ImmPred:$src3, u5ImmPred:$src4)>;
+
+def : Pat<(i64 (int_hexagon_S2_insertp (I64:$src1),
+ (I64:$src2), u6ImmPred:$src3, u6ImmPred:$src4)),
+ (i64 (S2_insertp (I64:$src1), (I64:$src2),
+ u6ImmPred:$src3, u6ImmPred:$src4))>;
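+
+// Assumed semantics, for illustration: Rx = insert(Rs, #width, #offset)
+// replaces #width bits of Rx starting at bit #offset with the low bits of
+// Rs; the _rp forms take width and offset packed in a register pair.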
+
+
+// Interleave/deinterleave
+def : T_P_pat <S2_interleave, int_hexagon_S2_interleave>;
+def : T_P_pat <S2_deinterleave, int_hexagon_S2_deinterleave>;
+
+// Set/Clear/Toggle Bit
+def: T_RI_pat<S2_setbit_i, int_hexagon_S2_setbit_i>;
+def: T_RI_pat<S2_clrbit_i, int_hexagon_S2_clrbit_i>;
+def: T_RI_pat<S2_togglebit_i, int_hexagon_S2_togglebit_i>;
+
+def: T_RR_pat<S2_setbit_r, int_hexagon_S2_setbit_r>;
+def: T_RR_pat<S2_clrbit_r, int_hexagon_S2_clrbit_r>;
+def: T_RR_pat<S2_togglebit_r, int_hexagon_S2_togglebit_r>;
+
+// Test Bit
+def: T_RI_pat<S2_tstbit_i, int_hexagon_S2_tstbit_i>;
+def: T_RR_pat<S2_tstbit_r, int_hexagon_S2_tstbit_r>;
/********************************************************************
* STYPE/COMPLEX *
*********************************************************************/
+// Vector Complex conjugate
+def : T_P_pat <A2_vconj, int_hexagon_A2_vconj>;
-// STYPE / COMPLEX / Vector Complex conjugate.
-def HEXAGON_A2_vconj:
- di_SInst_di_sat <"vconj", int_hexagon_A2_vconj>;
-
-// STYPE / COMPLEX / Vector Complex rotate.
-def HEXAGON_S2_vcrotate:
- di_SInst_disi <"vcrotate",int_hexagon_S2_vcrotate>;
-
+// Vector Complex rotate
+def : T_PR_pat <S2_vcrotate, int_hexagon_S2_vcrotate>;
/********************************************************************
* STYPE/PERM *
*********************************************************************/
-// STYPE / PERM / Saturate.
-def HEXAGON_A2_sat:
- si_SInst_di <"sat", int_hexagon_A2_sat>;
-def HEXAGON_A2_satb:
- si_SInst_si <"satb", int_hexagon_A2_satb>;
-def HEXAGON_A2_sath:
- si_SInst_si <"sath", int_hexagon_A2_sath>;
-def HEXAGON_A2_satub:
- si_SInst_si <"satub", int_hexagon_A2_satub>;
-def HEXAGON_A2_satuh:
- si_SInst_si <"satuh", int_hexagon_A2_satuh>;
-
-// STYPE / PERM / Swizzle bytes.
-def HEXAGON_A2_swiz:
- si_SInst_si <"swiz", int_hexagon_A2_swiz>;
-
-// STYPE / PERM / Vector align.
-// Need custom lowering
-def HEXAGON_S2_valignib:
- di_SInst_didiu3 <"valignb", int_hexagon_S2_valignib>;
-def HEXAGON_S2_valignrb:
- di_SInst_didiqi <"valignb", int_hexagon_S2_valignrb>;
-
-// STYPE / PERM / Vector round and pack.
-def HEXAGON_S2_vrndpackwh:
- si_SInst_di <"vrndwh", int_hexagon_S2_vrndpackwh>;
-def HEXAGON_S2_vrndpackwhs:
- si_SInst_di_sat <"vrndwh", int_hexagon_S2_vrndpackwhs>;
-
-// STYPE / PERM / Vector saturate and pack.
-def HEXAGON_S2_svsathb:
- si_SInst_si <"vsathb", int_hexagon_S2_svsathb>;
-def HEXAGON_S2_vsathb:
- si_SInst_di <"vsathb", int_hexagon_S2_vsathb>;
-def HEXAGON_S2_svsathub:
- si_SInst_si <"vsathub", int_hexagon_S2_svsathub>;
-def HEXAGON_S2_vsathub:
- si_SInst_di <"vsathub", int_hexagon_S2_vsathub>;
-def HEXAGON_S2_vsatwh:
- si_SInst_di <"vsatwh", int_hexagon_S2_vsatwh>;
-def HEXAGON_S2_vsatwuh:
- si_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh>;
-
-// STYPE / PERM / Vector saturate without pack.
-def HEXAGON_S2_vsathb_nopack:
- di_SInst_di <"vsathb", int_hexagon_S2_vsathb_nopack>;
-def HEXAGON_S2_vsathub_nopack:
- di_SInst_di <"vsathub", int_hexagon_S2_vsathub_nopack>;
-def HEXAGON_S2_vsatwh_nopack:
- di_SInst_di <"vsatwh", int_hexagon_S2_vsatwh_nopack>;
-def HEXAGON_S2_vsatwuh_nopack:
- di_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh_nopack>;
-
-// STYPE / PERM / Vector shuffle.
-def HEXAGON_S2_shuffeb:
- di_SInst_didi <"shuffeb", int_hexagon_S2_shuffeb>;
-def HEXAGON_S2_shuffeh:
- di_SInst_didi <"shuffeh", int_hexagon_S2_shuffeh>;
-def HEXAGON_S2_shuffob:
- di_SInst_didi <"shuffob", int_hexagon_S2_shuffob>;
-def HEXAGON_S2_shuffoh:
- di_SInst_didi <"shuffoh", int_hexagon_S2_shuffoh>;
-
-// STYPE / PERM / Vector splat bytes.
-def HEXAGON_S2_vsplatrb:
- si_SInst_si <"vsplatb", int_hexagon_S2_vsplatrb>;
-
-// STYPE / PERM / Vector splat halfwords.
-def HEXAGON_S2_vsplatrh:
- di_SInst_si <"vsplath", int_hexagon_S2_vsplatrh>;
-
-// STYPE / PERM / Vector splice.
-def Hexagon_S2_vsplicerb:
- di_SInst_didiqi <"vspliceb",int_hexagon_S2_vsplicerb>;
-def Hexagon_S2_vspliceib:
- di_SInst_didiu3 <"vspliceb",int_hexagon_S2_vspliceib>;
-
-// STYPE / PERM / Sign extend.
-def HEXAGON_S2_vsxtbh:
- di_SInst_si <"vsxtbh", int_hexagon_S2_vsxtbh>;
-def HEXAGON_S2_vsxthw:
- di_SInst_si <"vsxthw", int_hexagon_S2_vsxthw>;
-
-// STYPE / PERM / Truncate.
-def HEXAGON_S2_vtrunehb:
- si_SInst_di <"vtrunehb",int_hexagon_S2_vtrunehb>;
-def HEXAGON_S2_vtrunohb:
- si_SInst_di <"vtrunohb",int_hexagon_S2_vtrunohb>;
-def HEXAGON_S2_vtrunewh:
- di_SInst_didi <"vtrunewh",int_hexagon_S2_vtrunewh>;
-def HEXAGON_S2_vtrunowh:
- di_SInst_didi <"vtrunowh",int_hexagon_S2_vtrunowh>;
-
-// STYPE / PERM / Zero extend.
-def HEXAGON_S2_vzxtbh:
- di_SInst_si <"vzxtbh", int_hexagon_S2_vzxtbh>;
-def HEXAGON_S2_vzxthw:
- di_SInst_si <"vzxthw", int_hexagon_S2_vzxthw>;
-
+// Vector saturate without pack
+def : T_P_pat <S2_vsathb_nopack, int_hexagon_S2_vsathb_nopack>;
+def : T_P_pat <S2_vsathub_nopack, int_hexagon_S2_vsathub_nopack>;
+def : T_P_pat <S2_vsatwh_nopack, int_hexagon_S2_vsatwh_nopack>;
+def : T_P_pat <S2_vsatwuh_nopack, int_hexagon_S2_vsatwuh_nopack>;
/********************************************************************
* STYPE/PRED *
*********************************************************************/
-// STYPE / PRED / Mask generate from predicate.
-def HEXAGON_C2_mask:
- di_SInst_qi <"mask", int_hexagon_C2_mask>;
-
-// STYPE / PRED / Predicate transfer.
-def HEXAGON_C2_tfrpr:
- si_SInst_qi <"", int_hexagon_C2_tfrpr>;
-def HEXAGON_C2_tfrrp:
- qi_SInst_si <"", int_hexagon_C2_tfrrp>;
+// Predicate transfer
+def: Pat<(i32 (int_hexagon_C2_tfrpr (I32:$Rs))),
+ (i32 (C2_tfrpr (C2_tfrrp (I32:$Rs))))>;
+def: Pat<(i32 (int_hexagon_C2_tfrrp (I32:$Rs))),
+ (i32 (C2_tfrpr (C2_tfrrp (I32:$Rs))))>;
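+
+// Both directions round-trip through C2_tfrrp/C2_tfrpr: the intrinsics take
+// and return i32, so the predicate register is live only between the two
+// transfers.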
-// STYPE / PRED / Viterbi pack even and odd predicate bits.
-def HEXAGON_C2_vitpack:
- si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>;
+// Mask generate from predicate
+def: Pat<(i64 (int_hexagon_C2_mask (I32:$Rs))),
+ (i64 (C2_mask (C2_tfrrp (I32:$Rs))))>;
+// Viterbi pack even and odd predicate bits
+def: Pat<(i32 (int_hexagon_C2_vitpack (I32:$Rs), (I32:$Rt))),
+ (i32 (C2_vitpack (C2_tfrrp (I32:$Rs)),
+ (C2_tfrrp (I32:$Rt))))>;
/********************************************************************
* STYPE/SHIFT *
*********************************************************************/
-// STYPE / SHIFT / Shift by immediate.
-def HEXAGON_S2_asl_i_r:
- si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>;
-def HEXAGON_S2_asr_i_r:
- si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>;
-def HEXAGON_S2_lsr_i_r:
- si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>;
-def HEXAGON_S2_asl_i_p:
- di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>;
-def HEXAGON_S2_asr_i_p:
- di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>;
-def HEXAGON_S2_lsr_i_p:
- di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>;
-
-// STYPE / SHIFT / Shift by immediate and accumulate.
-def HEXAGON_S2_asl_i_r_acc:
- si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>;
-def HEXAGON_S2_asr_i_r_acc:
- si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>;
-def HEXAGON_S2_lsr_i_r_acc:
- si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>;
-def HEXAGON_S2_asl_i_r_nac:
- si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>;
-def HEXAGON_S2_asr_i_r_nac:
- si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>;
-def HEXAGON_S2_lsr_i_r_nac:
- si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>;
-def HEXAGON_S2_asl_i_p_acc:
- di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>;
-def HEXAGON_S2_asr_i_p_acc:
- di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>;
-def HEXAGON_S2_lsr_i_p_acc:
- di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>;
-def HEXAGON_S2_asl_i_p_nac:
- di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>;
-def HEXAGON_S2_asr_i_p_nac:
- di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>;
-def HEXAGON_S2_lsr_i_p_nac:
- di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>;
-
-// STYPE / SHIFT / Shift by immediate and add.
-def HEXAGON_S2_addasl_rrri:
- si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>;
-
-// STYPE / SHIFT / Shift by immediate and logical.
-def HEXAGON_S2_asl_i_r_and:
- si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>;
-def HEXAGON_S2_asr_i_r_and:
- si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>;
-def HEXAGON_S2_lsr_i_r_and:
- si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>;
-
-def HEXAGON_S2_asl_i_r_xacc:
- si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>;
-def HEXAGON_S2_lsr_i_r_xacc:
- si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>;
-
-def HEXAGON_S2_asl_i_r_or:
- si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>;
-def HEXAGON_S2_asr_i_r_or:
- si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>;
-def HEXAGON_S2_lsr_i_r_or:
- si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>;
-
-def HEXAGON_S2_asl_i_p_and:
- di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>;
-def HEXAGON_S2_asr_i_p_and:
- di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>;
-def HEXAGON_S2_lsr_i_p_and:
- di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>;
-
-def HEXAGON_S2_asl_i_p_xacc:
- di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>;
-def HEXAGON_S2_lsr_i_p_xacc:
- di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>;
-
-def HEXAGON_S2_asl_i_p_or:
- di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>;
-def HEXAGON_S2_asr_i_p_or:
- di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>;
-def HEXAGON_S2_lsr_i_p_or:
- di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>;
-
-// STYPE / SHIFT / Shift right by immediate with rounding.
-def HEXAGON_S2_asr_i_r_rnd:
- si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>;
-def HEXAGON_S2_asr_i_r_rnd_goodsyntax:
- si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
-
-// STYPE / SHIFT / Shift left by immediate with saturation.
-def HEXAGON_S2_asl_i_r_sat:
- si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>;
-
-// STYPE / SHIFT / Shift by register.
-def HEXAGON_S2_asl_r_r:
- si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>;
-def HEXAGON_S2_asr_r_r:
- si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>;
-def HEXAGON_S2_lsl_r_r:
- si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>;
-def HEXAGON_S2_lsr_r_r:
- si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>;
-def HEXAGON_S2_asl_r_p:
- di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>;
-def HEXAGON_S2_asr_r_p:
- di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>;
-def HEXAGON_S2_lsl_r_p:
- di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>;
-def HEXAGON_S2_lsr_r_p:
- di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>;
-
-// STYPE / SHIFT / Shift by register and accumulate.
-def HEXAGON_S2_asl_r_r_acc:
- si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>;
-def HEXAGON_S2_asr_r_r_acc:
- si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>;
-def HEXAGON_S2_lsl_r_r_acc:
- si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>;
-def HEXAGON_S2_lsr_r_r_acc:
- si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>;
-def HEXAGON_S2_asl_r_p_acc:
- di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>;
-def HEXAGON_S2_asr_r_p_acc:
- di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>;
-def HEXAGON_S2_lsl_r_p_acc:
- di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>;
-def HEXAGON_S2_lsr_r_p_acc:
- di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>;
-
-def HEXAGON_S2_asl_r_r_nac:
- si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>;
-def HEXAGON_S2_asr_r_r_nac:
- si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>;
-def HEXAGON_S2_lsl_r_r_nac:
- si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>;
-def HEXAGON_S2_lsr_r_r_nac:
- si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>;
-def HEXAGON_S2_asl_r_p_nac:
- di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>;
-def HEXAGON_S2_asr_r_p_nac:
- di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>;
-def HEXAGON_S2_lsl_r_p_nac:
- di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>;
-def HEXAGON_S2_lsr_r_p_nac:
- di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>;
-
-// STYPE / SHIFT / Shift by register and logical.
-def HEXAGON_S2_asl_r_r_and:
- si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>;
-def HEXAGON_S2_asr_r_r_and:
- si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>;
-def HEXAGON_S2_lsl_r_r_and:
- si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>;
-def HEXAGON_S2_lsr_r_r_and:
- si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>;
-
-def HEXAGON_S2_asl_r_r_or:
- si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>;
-def HEXAGON_S2_asr_r_r_or:
- si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>;
-def HEXAGON_S2_lsl_r_r_or:
- si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>;
-def HEXAGON_S2_lsr_r_r_or:
- si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>;
-
-def HEXAGON_S2_asl_r_p_and:
- di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>;
-def HEXAGON_S2_asr_r_p_and:
- di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>;
-def HEXAGON_S2_lsl_r_p_and:
- di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>;
-def HEXAGON_S2_lsr_r_p_and:
- di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>;
-
-def HEXAGON_S2_asl_r_p_or:
- di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>;
-def HEXAGON_S2_asr_r_p_or:
- di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>;
-def HEXAGON_S2_lsl_r_p_or:
- di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>;
-def HEXAGON_S2_lsr_r_p_or:
- di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>;
-
-// STYPE / SHIFT / Shift by register with saturation.
-def HEXAGON_S2_asl_r_r_sat:
- si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>;
-def HEXAGON_S2_asr_r_r_sat:
- si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>;
-
-// STYPE / SHIFT / Table Index.
-def Hexagon_S2_tableidxb_goodsyntax:
- si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>;
-def Hexagon_S2_tableidxd_goodsyntax:
- si_MInst_sisiu4u5 <"tableidxd",int_hexagon_S2_tableidxd_goodsyntax>;
-def Hexagon_S2_tableidxh_goodsyntax:
- si_MInst_sisiu4u5 <"tableidxh",int_hexagon_S2_tableidxh_goodsyntax>;
-def Hexagon_S2_tableidxw_goodsyntax:
- si_MInst_sisiu4u5 <"tableidxw",int_hexagon_S2_tableidxw_goodsyntax>;
+def : T_PI_pat <S2_asr_i_p, int_hexagon_S2_asr_i_p>;
+def : T_PI_pat <S2_lsr_i_p, int_hexagon_S2_lsr_i_p>;
+def : T_PI_pat <S2_asl_i_p, int_hexagon_S2_asl_i_p>;
+
+def : T_PR_pat <S2_asr_r_p, int_hexagon_S2_asr_r_p>;
+def : T_PR_pat <S2_lsr_r_p, int_hexagon_S2_lsr_r_p>;
+def : T_PR_pat <S2_asl_r_p, int_hexagon_S2_asl_r_p>;
+def : T_PR_pat <S2_lsl_r_p, int_hexagon_S2_lsl_r_p>;
+
+def : T_RR_pat <S2_asr_r_r, int_hexagon_S2_asr_r_r>;
+def : T_RR_pat <S2_lsr_r_r, int_hexagon_S2_lsr_r_r>;
+def : T_RR_pat <S2_asl_r_r, int_hexagon_S2_asl_r_r>;
+def : T_RR_pat <S2_lsl_r_r, int_hexagon_S2_lsl_r_r>;
+
+def : T_RR_pat <S2_asr_r_r_sat, int_hexagon_S2_asr_r_r_sat>;
+def : T_RR_pat <S2_asl_r_r_sat, int_hexagon_S2_asl_r_r_sat>;
+
+def : T_R_pat <S2_vsxtbh, int_hexagon_S2_vsxtbh>;
+def : T_R_pat <S2_vzxtbh, int_hexagon_S2_vzxtbh>;
+def : T_R_pat <S2_vsxthw, int_hexagon_S2_vsxthw>;
+def : T_R_pat <S2_vzxthw, int_hexagon_S2_vzxthw>;
+def : T_R_pat <S2_vsplatrh, int_hexagon_S2_vsplatrh>;
+def : T_R_pat <A2_sxtw, int_hexagon_A2_sxtw>;
+
+// Vector saturate and pack
+def : T_R_pat <S2_svsathb, int_hexagon_S2_svsathb>;
+def : T_R_pat <S2_svsathub, int_hexagon_S2_svsathub>;
+def : T_P_pat <S2_vsathub, int_hexagon_S2_vsathub>;
+def : T_P_pat <S2_vsatwh, int_hexagon_S2_vsatwh>;
+def : T_P_pat <S2_vsatwuh, int_hexagon_S2_vsatwuh>;
+def : T_P_pat <S2_vsathb, int_hexagon_S2_vsathb>;
+
+def : T_P_pat <S2_vtrunohb, int_hexagon_S2_vtrunohb>;
+def : T_P_pat <S2_vtrunehb, int_hexagon_S2_vtrunehb>;
+def : T_P_pat <S2_vrndpackwh, int_hexagon_S2_vrndpackwh>;
+def : T_P_pat <S2_vrndpackwhs, int_hexagon_S2_vrndpackwhs>;
+def : T_R_pat <S2_brev, int_hexagon_S2_brev>;
+def : T_R_pat <S2_vsplatrb, int_hexagon_S2_vsplatrb>;
+
+def : T_R_pat <A2_abs, int_hexagon_A2_abs>;
+def : T_R_pat <A2_abssat, int_hexagon_A2_abssat>;
+def : T_R_pat <A2_negsat, int_hexagon_A2_negsat>;
+
+def : T_R_pat <A2_swiz, int_hexagon_A2_swiz>;
+
+def : T_P_pat <A2_sat, int_hexagon_A2_sat>;
+def : T_R_pat <A2_sath, int_hexagon_A2_sath>;
+def : T_R_pat <A2_satuh, int_hexagon_A2_satuh>;
+def : T_R_pat <A2_satub, int_hexagon_A2_satub>;
+def : T_R_pat <A2_satb, int_hexagon_A2_satb>;
+
+// Vector arithmetic shift right by immediate with truncate and pack.
+def : T_PI_pat<S2_asr_i_svw_trun, int_hexagon_S2_asr_i_svw_trun>;
+
+def : T_RI_pat <S2_asr_i_r, int_hexagon_S2_asr_i_r>;
+def : T_RI_pat <S2_lsr_i_r, int_hexagon_S2_lsr_i_r>;
+def : T_RI_pat <S2_asl_i_r, int_hexagon_S2_asl_i_r>;
+def : T_RI_pat <S2_asr_i_r_rnd, int_hexagon_S2_asr_i_r_rnd>;
+def : T_RI_pat <S2_asr_i_r_rnd_goodsyntax,
+ int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
+
+// Shift left by immediate with saturation.
+def : T_RI_pat <S2_asl_i_r_sat, int_hexagon_S2_asl_i_r_sat>;
+//===----------------------------------------------------------------------===//
+// Template 'def pat' to map tableidx[bhwd] intrinsics to :raw instructions.
+//===----------------------------------------------------------------------===//
+class S2op_tableidx_pat <Intrinsic IntID, InstHexagon OutputInst,
+ SDNodeXForm XformImm>
+ : Pat <(IntID IntRegs:$src1, IntRegs:$src2, u4ImmPred:$src3, u5ImmPred:$src4),
+ (OutputInst IntRegs:$src1, IntRegs:$src2, u4ImmPred:$src3,
+ (XformImm u5ImmPred:$src4))>;
+
+
+// Table Index: Extract and insert bits.
+// Map to the real hardware instructions after subtracting appropriate
+// values from the 4th input operand. Please note that subtraction is not
+// needed for int_hexagon_S2_tableidxb_goodsyntax.
+
+def : Pat <(int_hexagon_S2_tableidxb_goodsyntax IntRegs:$src1, IntRegs:$src2,
+ u4ImmPred:$src3, u5ImmPred:$src4),
+ (S2_tableidxb IntRegs:$src1, IntRegs:$src2,
+ u4ImmPred:$src3, u5ImmPred:$src4)>;
+
+def : S2op_tableidx_pat <int_hexagon_S2_tableidxh_goodsyntax, S2_tableidxh,
+ DEC_CONST_SIGNED>;
+def : S2op_tableidx_pat <int_hexagon_S2_tableidxw_goodsyntax, S2_tableidxw,
+ DEC2_CONST_SIGNED>;
+def : S2op_tableidx_pat <int_hexagon_S2_tableidxd_goodsyntax, S2_tableidxd,
+ DEC3_CONST_SIGNED>;
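+
+// Worked example (immediate values hypothetical), assuming DEC_CONST_SIGNED,
+// DEC2_CONST_SIGNED and DEC3_CONST_SIGNED subtract 1, 2 and 3 respectively:
+//   int_hexagon_S2_tableidxw_goodsyntax(Rs, Rt, #3, #6)
+// selects to the raw instruction
+//   S2_tableidxw(Rs, Rt, #3, #4)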
/********************************************************************
* STYPE/VH *
*********************************************************************/
-// STYPE / VH / Vector absolute value halfwords.
-// Rdd64=vabsh(Rss64)
-def HEXAGON_A2_vabsh:
- di_SInst_di <"vabsh", int_hexagon_A2_vabsh>;
-def HEXAGON_A2_vabshsat:
- di_SInst_di_sat <"vabsh", int_hexagon_A2_vabshsat>;
-
-// STYPE / VH / Vector shift halfwords by immediate.
-// Rdd64=v[asl/asr/lsr]h(Rss64,Rt32)
-def HEXAGON_S2_asl_i_vh:
- di_SInst_disi <"vaslh", int_hexagon_S2_asl_i_vh>;
-def HEXAGON_S2_asr_i_vh:
- di_SInst_disi <"vasrh", int_hexagon_S2_asr_i_vh>;
-def HEXAGON_S2_lsr_i_vh:
- di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_i_vh>;
-
-// STYPE / VH / Vector shift halfwords by register.
-// Rdd64=v[asl/asr/lsl/lsr]w(Rss64,Rt32)
-def HEXAGON_S2_asl_r_vh:
- di_SInst_disi <"vaslh", int_hexagon_S2_asl_r_vh>;
-def HEXAGON_S2_asr_r_vh:
- di_SInst_disi <"vasrh", int_hexagon_S2_asr_r_vh>;
-def HEXAGON_S2_lsl_r_vh:
- di_SInst_disi <"vlslh", int_hexagon_S2_lsl_r_vh>;
-def HEXAGON_S2_lsr_r_vh:
- di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_r_vh>;
+// Vector absolute value halfwords with and without saturation
+// Rdd64=vabsh(Rss64)[:sat]
+def : T_P_pat <A2_vabsh, int_hexagon_A2_vabsh>;
+def : T_P_pat <A2_vabshsat, int_hexagon_A2_vabshsat>;
+
+// Vector shift halfwords by immediate
+// Rdd64=[vasrh/vlsrh/vaslh](Rss64,#u4)
+def : T_PI_pat <S2_asr_i_vh, int_hexagon_S2_asr_i_vh>;
+def : T_PI_pat <S2_lsr_i_vh, int_hexagon_S2_lsr_i_vh>;
+def : T_PI_pat <S2_asl_i_vh, int_hexagon_S2_asl_i_vh>;
+
+// Vector shift halfwords by register
+// Rdd64=[vasrh/vlsrh/vaslh/vlslh](Rss64,Rt32)
+def : T_PR_pat <S2_asr_r_vh, int_hexagon_S2_asr_r_vh>;
+def : T_PR_pat <S2_lsr_r_vh, int_hexagon_S2_lsr_r_vh>;
+def : T_PR_pat <S2_asl_r_vh, int_hexagon_S2_asl_r_vh>;
+def : T_PR_pat <S2_lsl_r_vh, int_hexagon_S2_lsl_r_vh>;
/********************************************************************
* STYPE/VW *
*********************************************************************/
-// STYPE / VW / Vector absolute value words.
-def HEXAGON_A2_vabsw:
- di_SInst_di <"vabsw", int_hexagon_A2_vabsw>;
-def HEXAGON_A2_vabswsat:
- di_SInst_di_sat <"vabsw", int_hexagon_A2_vabswsat>;
-
-// STYPE / VW / Vector shift words by immediate.
-// Rdd64=v[asl/vsl]w(Rss64,Rt32)
-def HEXAGON_S2_asl_i_vw:
- di_SInst_disi <"vaslw", int_hexagon_S2_asl_i_vw>;
-def HEXAGON_S2_asr_i_vw:
- di_SInst_disi <"vasrw", int_hexagon_S2_asr_i_vw>;
-def HEXAGON_S2_lsr_i_vw:
- di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_i_vw>;
-
-// STYPE / VW / Vector shift words by register.
-// Rdd64=v[asl/vsl]w(Rss64,Rt32)
-def HEXAGON_S2_asl_r_vw:
- di_SInst_disi <"vaslw", int_hexagon_S2_asl_r_vw>;
-def HEXAGON_S2_asr_r_vw:
- di_SInst_disi <"vasrw", int_hexagon_S2_asr_r_vw>;
-def HEXAGON_S2_lsl_r_vw:
- di_SInst_disi <"vlslw", int_hexagon_S2_lsl_r_vw>;
-def HEXAGON_S2_lsr_r_vw:
- di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_r_vw>;
-
-// STYPE / VW / Vector shift words with truncate and pack.
-def HEXAGON_S2_asr_r_svw_trun:
- si_SInst_disi <"vasrw", int_hexagon_S2_asr_r_svw_trun>;
-def HEXAGON_S2_asr_i_svw_trun:
- si_SInst_diu5 <"vasrw", int_hexagon_S2_asr_i_svw_trun>;
-
-// LD / Circular loads.
-def HEXAGON_circ_ldd:
- di_LDInstPI_diu4 <"circ_ldd", int_hexagon_circ_ldd>;
+// Vector absolute value words with and without saturation
+def : T_P_pat <A2_vabsw, int_hexagon_A2_vabsw>;
+def : T_P_pat <A2_vabswsat, int_hexagon_A2_vabswsat>;
+
+// Vector shift words by immediate.
+// Rdd64=[vasrw/vlsrw/vaslw](Rss64,#u5)
+def : T_PI_pat <S2_asr_i_vw, int_hexagon_S2_asr_i_vw>;
+def : T_PI_pat <S2_lsr_i_vw, int_hexagon_S2_lsr_i_vw>;
+def : T_PI_pat <S2_asl_i_vw, int_hexagon_S2_asl_i_vw>;
+
+// Vector shift words by register.
+// Rdd64=[vasrw/vlsrw/vaslw/vlslw](Rss64,Rt32)
+def : T_PR_pat <S2_asr_r_vw, int_hexagon_S2_asr_r_vw>;
+def : T_PR_pat <S2_lsr_r_vw, int_hexagon_S2_lsr_r_vw>;
+def : T_PR_pat <S2_asl_r_vw, int_hexagon_S2_asl_r_vw>;
+def : T_PR_pat <S2_lsl_r_vw, int_hexagon_S2_lsl_r_vw>;
+
+// Vector shift words with truncate and pack
+
+def : T_PR_pat <S2_asr_r_svw_trun, int_hexagon_S2_asr_r_svw_trun>;
+
+def : T_R_pat<L2_loadw_locked, int_hexagon_L2_loadw_locked>;
+def : T_R_pat<L4_loadd_locked, int_hexagon_L4_loadd_locked>;
+
+def: Pat<(i32 (int_hexagon_S2_storew_locked (I32:$Rs), (I32:$Rt))),
+ (i32 (C2_tfrpr (S2_storew_locked (I32:$Rs), (I32:$Rt))))>;
+def: Pat<(i32 (int_hexagon_S4_stored_locked (I32:$Rs), (I64:$Rt))),
+ (i32 (C2_tfrpr (S4_stored_locked (I32:$Rs), (I64:$Rt))))>;
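+
+// The locked stores are store-conditionals that produce a predicate
+// (success/failure); wrapping the result in C2_tfrpr yields the i32 status
+// the intrinsics return, as needed for an LL/SC retry loop built from
+// L2_loadw_locked and S2_storew_locked.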
include "HexagonIntrinsicsV3.td"
include "HexagonIntrinsicsV4.td"
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
index 2788101..4c28b28 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
@@ -13,13 +13,13 @@
//
def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
(i64
- (COMBINE_rr
- (HEXAGON_M2_maci
- (HEXAGON_M2_maci
+ (A2_combinew
+ (M2_maci
+ (M2_maci
(i32
(EXTRACT_SUBREG
(i64
- (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ (M2_dpmpyuu_s0 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
subreg_loreg)))),
@@ -31,7 +31,8 @@ def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
(i32
(EXTRACT_SUBREG
(i64
- (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (M2_dpmpyuu_s0
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
subreg_loreg)))), subreg_loreg))))>;
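+// Sketch of the expansion above (schoolbook 32-bit decomposition):
+//   lo64 = zext(lo(a)) * zext(lo(b))             // M2_dpmpyuu_s0
+//   hi   = hi(lo64) + hi(a)*lo(b) + lo(a)*hi(b)  // the two M2_maci steps
+//   res  = combine(hi, lo(lo64))                 // A2_combinew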
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV3.td b/lib/Target/Hexagon/HexagonIntrinsicsV3.td
index 2a54e62..6152cb0 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV3.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV3.td
@@ -11,40 +11,17 @@
//
//===----------------------------------------------------------------------===//
-
-
-
-// MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
-def Hexagon_M2_vrcmpys_s1:
- di_MInst_disi_s1_sat <"vrcmpys", int_hexagon_M2_vrcmpys_s1>;
-def Hexagon_M2_vrcmpys_acc_s1:
- di_MInst_didisi_acc_s1_sat <"vrcmpys", int_hexagon_M2_vrcmpys_acc_s1>;
-def Hexagon_M2_vrcmpys_s1rp:
- si_MInst_disi_s1_rnd_sat <"vrcmpys", int_hexagon_M2_vrcmpys_s1rp>;
-
-
-
-
-/********************************************************************
-* MTYPE/VB *
-*********************************************************************/
-
-// MTYPE / VB / Vector reduce add unsigned bytes.
-def Hexagon_M2_vradduh:
- si_MInst_didi <"vradduh", int_hexagon_M2_vradduh>;
-
-
-/********************************************************************
-* ALU64/ALU *
-*********************************************************************/
-
-// ALU64 / ALU / Add.
-def Hexagon_A2_addsp:
- di_ALU64_sidi <"add", int_hexagon_A2_addsp>;
-def Hexagon_A2_addpsat:
- di_ALU64_didi <"add", int_hexagon_A2_addpsat>;
-
-def Hexagon_A2_maxp:
- di_ALU64_didi <"max", int_hexagon_A2_maxp>;
-def Hexagon_A2_maxup:
- di_ALU64_didi <"maxu", int_hexagon_A2_maxup>;
+// Vector reduce complex multiply real or imaginary
+def : T_PR_pat <M2_vrcmpys_s1, int_hexagon_M2_vrcmpys_s1>;
+def : T_PPR_pat<M2_vrcmpys_acc_s1, int_hexagon_M2_vrcmpys_acc_s1>;
+def : T_PR_pat <M2_vrcmpys_s1rp, int_hexagon_M2_vrcmpys_s1rp>;
+
+// Vector reduce add unsigned halfwords
+def : T_PP_pat<M2_vradduh, int_hexagon_M2_vradduh>;
+
+def: T_RP_pat<A2_addsp, int_hexagon_A2_addsp>;
+def: T_PP_pat<A2_addpsat, int_hexagon_A2_addpsat>;
+def: T_PP_pat<A2_minp, int_hexagon_A2_minp>;
+def: T_PP_pat<A2_minup, int_hexagon_A2_minup>;
+def: T_PP_pat<A2_maxp, int_hexagon_A2_maxp>;
+def: T_PP_pat<A2_maxup, int_hexagon_A2_maxup>;
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV4.td b/lib/Target/Hexagon/HexagonIntrinsicsV4.td
index 77b148b..8d068eb 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV4.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV4.td
@@ -12,359 +12,307 @@
// 80-V9418-12 Rev. A
// June 15, 2010
+// Vector reduce multiply word by signed half (32x16)
+// Rdd=vrmpyweh(Rss,Rtt)[:<<1]
+def : T_PP_pat <M4_vrmpyeh_s0, int_hexagon_M4_vrmpyeh_s0>;
+def : T_PP_pat <M4_vrmpyeh_s1, int_hexagon_M4_vrmpyeh_s1>;
+
+// Rdd=vrmpywoh(Rss,Rtt)[:<<1]
+def : T_PP_pat <M4_vrmpyoh_s0, int_hexagon_M4_vrmpyoh_s0>;
+def : T_PP_pat <M4_vrmpyoh_s1, int_hexagon_M4_vrmpyoh_s1>;
+
+// Rdd+=vrmpyweh(Rss,Rtt)[:<<1]
+def : T_PPP_pat <M4_vrmpyeh_acc_s0, int_hexagon_M4_vrmpyeh_acc_s0>;
+def : T_PPP_pat <M4_vrmpyeh_acc_s1, int_hexagon_M4_vrmpyeh_acc_s1>;
+
+// Rdd+=vrmpywoh(Rss,Rtt)[:<<1]
+def : T_PPP_pat <M4_vrmpyoh_acc_s0, int_hexagon_M4_vrmpyoh_acc_s0>;
+def : T_PPP_pat <M4_vrmpyoh_acc_s1, int_hexagon_M4_vrmpyoh_acc_s1>;
+
+// Vector multiply halfwords, signed by unsigned
+// Rdd=vmpyhsu(Rs,Rt)[:<<1]:sat
+def : T_RR_pat <M2_vmpy2su_s0, int_hexagon_M2_vmpy2su_s0>;
+def : T_RR_pat <M2_vmpy2su_s1, int_hexagon_M2_vmpy2su_s1>;
+
+// Rxx+=vmpyhsu(Rs,Rt)[:<<1]:sat
+def : T_PRR_pat <M2_vmac2su_s0, int_hexagon_M2_vmac2su_s0>;
+def : T_PRR_pat <M2_vmac2su_s1, int_hexagon_M2_vmac2su_s1>;
+
+// Vector polynomial multiply halfwords
+// Rdd=vpmpyh(Rs,Rt)
+def : T_RR_pat <M4_vpmpyh, int_hexagon_M4_vpmpyh>;
+// Rxx[^]=vpmpyh(Rs,Rt)
+def : T_PRR_pat <M4_vpmpyh_acc, int_hexagon_M4_vpmpyh_acc>;
+
+// Polynomial multiply words
+// Rdd=pmpyw(Rs,Rt)
+def : T_RR_pat <M4_pmpyw, int_hexagon_M4_pmpyw>;
+// Rxx^=pmpyw(Rs,Rt)
+def : T_PRR_pat <M4_pmpyw_acc, int_hexagon_M4_pmpyw_acc>;
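+// Polynomial multiplies are carry-less (multiplication in GF(2)[x], with XOR
+// in place of addition), which is why the accumulating forms combine with ^=
+// rather than +=.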
+
+// Rxx^=asr(Rss,Rt)
+def : T_PPR_pat <S2_asr_r_p_xor, int_hexagon_S2_asr_r_p_xor>;
+// Rxx^=asl(Rss,Rt)
+def : T_PPR_pat <S2_asl_r_p_xor, int_hexagon_S2_asl_r_p_xor>;
+// Rxx^=lsr(Rss,Rt)
+def : T_PPR_pat <S2_lsr_r_p_xor, int_hexagon_S2_lsr_r_p_xor>;
+// Rxx^=lsl(Rss,Rt)
+def : T_PPR_pat <S2_lsl_r_p_xor, int_hexagon_S2_lsl_r_p_xor>;
+
+// Multiply and use upper result
+def : MType_R32_pat <int_hexagon_M2_mpysu_up, M2_mpysu_up>;
+def : MType_R32_pat <int_hexagon_M2_mpy_up_s1, M2_mpy_up_s1>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyh_s1, M2_hmmpyh_s1>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyl_s1, M2_hmmpyl_s1>;
+def : MType_R32_pat <int_hexagon_M2_mpy_up_s1_sat, M2_mpy_up_s1_sat>;
+
+// Vector reduce add halfwords
+def : Pat <(int_hexagon_M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2),
+ (M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2)>;
+
+def : T_P_pat <S2_brevp, int_hexagon_S2_brevp>;
+
+def: T_P_pat <S2_ct0p, int_hexagon_S2_ct0p>;
+def: T_P_pat <S2_ct1p, int_hexagon_S2_ct1p>;
+def: T_RR_pat<C4_nbitsset, int_hexagon_C4_nbitsset>;
+def: T_RR_pat<C4_nbitsclr, int_hexagon_C4_nbitsclr>;
+def: T_RI_pat<C4_nbitsclri, int_hexagon_C4_nbitsclri>;
+
+
+class vcmpImm_pat <InstHexagon MI, Intrinsic IntID, PatLeaf immPred> :
+ Pat <(IntID (i64 DoubleRegs:$src1), immPred:$src2),
+ (MI (i64 DoubleRegs:$src1), immPred:$src2)>;
+
+def : vcmpImm_pat <A4_vcmpbeqi, int_hexagon_A4_vcmpbeqi, u8ImmPred>;
+def : vcmpImm_pat <A4_vcmpbgti, int_hexagon_A4_vcmpbgti, s8ImmPred>;
+def : vcmpImm_pat <A4_vcmpbgtui, int_hexagon_A4_vcmpbgtui, u7ImmPred>;
+
+def : vcmpImm_pat <A4_vcmpheqi, int_hexagon_A4_vcmpheqi, s8ImmPred>;
+def : vcmpImm_pat <A4_vcmphgti, int_hexagon_A4_vcmphgti, s8ImmPred>;
+def : vcmpImm_pat <A4_vcmphgtui, int_hexagon_A4_vcmphgtui, u7ImmPred>;
+
+def : vcmpImm_pat <A4_vcmpweqi, int_hexagon_A4_vcmpweqi, s8ImmPred>;
+def : vcmpImm_pat <A4_vcmpwgti, int_hexagon_A4_vcmpwgti, s8ImmPred>;
+def : vcmpImm_pat <A4_vcmpwgtui, int_hexagon_A4_vcmpwgtui, u7ImmPred>;
+
+def : T_PP_pat<A4_vcmpbeq_any, int_hexagon_A4_vcmpbeq_any>;
+
+def : T_RR_pat<A4_cmpbeq, int_hexagon_A4_cmpbeq>;
+def : T_RR_pat<A4_cmpbgt, int_hexagon_A4_cmpbgt>;
+def : T_RR_pat<A4_cmpbgtu, int_hexagon_A4_cmpbgtu>;
+def : T_RR_pat<A4_cmpheq, int_hexagon_A4_cmpheq>;
+def : T_RR_pat<A4_cmphgt, int_hexagon_A4_cmphgt>;
+def : T_RR_pat<A4_cmphgtu, int_hexagon_A4_cmphgtu>;
+
+def : T_RI_pat<A4_cmpbeqi, int_hexagon_A4_cmpbeqi>;
+def : T_RI_pat<A4_cmpbgti, int_hexagon_A4_cmpbgti>;
+def : T_RI_pat<A4_cmpbgtui, int_hexagon_A4_cmpbgtui>;
+
+def : T_RI_pat<A4_cmpheqi, int_hexagon_A4_cmpheqi>;
+def : T_RI_pat<A4_cmphgti, int_hexagon_A4_cmphgti>;
+def : T_RI_pat<A4_cmphgtui, int_hexagon_A4_cmphgtui>;
+
+def : T_RP_pat <A4_boundscheck, int_hexagon_A4_boundscheck>;
+
+def : T_PR_pat<A4_tlbmatch, int_hexagon_A4_tlbmatch>;
+
+def : Pat <(int_hexagon_M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ (M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
+
+def : T_IRR_pat <M4_mpyrr_addi, int_hexagon_M4_mpyrr_addi>;
+def : T_IRI_pat <M4_mpyri_addi, int_hexagon_M4_mpyri_addi>;
+def : T_RIR_pat <M4_mpyri_addr_u2, int_hexagon_M4_mpyri_addr_u2>;
+def : T_RRI_pat <M4_mpyri_addr, int_hexagon_M4_mpyri_addr>;
+// Multiply 32x32 and use upper result
+def : T_RRR_pat <M4_mac_up_s1_sat, int_hexagon_M4_mac_up_s1_sat>;
+def : T_RRR_pat <M4_nac_up_s1_sat, int_hexagon_M4_nac_up_s1_sat>;
+
+// Complex multiply 32x16
+def : T_PR_pat <M4_cmpyi_wh, int_hexagon_M4_cmpyi_wh>;
+def : T_PR_pat <M4_cmpyr_wh, int_hexagon_M4_cmpyr_wh>;
+
+def : T_PR_pat <M4_cmpyi_whc, int_hexagon_M4_cmpyi_whc>;
+def : T_PR_pat <M4_cmpyr_whc, int_hexagon_M4_cmpyr_whc>;
+
+def : T_PP_pat<A4_andnp, int_hexagon_A4_andnp>;
+def : T_PP_pat<A4_ornp, int_hexagon_A4_ornp>;
+
+// Complex add/sub halfwords/words
+def : T_PP_pat <S4_vxaddsubw, int_hexagon_S4_vxaddsubw>;
+def : T_PP_pat <S4_vxsubaddw, int_hexagon_S4_vxsubaddw>;
+def : T_PP_pat <S4_vxaddsubh, int_hexagon_S4_vxaddsubh>;
+def : T_PP_pat <S4_vxsubaddh, int_hexagon_S4_vxsubaddh>;
+
+def : T_PP_pat <S4_vxaddsubhr, int_hexagon_S4_vxaddsubhr>;
+def : T_PP_pat <S4_vxsubaddhr, int_hexagon_S4_vxsubaddhr>;
+
+// Extract bitfield
+def : T_PP_pat <S4_extractp_rp, int_hexagon_S4_extractp_rp>;
+def : T_RP_pat <S4_extract_rp, int_hexagon_S4_extract_rp>;
+def : T_PII_pat <S4_extractp, int_hexagon_S4_extractp>;
+def : T_RII_pat <S4_extract, int_hexagon_S4_extract>;
+
+// Vector conditional negate
+// Rdd=vcnegh(Rss,Rt)
+def : T_PR_pat <S2_vcnegh, int_hexagon_S2_vcnegh>;
+
+// Shift an immediate left by register amount
+def : T_IR_pat<S4_lsli, int_hexagon_S4_lsli>;
+
+// Vector reduce maximum halfwords
+def : T_PPR_pat <A4_vrmaxh, int_hexagon_A4_vrmaxh>;
+def : T_PPR_pat <A4_vrmaxuh, int_hexagon_A4_vrmaxuh>;
-//
-// ALU 32 types.
-//
-
-class si_ALU32_sisi_not<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, ~$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class di_ALU32_s8si<string opc, Intrinsic IntID>
- : ALU32_rr<(outs DoubleRegs:$dst), (ins s8Imm:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "(#$src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID imm:$src1, IntRegs:$src2))]>;
+// Vector reduce maximum words
+def : T_PPR_pat <A4_vrmaxw, int_hexagon_A4_vrmaxw>;
+def : T_PPR_pat <A4_vrmaxuw, int_hexagon_A4_vrmaxuw>;
-class di_ALU32_sis8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+// Vector reduce minimum halfwords
+def : T_PPR_pat <A4_vrminh, int_hexagon_A4_vrminh>;
+def : T_PPR_pat <A4_vrminuh, int_hexagon_A4_vrminuh>;
-class qi_neg_ALU32_sisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+// Vector reduce minimum words
+def : T_PPR_pat <A4_vrminw, int_hexagon_A4_vrminw>;
+def : T_PPR_pat <A4_vrminuw, int_hexagon_A4_vrminuw>;
-class qi_neg_ALU32_sis10<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+// Rotate and reduce bytes
+def : Pat <(int_hexagon_S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2,
+ u2ImmPred:$src3),
+ (S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2, u2ImmPred:$src3)>;
+
+// Rotate and reduce bytes with accumulation
+// Rxx+=vrcrotate(Rss,Rt,#u2)
+def : Pat <(int_hexagon_S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3, u2ImmPred:$src4),
+ (S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3, u2ImmPred:$src4)>;
+
+// Vector conditional negate
+def : T_PPR_pat<S2_vrcnegh, int_hexagon_S2_vrcnegh>;
-class qi_neg_ALU32_siu9<string opc, Intrinsic IntID>
- : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u9Imm:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+// Logical xor with xor accumulation
+def : T_PPP_pat<M4_xor_xacc, int_hexagon_M4_xor_xacc>;
+
+// ALU64 - Vector min/max byte
+def : T_PP_pat <A2_vminb, int_hexagon_A2_vminb>;
+def : T_PP_pat <A2_vmaxb, int_hexagon_A2_vmaxb>;
+
+// Shift and add/sub/and/or
+def : T_IRI_pat <S4_andi_asl_ri, int_hexagon_S4_andi_asl_ri>;
+def : T_IRI_pat <S4_ori_asl_ri, int_hexagon_S4_ori_asl_ri>;
+def : T_IRI_pat <S4_addi_asl_ri, int_hexagon_S4_addi_asl_ri>;
+def : T_IRI_pat <S4_subi_asl_ri, int_hexagon_S4_subi_asl_ri>;
+def : T_IRI_pat <S4_andi_lsr_ri, int_hexagon_S4_andi_lsr_ri>;
+def : T_IRI_pat <S4_ori_lsr_ri, int_hexagon_S4_ori_lsr_ri>;
+def : T_IRI_pat <S4_addi_lsr_ri, int_hexagon_S4_addi_lsr_ri>;
+def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;
-class si_neg_ALU32_sisi<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class si_neg_ALU32_sis8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class si_ALU32_sis8<string opc, Intrinsic IntID>
- : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-
-//
-// SInst Classes.
-//
-class qi_neg_SInst_qiqi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_SInst_qi_andqiqi_neg<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, and($src2, !$src3)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class qi_SInst_qi_andqiqi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, and($src2, $src3)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class qi_SInst_qi_orqiqi_neg<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, or($src2, !$src3)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class qi_SInst_qi_orqiqi<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, or($src2, $src3)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_SInst_si_addsis6<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, add($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- imm:$src3))]>;
-
-class si_SInst_si_subs6si<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, sub(#$src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2,
- IntRegs:$src3))]>;
-
-class di_ALU64_didi_neg<string opc, Intrinsic IntID>
- : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, ~$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class di_MInst_dididi_xacc<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2),
- !strconcat("$dst ^= ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2))],
- "$dst2 = $dst">;
-
-class si_MInst_sisisi_and<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst &= ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_MInst_sisisi_andn<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst &= ", !strconcat(opc , "($src2, ~$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_SInst_sisis10_andi<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s10Imm:$src3),
- !strconcat("$dst = ", !strconcat(opc ,
- "($src1, and($src2, #$src3))")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
- imm:$src3))]>;
-
-class si_MInst_sisisi_xor<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst ^= ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_MInst_sisisi_xorn<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst ^= ", !strconcat(opc , "($src2, ~$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_SInst_sisis10_or<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2, s10Imm:$src3),
- !strconcat("$dst |= ", !strconcat(opc , "($src2, #$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- imm:$src3))]>;
-
-class si_MInst_sisisi_or<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst |= ", !strconcat(opc , "($src2, $src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_MInst_sisisi_orn<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst |= ", !strconcat(opc , "($src2, ~$src3)")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-class si_SInst_siu5_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+// Split bitfield
+def : T_RI_pat <A4_bitspliti, int_hexagon_A4_bitspliti>;
+def : T_RR_pat <A4_bitsplit, int_hexagon_A4_bitsplit>;
+def: T_RR_pat<S4_parity, int_hexagon_S4_parity>;
+
+def: T_RI_pat<S4_ntstbit_i, int_hexagon_S4_ntstbit_i>;
+def: T_RR_pat<S4_ntstbit_r, int_hexagon_S4_ntstbit_r>;
+
+def: T_RI_pat<S4_clbaddi, int_hexagon_S4_clbaddi>;
+def: T_PI_pat<S4_clbpaddi, int_hexagon_S4_clbpaddi>;
+def: T_P_pat <S4_clbpnorm, int_hexagon_S4_clbpnorm>;
/********************************************************************
* ALU32/ALU *
*********************************************************************/
// ALU32 / ALU / Logical Operations.
-def Hexagon_A4_orn : si_ALU32_sisi_not <"or", int_hexagon_A4_orn>;
-def Hexagon_A4_andn : si_ALU32_sisi_not <"and", int_hexagon_A4_andn>;
-
+def: T_RR_pat<A4_andn, int_hexagon_A4_andn>;
+def: T_RR_pat<A4_orn, int_hexagon_A4_orn>;
/********************************************************************
* ALU32/PERM *
*********************************************************************/
-// ALU32 / PERM / Combine Words Into Doublewords.
-def Hexagon_A4_combineir : di_ALU32_s8si <"combine", int_hexagon_A4_combineir>;
-def Hexagon_A4_combineri : di_ALU32_sis8 <"combine", int_hexagon_A4_combineri>;
-
+// Combine Words Into Doublewords.
+def: T_RI_pat<A4_combineri, int_hexagon_A4_combineri, s8ExtPred>;
+def: T_IR_pat<A4_combineir, int_hexagon_A4_combineir, s8ExtPred>;
/********************************************************************
* ALU32/PRED *
*********************************************************************/
-// ALU32 / PRED / Conditional Shift Halfword.
-// ALU32 / PRED / Conditional Sign Extend.
-// ALU32 / PRED / Conditional Zero Extend.
-// ALU32 / PRED / Compare.
-def Hexagon_C4_cmpltei : qi_neg_ALU32_sis10 <"cmp.gt", int_hexagon_C4_cmpltei>;
-def Hexagon_C4_cmplte : qi_neg_ALU32_sisi <"cmp.gt", int_hexagon_C4_cmplte>;
-def Hexagon_C4_cmplteu : qi_neg_ALU32_sisi <"cmp.gtu",int_hexagon_C4_cmplteu>;
+// Compare
+def : T_RI_pat<C4_cmpneqi, int_hexagon_C4_cmpneqi, s10ExtPred>;
+def : T_RI_pat<C4_cmpltei, int_hexagon_C4_cmpltei, s10ExtPred>;
+def : T_RI_pat<C4_cmplteui, int_hexagon_C4_cmplteui, u9ExtPred>;
-def: T_RI_pat<C4_cmpneqi, int_hexagon_C4_cmpneqi>;
-def: T_RI_pat<C4_cmpltei, int_hexagon_C4_cmpltei>;
-def: T_RI_pat<C4_cmplteui, int_hexagon_C4_cmplteui>;
-
-// ALU32 / PRED / cmpare To General Register.
-def Hexagon_A4_rcmpneq : si_neg_ALU32_sisi <"cmp.eq", int_hexagon_A4_rcmpneq>;
-def Hexagon_A4_rcmpneqi: si_neg_ALU32_sis8 <"cmp.eq", int_hexagon_A4_rcmpneqi>;
-def Hexagon_A4_rcmpeq : si_ALU32_sisi <"cmp.eq", int_hexagon_A4_rcmpeq>;
-def Hexagon_A4_rcmpeqi : si_ALU32_sis8 <"cmp.eq", int_hexagon_A4_rcmpeqi>;
+def: T_RR_pat<A4_rcmpeq, int_hexagon_A4_rcmpeq>;
+def: T_RR_pat<A4_rcmpneq, int_hexagon_A4_rcmpneq>;
+def: T_RI_pat<A4_rcmpeqi, int_hexagon_A4_rcmpeqi>;
+def: T_RI_pat<A4_rcmpneqi, int_hexagon_A4_rcmpneqi>;
/********************************************************************
* CR *
*********************************************************************/
-// CR / Corner Detection Acceleration.
-def Hexagon_C4_fastcorner9:
- qi_SInst_qiqi<"fastcorner9", int_hexagon_C4_fastcorner9>;
-def Hexagon_C4_fastcorner9_not:
- qi_neg_SInst_qiqi<"fastcorner9",int_hexagon_C4_fastcorner9_not>;
-
// CR / Logical Operations On Predicates.
-def Hexagon_C4_and_andn:
- qi_SInst_qi_andqiqi_neg <"and", int_hexagon_C4_and_andn>;
-def Hexagon_C4_and_and:
- qi_SInst_qi_andqiqi <"and", int_hexagon_C4_and_and>;
-def Hexagon_C4_and_orn:
- qi_SInst_qi_orqiqi_neg <"and", int_hexagon_C4_and_orn>;
-def Hexagon_C4_and_or:
- qi_SInst_qi_orqiqi <"and", int_hexagon_C4_and_or>;
-def Hexagon_C4_or_andn:
- qi_SInst_qi_andqiqi_neg <"or", int_hexagon_C4_or_andn>;
-def Hexagon_C4_or_and:
- qi_SInst_qi_andqiqi <"or", int_hexagon_C4_or_and>;
-def Hexagon_C4_or_orn:
- qi_SInst_qi_orqiqi_neg <"or", int_hexagon_C4_or_orn>;
-def Hexagon_C4_or_or:
- qi_SInst_qi_orqiqi <"or", int_hexagon_C4_or_or>;
+class qi_CRInst_qiqiqi_pat<Intrinsic IntID, InstHexagon Inst> :
+ Pat<(i32 (IntID IntRegs:$Rs, IntRegs:$Rt, IntRegs:$Ru)),
+ (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs),
+ (C2_tfrrp IntRegs:$Rt),
+ (C2_tfrrp IntRegs:$Ru))))>;
+
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_and, C4_and_and>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_andn, C4_and_andn>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_or, C4_and_or>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_orn, C4_and_orn>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_and, C4_or_and>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_andn, C4_or_andn>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_or, C4_or_or>;
+def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_orn, C4_or_orn>;
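+
+// As with the two-operand CR patterns earlier, all three i32 inputs are
+// moved into predicate registers and the predicate result is moved back,
+// preserving the intrinsics' all-i32 signatures.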
/********************************************************************
* XTYPE/ALU *
*********************************************************************/
-// XTYPE / ALU / Add And Accumulate.
-def Hexagon_S4_addaddi:
- si_SInst_si_addsis6 <"add", int_hexagon_S4_addaddi>;
-def Hexagon_S4_subaddi:
- si_SInst_si_subs6si <"add", int_hexagon_S4_subaddi>;
+// Add And Accumulate.
-// XTYPE / ALU / Logical Doublewords.
-def Hexagon_S4_andnp:
- di_ALU64_didi_neg <"and", int_hexagon_A4_andnp>;
-def Hexagon_S4_ornp:
- di_ALU64_didi_neg <"or", int_hexagon_A4_ornp>;
+def : T_RRI_pat <S4_addaddi, int_hexagon_S4_addaddi>;
+def : T_RIR_pat <S4_subaddi, int_hexagon_S4_subaddi>;
-// XTYPE / ALU / Logical-logical Doublewords.
-def Hexagon_M4_xor_xacc:
- di_MInst_dididi_xacc <"xor", int_hexagon_M4_xor_xacc>;
// XTYPE / ALU / Logical-logical Words.
-def HEXAGON_M4_and_and:
- si_MInst_sisisi_and <"and", int_hexagon_M4_and_and>;
-def HEXAGON_M4_and_or:
- si_MInst_sisisi_and <"or", int_hexagon_M4_and_or>;
-def HEXAGON_M4_and_xor:
- si_MInst_sisisi_and <"xor", int_hexagon_M4_and_xor>;
-def HEXAGON_M4_and_andn:
- si_MInst_sisisi_andn <"and", int_hexagon_M4_and_andn>;
-def HEXAGON_M4_xor_and:
- si_MInst_sisisi_xor <"and", int_hexagon_M4_xor_and>;
-def HEXAGON_M4_xor_or:
- si_MInst_sisisi_xor <"or", int_hexagon_M4_xor_or>;
-def HEXAGON_M4_xor_andn:
- si_MInst_sisisi_xorn <"and", int_hexagon_M4_xor_andn>;
-def HEXAGON_M4_or_and:
- si_MInst_sisisi_or <"and", int_hexagon_M4_or_and>;
-def HEXAGON_M4_or_or:
- si_MInst_sisisi_or <"or", int_hexagon_M4_or_or>;
-def HEXAGON_M4_or_xor:
- si_MInst_sisisi_or <"xor", int_hexagon_M4_or_xor>;
-def HEXAGON_M4_or_andn:
- si_MInst_sisisi_orn <"and", int_hexagon_M4_or_andn>;
-def HEXAGON_S4_or_andix:
- si_SInst_sisis10_andi <"or", int_hexagon_S4_or_andix>;
-def HEXAGON_S4_or_andi:
- si_SInst_sisis10_or <"and", int_hexagon_S4_or_andi>;
-def HEXAGON_S4_or_ori:
- si_SInst_sisis10_or <"or", int_hexagon_S4_or_ori>;
-
-// XTYPE / ALU / Modulo wrap.
-def HEXAGON_A4_modwrapu:
- si_ALU64_sisi <"modwrap", int_hexagon_A4_modwrapu>;
-
-// XTYPE / ALU / Round.
-def HEXAGON_A4_cround_ri:
- si_SInst_siu5 <"cround", int_hexagon_A4_cround_ri>;
-def HEXAGON_A4_cround_rr:
- si_SInst_sisi <"cround", int_hexagon_A4_cround_rr>;
-def HEXAGON_A4_round_ri:
- si_SInst_siu5 <"round", int_hexagon_A4_round_ri>;
-def HEXAGON_A4_round_rr:
- si_SInst_sisi <"round", int_hexagon_A4_round_rr>;
-def HEXAGON_A4_round_ri_sat:
- si_SInst_siu5_sat <"round", int_hexagon_A4_round_ri_sat>;
-def HEXAGON_A4_round_rr_sat:
- si_SInst_sisi_sat <"round", int_hexagon_A4_round_rr_sat>;
-
-// XTYPE / ALU / Vector reduce add unsigned halfwords.
-// XTYPE / ALU / Vector add bytes.
-// XTYPE / ALU / Vector conditional negate.
-// XTYPE / ALU / Vector maximum bytes.
-// XTYPE / ALU / Vector reduce maximum halfwords.
-// XTYPE / ALU / Vector reduce maximum words.
-// XTYPE / ALU / Vector minimum bytes.
-// XTYPE / ALU / Vector reduce minimum halfwords.
-// XTYPE / ALU / Vector reduce minimum words.
-// XTYPE / ALU / Vector subtract bytes.
-
-
-/********************************************************************
-* XTYPE/BIT *
-*********************************************************************/
-
-// XTYPE / BIT / Count leading.
-// XTYPE / BIT / Count trailing.
-// XTYPE / BIT / Extract bitfield.
-// XTYPE / BIT / Masked parity.
-// XTYPE / BIT / Bit reverse.
-// XTYPE / BIT / Split bitfield.
-
-
-/********************************************************************
-* XTYPE/COMPLEX *
-*********************************************************************/
-
-// XTYPE / COMPLEX / Complex add/sub halfwords.
-// XTYPE / COMPLEX / Complex add/sub words.
-// XTYPE / COMPLEX / Complex multiply 32x16.
-// XTYPE / COMPLEX / Vector reduce complex rotate.
-
-
-/********************************************************************
-* XTYPE/MPY *
-*********************************************************************/
-
-// XTYPE / COMPLEX / Complex add/sub halfwords.
+def : T_RRR_pat <M4_or_xor, int_hexagon_M4_or_xor>;
+def : T_RRR_pat <M4_and_xor, int_hexagon_M4_and_xor>;
+def : T_RRR_pat <M4_or_and, int_hexagon_M4_or_and>;
+def : T_RRR_pat <M4_and_and, int_hexagon_M4_and_and>;
+def : T_RRR_pat <M4_xor_and, int_hexagon_M4_xor_and>;
+def : T_RRR_pat <M4_or_or, int_hexagon_M4_or_or>;
+def : T_RRR_pat <M4_and_or, int_hexagon_M4_and_or>;
+def : T_RRR_pat <M4_xor_or, int_hexagon_M4_xor_or>;
+def : T_RRR_pat <M4_or_andn, int_hexagon_M4_or_andn>;
+def : T_RRR_pat <M4_and_andn, int_hexagon_M4_and_andn>;
+def : T_RRR_pat <M4_xor_andn, int_hexagon_M4_xor_andn>;
+
+def : T_RRI_pat <S4_or_andi, int_hexagon_S4_or_andi>;
+def : T_RRI_pat <S4_or_andix, int_hexagon_S4_or_andix>;
+def : T_RRI_pat <S4_or_ori, int_hexagon_S4_or_ori>;
+
+// Modulo wrap.
+def : T_RR_pat <A4_modwrapu, int_hexagon_A4_modwrapu>;
+
+// Arithmetic/Convergent round
+// Rd=[cround|round](Rs,Rt)[:sat]
+// Rd=[cround|round](Rs,#u5)[:sat]
+def : T_RI_pat <A4_cround_ri, int_hexagon_A4_cround_ri>;
+def : T_RR_pat <A4_cround_rr, int_hexagon_A4_cround_rr>;
+
+def : T_RI_pat <A4_round_ri, int_hexagon_A4_round_ri>;
+def : T_RR_pat <A4_round_rr, int_hexagon_A4_round_rr>;
+
+def : T_RI_pat <A4_round_ri_sat, int_hexagon_A4_round_ri_sat>;
+def : T_RR_pat <A4_round_rr_sat, int_hexagon_A4_round_rr_sat>;
+
+def : T_P_pat <A2_roundsat, int_hexagon_A2_roundsat>;
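For the round/cround pair above, a hedged scalar model: "round" is assumed
to be biased rounding (ties round up) and "cround" convergent rounding
(ties round to even), per the Hexagon manual's terminology rather than
anything in this patch; the model_* names are hypothetical.

#include <cassert>
#include <cstdint>

// Assumed semantics of A4_round_ri: shift right by u5 with biased
// rounding (add half, then shift).
static int32_t model_round(int32_t rs, unsigned u5) {
  if (u5 == 0) return rs;
  return int32_t(((int64_t)rs + (INT64_C(1) << (u5 - 1))) >> u5);
}

// Assumed semantics of A4_cround_ri: convergent rounding; an exact
// halfway remainder rounds toward the even quotient.
static int32_t model_cround(int32_t rs, unsigned u5) {
  if (u5 == 0) return rs;
  int32_t q = rs >> u5;                // floor division by 2^u5
  int32_t rem = rs & ((1 << u5) - 1);
  int32_t half = 1 << (u5 - 1);
  if (rem > half || (rem == half && (q & 1)))
    ++q;
  return q;
}

int main() {
  assert(model_round(6, 2) == 2 && model_cround(6, 2) == 2); // 1.5 -> 2 both
  assert(model_round(2, 2) == 1 && model_cround(2, 2) == 0); // 0.5: up vs even
  return 0;
}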
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/lib/Target/Hexagon/HexagonIntrinsicsV5.td
index 1d44b52..60e6b1e 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV5.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV5.td
@@ -1,395 +1,111 @@
-class sf_SInst_sf<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class si_SInst_sf<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class sf_SInst_si<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class sf_SInst_di<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-class sf_SInst_df<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-class si_SInst_df<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-class df_SInst_sf<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class di_SInst_sf<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class df_SInst_si<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
-
-class df_SInst_df<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-class di_SInst_df<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-
-class df_SInst_di<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
- !strconcat("$dst = ", !strconcat(opc , "($src1)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
-
-class sf_MInst_sfsf<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class df_MInst_dfdf<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class qi_ALU64_dfdf<string opc, Intrinsic IntID>
- : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
-
-class qi_ALU64_dfu5<string opc, Intrinsic IntID>
- : ALU64_ri<(outs PredRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-
-class sf_MInst_sfsfsf_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$dst2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1,
- IntRegs:$src2, IntRegs:$dst2))],
- "$dst2 = $dst">;
-
-class sf_MInst_sfsfsf_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$dst2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2)")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1,
- IntRegs:$src2, IntRegs:$dst2))],
- "$dst2 = $dst">;
-
-
-class sf_MInst_sfsfsfsi_sc<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2, IntRegs:$src3),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2, $src3):scale")),
- [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
- IntRegs:$src2, IntRegs:$src3))],
- "$dst2 = $dst">;
-
-class sf_MInst_sfsfsf_acc_lib<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$dst2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2):lib")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1,
- IntRegs:$src2, IntRegs:$dst2))],
- "$dst2 = $dst">;
-
-class sf_MInst_sfsfsf_nac_lib<string opc, Intrinsic IntID>
- : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
- IntRegs:$dst2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2):lib")),
- [(set IntRegs:$dst, (IntID IntRegs:$src1,
- IntRegs:$src2, IntRegs:$dst2))],
- "$dst2 = $dst">;
-
-class df_MInst_dfdfdf_acc<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$dst2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2, DoubleRegs:$dst2))],
- "$dst2 = $dst">;
-
-class df_MInst_dfdfdf_nac<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$dst2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2, DoubleRegs:$dst2))],
- "$dst2 = $dst">;
-
-
-class df_MInst_dfdfdfsi_sc<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2, IntRegs:$src3),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2, $src3):scale")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
- DoubleRegs:$src2, IntRegs:$src3))],
- "$dst2 = $dst">;
-
-class df_MInst_dfdfdf_acc_lib<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$dst2),
- !strconcat("$dst += ", !strconcat(opc ,
- "($src1, $src2):lib")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2, DoubleRegs:$dst2))],
- "$dst2 = $dst">;
-
-class df_MInst_dfdfdf_nac_lib<string opc, Intrinsic IntID>
- : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- DoubleRegs:$dst2),
- !strconcat("$dst -= ", !strconcat(opc ,
- "($src1, $src2):lib")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
- DoubleRegs:$src2, DoubleRegs:$dst2))],
- "$dst2 = $dst">;
-
-class qi_SInst_sfsf<string opc, Intrinsic IntID>
- : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
-
-class qi_SInst_sfu5<string opc, Intrinsic IntID>
- : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
-
-class sf_ALU64_u10_pos<string opc, Intrinsic IntID>
- : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")),
- [(set IntRegs:$dst, (IntID imm:$src1))]>;
-
-class sf_ALU64_u10_neg<string opc, Intrinsic IntID>
- : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")),
- [(set IntRegs:$dst, (IntID imm:$src1))]>;
-
-class df_ALU64_u10_pos<string opc, Intrinsic IntID>
- : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")),
- [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
-
-class df_ALU64_u10_neg<string opc, Intrinsic IntID>
- : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
- !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")),
- [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
-
-class di_MInst_diu6<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-class di_MInst_diu4_rnd<string opc, Intrinsic IntID>
- : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-class si_MInst_diu4_rnd_sat<string opc, Intrinsic IntID>
- : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd:sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-class si_SInst_diu4_sat<string opc, Intrinsic IntID>
- : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
- !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")),
- [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
-
-
-def HEXAGON_C4_fastcorner9:
- qi_SInst_qiqi <"fastcorner9", int_hexagon_C4_fastcorner9>;
-def HEXAGON_C4_fastcorner9_not:
- qi_SInst_qiqi <"!fastcorner9", int_hexagon_C4_fastcorner9_not>;
-def HEXAGON_M5_vrmpybuu:
- di_MInst_didi <"vrmpybu", int_hexagon_M5_vrmpybuu>;
-def HEXAGON_M5_vrmacbuu:
- di_MInst_dididi_acc <"vrmpybu", int_hexagon_M5_vrmacbuu>;
-def HEXAGON_M5_vrmpybsu:
- di_MInst_didi <"vrmpybsu", int_hexagon_M5_vrmpybsu>;
-def HEXAGON_M5_vrmacbsu:
- di_MInst_dididi_acc <"vrmpybsu", int_hexagon_M5_vrmacbsu>;
-def HEXAGON_M5_vmpybuu:
- di_MInst_sisi <"vmpybu", int_hexagon_M5_vmpybuu>;
-def HEXAGON_M5_vmpybsu:
- di_MInst_sisi <"vmpybsu", int_hexagon_M5_vmpybsu>;
-def HEXAGON_M5_vmacbuu:
- di_MInst_disisi_acc <"vmpybu", int_hexagon_M5_vmacbuu>;
-def HEXAGON_M5_vmacbsu:
- di_MInst_disisi_acc <"vmpybsu", int_hexagon_M5_vmacbsu>;
-def HEXAGON_M5_vdmpybsu:
- di_MInst_didi_sat <"vdmpybsu", int_hexagon_M5_vdmpybsu>;
-def HEXAGON_M5_vdmacbsu:
- di_MInst_dididi_acc_sat <"vdmpybsu", int_hexagon_M5_vdmacbsu>;
-def HEXAGON_A5_vaddhubs:
- si_SInst_didi_sat <"vaddhub", int_hexagon_A5_vaddhubs>;
-def HEXAGON_S5_popcountp:
- si_SInst_di <"popcount", int_hexagon_S5_popcountp>;
-def HEXAGON_S5_asrhub_rnd_sat_goodsyntax:
- si_MInst_diu4_rnd_sat <"vasrhub", int_hexagon_S5_asrhub_rnd_sat_goodsyntax>;
-def HEXAGON_S5_asrhub_sat:
- si_SInst_diu4_sat <"vasrhub", int_hexagon_S5_asrhub_sat>;
-def HEXAGON_S5_vasrhrnd_goodsyntax:
- di_MInst_diu4_rnd <"vasrh", int_hexagon_S5_vasrhrnd_goodsyntax>;
-def HEXAGON_S2_asr_i_p_rnd:
- di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p_rnd>;
-def HEXAGON_S2_asr_i_p_rnd_goodsyntax:
- di_MInst_diu6 <"asrrnd", int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
-def HEXAGON_F2_sfadd:
- sf_MInst_sfsf <"sfadd", int_hexagon_F2_sfadd>;
-def HEXAGON_F2_sfsub:
- sf_MInst_sfsf <"sfsub", int_hexagon_F2_sfsub>;
-def HEXAGON_F2_sfmpy:
- sf_MInst_sfsf <"sfmpy", int_hexagon_F2_sfmpy>;
-def HEXAGON_F2_sffma:
- sf_MInst_sfsfsf_acc <"sfmpy", int_hexagon_F2_sffma>;
-def HEXAGON_F2_sffma_sc:
- sf_MInst_sfsfsfsi_sc <"sfmpy", int_hexagon_F2_sffma_sc>;
-def HEXAGON_F2_sffms:
- sf_MInst_sfsfsf_nac <"sfmpy", int_hexagon_F2_sffms>;
-def HEXAGON_F2_sffma_lib:
- sf_MInst_sfsfsf_acc_lib <"sfmpy", int_hexagon_F2_sffma_lib>;
-def HEXAGON_F2_sffms_lib:
- sf_MInst_sfsfsf_nac_lib <"sfmpy", int_hexagon_F2_sffms_lib>;
-def HEXAGON_F2_sfcmpeq:
- qi_SInst_sfsf <"sfcmp.eq", int_hexagon_F2_sfcmpeq>;
-def HEXAGON_F2_sfcmpgt:
- qi_SInst_sfsf <"sfcmp.gt", int_hexagon_F2_sfcmpgt>;
-def HEXAGON_F2_sfcmpge:
- qi_SInst_sfsf <"sfcmp.ge", int_hexagon_F2_sfcmpge>;
-def HEXAGON_F2_sfcmpuo:
- qi_SInst_sfsf <"sfcmp.uo", int_hexagon_F2_sfcmpuo>;
-def HEXAGON_F2_sfmax:
- sf_MInst_sfsf <"sfmax", int_hexagon_F2_sfmax>;
-def HEXAGON_F2_sfmin:
- sf_MInst_sfsf <"sfmin", int_hexagon_F2_sfmin>;
-def HEXAGON_F2_sfclass:
- qi_SInst_sfu5 <"sfclass", int_hexagon_F2_sfclass>;
-def HEXAGON_F2_sfimm_p:
- sf_ALU64_u10_pos <"sfmake", int_hexagon_F2_sfimm_p>;
-def HEXAGON_F2_sfimm_n:
- sf_ALU64_u10_neg <"sfmake", int_hexagon_F2_sfimm_n>;
-def HEXAGON_F2_sffixupn:
- sf_MInst_sfsf <"sffixupn", int_hexagon_F2_sffixupn>;
-def HEXAGON_F2_sffixupd:
- sf_MInst_sfsf <"sffixupd", int_hexagon_F2_sffixupd>;
-def HEXAGON_F2_sffixupr:
- sf_SInst_sf <"sffixupr", int_hexagon_F2_sffixupr>;
-def HEXAGON_F2_dfadd:
- df_MInst_dfdf <"dfadd", int_hexagon_F2_dfadd>;
-def HEXAGON_F2_dfsub:
- df_MInst_dfdf <"dfsub", int_hexagon_F2_dfsub>;
-def HEXAGON_F2_dfmpy:
- df_MInst_dfdf <"dfmpy", int_hexagon_F2_dfmpy>;
-def HEXAGON_F2_dffma:
- df_MInst_dfdfdf_acc <"dfmpy", int_hexagon_F2_dffma>;
-def HEXAGON_F2_dffms:
- df_MInst_dfdfdf_nac <"dfmpy", int_hexagon_F2_dffms>;
-def HEXAGON_F2_dffma_lib:
- df_MInst_dfdfdf_acc_lib <"dfmpy", int_hexagon_F2_dffma_lib>;
-def HEXAGON_F2_dffms_lib:
- df_MInst_dfdfdf_nac_lib <"dfmpy", int_hexagon_F2_dffms_lib>;
-def HEXAGON_F2_dffma_sc:
- df_MInst_dfdfdfsi_sc <"dfmpy", int_hexagon_F2_dffma_sc>;
-def HEXAGON_F2_dfmax:
- df_MInst_dfdf <"dfmax", int_hexagon_F2_dfmax>;
-def HEXAGON_F2_dfmin:
- df_MInst_dfdf <"dfmin", int_hexagon_F2_dfmin>;
-def HEXAGON_F2_dfcmpeq:
- qi_ALU64_dfdf <"dfcmp.eq", int_hexagon_F2_dfcmpeq>;
-def HEXAGON_F2_dfcmpgt:
- qi_ALU64_dfdf <"dfcmp.gt", int_hexagon_F2_dfcmpgt>;
-def HEXAGON_F2_dfcmpge:
- qi_ALU64_dfdf <"dfcmp.ge", int_hexagon_F2_dfcmpge>;
-def HEXAGON_F2_dfcmpuo:
- qi_ALU64_dfdf <"dfcmp.uo", int_hexagon_F2_dfcmpuo>;
-def HEXAGON_F2_dfclass:
- qi_ALU64_dfu5 <"dfclass", int_hexagon_F2_dfclass>;
-def HEXAGON_F2_dfimm_p:
- df_ALU64_u10_pos <"dfmake", int_hexagon_F2_dfimm_p>;
-def HEXAGON_F2_dfimm_n:
- df_ALU64_u10_neg <"dfmake", int_hexagon_F2_dfimm_n>;
-def HEXAGON_F2_dffixupn:
- df_MInst_dfdf <"dffixupn", int_hexagon_F2_dffixupn>;
-def HEXAGON_F2_dffixupd:
- df_MInst_dfdf <"dffixupd", int_hexagon_F2_dffixupd>;
-def HEXAGON_F2_dffixupr:
- df_SInst_df <"dffixupr", int_hexagon_F2_dffixupr>;
-def HEXAGON_F2_conv_sf2df:
- df_SInst_sf <"convert_sf2df", int_hexagon_F2_conv_sf2df>;
-def HEXAGON_F2_conv_df2sf:
- sf_SInst_df <"convert_df2sf", int_hexagon_F2_conv_df2sf>;
-def HEXAGON_F2_conv_uw2sf:
- sf_SInst_si <"convert_uw2sf", int_hexagon_F2_conv_uw2sf>;
-def HEXAGON_F2_conv_uw2df:
- df_SInst_si <"convert_uw2df", int_hexagon_F2_conv_uw2df>;
-def HEXAGON_F2_conv_w2sf:
- sf_SInst_si <"convert_w2sf", int_hexagon_F2_conv_w2sf>;
-def HEXAGON_F2_conv_w2df:
- df_SInst_si <"convert_w2df", int_hexagon_F2_conv_w2df>;
-def HEXAGON_F2_conv_ud2sf:
- sf_SInst_di <"convert_ud2sf", int_hexagon_F2_conv_ud2sf>;
-def HEXAGON_F2_conv_ud2df:
- df_SInst_di <"convert_ud2df", int_hexagon_F2_conv_ud2df>;
-def HEXAGON_F2_conv_d2sf:
- sf_SInst_di <"convert_d2sf", int_hexagon_F2_conv_d2sf>;
-def HEXAGON_F2_conv_d2df:
- df_SInst_di <"convert_d2df", int_hexagon_F2_conv_d2df>;
-def HEXAGON_F2_conv_sf2uw:
- si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw>;
-def HEXAGON_F2_conv_sf2w:
- si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w>;
-def HEXAGON_F2_conv_sf2ud:
- di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud>;
-def HEXAGON_F2_conv_sf2d:
- di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d>;
-def HEXAGON_F2_conv_df2uw:
- si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw>;
-def HEXAGON_F2_conv_df2w:
- si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w>;
-def HEXAGON_F2_conv_df2ud:
- di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud>;
-def HEXAGON_F2_conv_df2d:
- di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d>;
-def HEXAGON_F2_conv_sf2uw_chop:
- si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw_chop>;
-def HEXAGON_F2_conv_sf2w_chop:
- si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w_chop>;
-def HEXAGON_F2_conv_sf2ud_chop:
- di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud_chop>;
-def HEXAGON_F2_conv_sf2d_chop:
- di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d_chop>;
-def HEXAGON_F2_conv_df2uw_chop:
- si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw_chop>;
-def HEXAGON_F2_conv_df2w_chop:
- si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w_chop>;
-def HEXAGON_F2_conv_df2ud_chop:
- di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud_chop>;
-def HEXAGON_F2_conv_df2d_chop:
- di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d_chop>;
+//===- HexagonIntrinsicsV5.td - V5 Instruction intrinsics --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Rdd[+]=vrmpybsu(Rss,Rtt)
+// Rdd[+]=vrmpybuu(Rss,Rtt)
+let Predicates = [HasV5T] in {
+def : T_PP_pat <M5_vrmpybsu, int_hexagon_M5_vrmpybsu>;
+def : T_PP_pat <M5_vrmpybuu, int_hexagon_M5_vrmpybuu>;
+
+def : T_PP_pat <M5_vdmpybsu, int_hexagon_M5_vdmpybsu>;
+
+def : T_PPP_pat <M5_vrmacbsu, int_hexagon_M5_vrmacbsu>;
+def : T_PPP_pat <M5_vrmacbuu, int_hexagon_M5_vrmacbuu>;
+// Rxx+=vdmpybsu(Rss,Rtt):sat
+def : T_PPP_pat <M5_vdmacbsu, int_hexagon_M5_vdmacbsu>;
+
+// Vector multiply bytes
+// Rdd=vmpyb[s]u(Rs,Rt)
+def : T_RR_pat <M5_vmpybsu, int_hexagon_M5_vmpybsu>;
+def : T_RR_pat <M5_vmpybuu, int_hexagon_M5_vmpybuu>;
+
+// Rxx+=vmpyb[s]u(Rs,Rt)
+def : T_PRR_pat <M5_vmacbsu, int_hexagon_M5_vmacbsu>;
+def : T_PRR_pat <M5_vmacbuu, int_hexagon_M5_vmacbuu>;
+
+// Rd=vaddhub(Rss,Rtt):sat
+def : T_PP_pat <A5_vaddhubs, int_hexagon_A5_vaddhubs>;
+}
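As a concrete reading of the "vector multiply bytes" forms above, here is a
hedged C++ model of M5_vmpybuu, assuming (from the instruction-set manual,
not this patch) that each of the four unsigned bytes of Rs is multiplied by
the corresponding byte of Rt and the four 16-bit products are packed into
the 64-bit Rdd; model_vmpybuu is a hypothetical name:

#include <cassert>
#include <cstdint>

// Hypothetical model of Rdd = vmpybu(Rs, Rt): lane-wise unsigned byte
// multiply producing packed 16-bit products.
static uint64_t model_vmpybuu(uint32_t rs, uint32_t rt) {
  uint64_t rdd = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t p = uint8_t(rs >> (8 * i)) * uint32_t(uint8_t(rt >> (8 * i)));
    rdd |= uint64_t(p & 0xffff) << (16 * i);
  }
  return rdd;
}

int main() {
  // 0x02 * 0x03 in the low byte lane -> 0x0006 in the low halfword.
  assert(model_vmpybuu(0x00000002, 0x00000003) == 0x0000000000000006ULL);
  // 0xff * 0xff in the top lane -> 0xfe01 in the top halfword.
  assert(model_vmpybuu(0xff000000, 0xff000000) == 0xfe01000000000000ULL);
  return 0;
}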
+
+def : T_FF_pat<F2_sfadd, int_hexagon_F2_sfadd>;
+def : T_FF_pat<F2_sfsub, int_hexagon_F2_sfsub>;
+def : T_FF_pat<F2_sfmpy, int_hexagon_F2_sfmpy>;
+def : T_FF_pat<F2_sfmax, int_hexagon_F2_sfmax>;
+def : T_FF_pat<F2_sfmin, int_hexagon_F2_sfmin>;
+
+def : T_FF_pat<F2_sffixupn, int_hexagon_F2_sffixupn>;
+def : T_FF_pat<F2_sffixupd, int_hexagon_F2_sffixupd>;
+def : T_F_pat <F2_sffixupr, int_hexagon_F2_sffixupr>;
+
+def: qi_CRInst_qiqi_pat<C4_fastcorner9, int_hexagon_C4_fastcorner9>;
+def: qi_CRInst_qiqi_pat<C4_fastcorner9_not, int_hexagon_C4_fastcorner9_not>;
+
+def : T_P_pat <S5_popcountp, int_hexagon_S5_popcountp>;
+def : T_PI_pat <S5_asrhub_sat, int_hexagon_S5_asrhub_sat>;
+
+def : T_PI_pat <S2_asr_i_p_rnd, int_hexagon_S2_asr_i_p_rnd>;
+def : T_PI_pat <S2_asr_i_p_rnd_goodsyntax,
+ int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
+
+def : T_PI_pat <S5_asrhub_rnd_sat_goodsyntax,
+ int_hexagon_S5_asrhub_rnd_sat_goodsyntax>;
+
+def : T_PI_pat <S5_vasrhrnd_goodsyntax, int_hexagon_S5_vasrhrnd_goodsyntax>;
+
+def : T_FFF_pat <F2_sffma, int_hexagon_F2_sffma>;
+def : T_FFF_pat <F2_sffms, int_hexagon_F2_sffms>;
+def : T_FFF_pat <F2_sffma_lib, int_hexagon_F2_sffma_lib>;
+def : T_FFF_pat <F2_sffms_lib, int_hexagon_F2_sffms_lib>;
+def : T_FFFQ_pat <F2_sffma_sc, int_hexagon_F2_sffma_sc>;
+
+// Compare floating-point value
+def : T_FF_pat <F2_sfcmpge, int_hexagon_F2_sfcmpge>;
+def : T_FF_pat <F2_sfcmpuo, int_hexagon_F2_sfcmpuo>;
+def : T_FF_pat <F2_sfcmpeq, int_hexagon_F2_sfcmpeq>;
+def : T_FF_pat <F2_sfcmpgt, int_hexagon_F2_sfcmpgt>;
+
+def : T_DD_pat <F2_dfcmpeq, int_hexagon_F2_dfcmpeq>;
+def : T_DD_pat <F2_dfcmpgt, int_hexagon_F2_dfcmpgt>;
+def : T_DD_pat <F2_dfcmpge, int_hexagon_F2_dfcmpge>;
+def : T_DD_pat <F2_dfcmpuo, int_hexagon_F2_dfcmpuo>;
+
+// Create floating-point value
+def : T_I_pat <F2_sfimm_p, int_hexagon_F2_sfimm_p>;
+def : T_I_pat <F2_sfimm_n, int_hexagon_F2_sfimm_n>;
+def : T_I_pat <F2_dfimm_p, int_hexagon_F2_dfimm_p>;
+def : T_I_pat <F2_dfimm_n, int_hexagon_F2_dfimm_n>;
+
+def : T_DI_pat <F2_dfclass, int_hexagon_F2_dfclass>;
+def : T_FI_pat <F2_sfclass, int_hexagon_F2_sfclass>;
+def : T_F_pat <F2_conv_sf2df, int_hexagon_F2_conv_sf2df>;
+def : T_D_pat <F2_conv_df2sf, int_hexagon_F2_conv_df2sf>;
+def : T_R_pat <F2_conv_uw2sf, int_hexagon_F2_conv_uw2sf>;
+def : T_R_pat <F2_conv_uw2df, int_hexagon_F2_conv_uw2df>;
+def : T_R_pat <F2_conv_w2sf, int_hexagon_F2_conv_w2sf>;
+def : T_R_pat <F2_conv_w2df, int_hexagon_F2_conv_w2df>;
+def : T_P_pat <F2_conv_ud2sf, int_hexagon_F2_conv_ud2sf>;
+def : T_P_pat <F2_conv_ud2df, int_hexagon_F2_conv_ud2df>;
+def : T_P_pat <F2_conv_d2sf, int_hexagon_F2_conv_d2sf>;
+def : T_P_pat <F2_conv_d2df, int_hexagon_F2_conv_d2df>;
+def : T_F_pat <F2_conv_sf2uw, int_hexagon_F2_conv_sf2uw>;
+def : T_F_pat <F2_conv_sf2w, int_hexagon_F2_conv_sf2w>;
+def : T_F_pat <F2_conv_sf2ud, int_hexagon_F2_conv_sf2ud>;
+def : T_F_pat <F2_conv_sf2d, int_hexagon_F2_conv_sf2d>;
+def : T_D_pat <F2_conv_df2uw, int_hexagon_F2_conv_df2uw>;
+def : T_D_pat <F2_conv_df2w, int_hexagon_F2_conv_df2w>;
+def : T_D_pat <F2_conv_df2ud, int_hexagon_F2_conv_df2ud>;
+def : T_D_pat <F2_conv_df2d, int_hexagon_F2_conv_df2d>;
+def : T_F_pat <F2_conv_sf2uw_chop, int_hexagon_F2_conv_sf2uw_chop>;
+def : T_F_pat <F2_conv_sf2w_chop, int_hexagon_F2_conv_sf2w_chop>;
+def : T_F_pat <F2_conv_sf2ud_chop, int_hexagon_F2_conv_sf2ud_chop>;
+def : T_F_pat <F2_conv_sf2d_chop, int_hexagon_F2_conv_sf2d_chop>;
+def : T_D_pat <F2_conv_df2uw_chop, int_hexagon_F2_conv_df2uw_chop>;
+def : T_D_pat <F2_conv_df2w_chop, int_hexagon_F2_conv_df2w_chop>;
+def : T_D_pat <F2_conv_df2ud_chop, int_hexagon_F2_conv_df2ud_chop>;
+def : T_D_pat <F2_conv_df2d_chop, int_hexagon_F2_conv_df2d_chop>;
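A short, hedged model of the scalar floating-point compares selected by the
T_FF_pat entries above; the semantics assumed here are plain IEEE-754
comparisons, with sfcmp.uo taken to mean "unordered", i.e. at least one
operand is NaN, and the model_* names are hypothetical:

#include <cassert>
#include <cmath>

static bool model_sfcmpeq(float a, float b) { return a == b; } // false on NaN
static bool model_sfcmpgt(float a, float b) { return a > b; }
static bool model_sfcmpge(float a, float b) { return a >= b; }
static bool model_sfcmpuo(float a, float b) {
  return std::isnan(a) || std::isnan(b);
}

int main() {
  float n = std::nanf("");
  assert(!model_sfcmpeq(n, n) && model_sfcmpuo(n, 1.0f));
  assert(model_sfcmpge(2.0f, 2.0f) && !model_sfcmpgt(2.0f, 2.0f));
  return 0;
}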
diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp
index 5e4346d..9c9f3af 100644
--- a/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -15,7 +15,6 @@
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonMachineFunctionInfo.h"
-#include "MCTargetDesc/HexagonMCInst.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Mangler.h"
@@ -39,10 +38,9 @@ static MCOperand GetSymbolRef(const MachineOperand& MO, const MCSymbol* Symbol,
}
// Create an MCInst from a MachineInstr
-void llvm::HexagonLowerToMC(const MachineInstr* MI, HexagonMCInst& MCI,
+void llvm::HexagonLowerToMC(MachineInstr const* MI, MCInst& MCI,
HexagonAsmPrinter& AP) {
MCI.setOpcode(MI->getOpcode());
- MCI.setDesc(MI->getDesc());
for (unsigned i = 0, e = MI->getNumOperands(); i < e; i++) {
const MachineOperand &MO = MI->getOperand(i);
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 97c626f..35f732c 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -205,20 +205,17 @@ void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
// Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
// are disabled, then these HazardRecs will be disabled.
const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
- const TargetMachine &TM = DAG->MF.getTarget();
+ const TargetSubtargetInfo &STI = DAG->MF.getSubtarget();
+ const TargetInstrInfo *TII = STI.getInstrInfo();
delete Top.HazardRec;
delete Bot.HazardRec;
- Top.HazardRec =
- TM.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
- Itin, DAG);
- Bot.HazardRec =
- TM.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
- Itin, DAG);
+ Top.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
+ Bot.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
delete Top.ResourceModel;
delete Bot.ResourceModel;
- Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
- Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
+ Top.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
+ Bot.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
"-misched-topdown incompatible with -misched-bottomup");
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.h b/lib/Target/Hexagon/HexagonMachineScheduler.h
index 1e023c3..6034344 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.h
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -54,11 +54,9 @@ class VLIWResourceModel {
unsigned TotalPackets;
public:
-VLIWResourceModel(const TargetMachine &TM, const TargetSchedModel *SM) :
- SchedModel(SM), TotalPackets(0) {
- ResourcesModel =
- TM.getSubtargetImpl()->getInstrInfo()->CreateTargetScheduleState(
- *TM.getSubtargetImpl());
+ VLIWResourceModel(const TargetSubtargetInfo &STI, const TargetSchedModel *SM)
+ : SchedModel(SM), TotalPackets(0) {
+ ResourcesModel = STI.getInstrInfo()->CreateTargetScheduleState(STI);
// This hard requirement could be relaxed,
// but for now do not let it proceed.
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 782c979..806d448 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -176,7 +176,7 @@ static bool commonChecksToProhibitNewValueJump(bool afterRA,
return false;
// if call in path, bail out.
- if (MII->getOpcode() == Hexagon::CALLv3)
+ if (MII->getOpcode() == Hexagon::J2_call)
return false;
// if NVJ is running prior to RA, do the following checks.
@@ -199,8 +199,7 @@ static bool commonChecksToProhibitNewValueJump(bool afterRA,
// of registers by individual passes in the backend. At this time,
// we don't know the scope of usage and definitions of these
// instructions.
- if (MII->getOpcode() == Hexagon::TFR_condset_rr ||
- MII->getOpcode() == Hexagon::TFR_condset_ii ||
+ if (MII->getOpcode() == Hexagon::TFR_condset_ii ||
MII->getOpcode() == Hexagon::TFR_condset_ri ||
MII->getOpcode() == Hexagon::TFR_condset_ir ||
MII->getOpcode() == Hexagon::LDriw_pred ||
@@ -228,8 +227,8 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII,
int64_t v = MI->getOperand(2).getImm();
if (!(isUInt<5>(v) ||
- ((MI->getOpcode() == Hexagon::CMPEQri ||
- MI->getOpcode() == Hexagon::CMPGTri) &&
+ ((MI->getOpcode() == Hexagon::C2_cmpeqi ||
+ MI->getOpcode() == Hexagon::C2_cmpgti) &&
(v == -1))))
return false;
}
@@ -299,49 +298,49 @@ static unsigned getNewValueJumpOpcode(MachineInstr *MI, int reg,
taken = true;
switch (MI->getOpcode()) {
- case Hexagon::CMPEQrr:
- return taken ? Hexagon::CMPEQrr_t_Jumpnv_t_V4
- : Hexagon::CMPEQrr_t_Jumpnv_nt_V4;
+ case Hexagon::C2_cmpeq:
+ return taken ? Hexagon::J4_cmpeq_t_jumpnv_t
+ : Hexagon::J4_cmpeq_t_jumpnv_nt;
- case Hexagon::CMPEQri: {
+ case Hexagon::C2_cmpeqi: {
if (reg >= 0)
- return taken ? Hexagon::CMPEQri_t_Jumpnv_t_V4
- : Hexagon::CMPEQri_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpeqi_t_jumpnv_t
+ : Hexagon::J4_cmpeqi_t_jumpnv_nt;
else
- return taken ? Hexagon::CMPEQn1_t_Jumpnv_t_V4
- : Hexagon::CMPEQn1_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpeqn1_t_jumpnv_t
+ : Hexagon::J4_cmpeqn1_t_jumpnv_nt;
}
- case Hexagon::CMPGTrr: {
+ case Hexagon::C2_cmpgt: {
if (secondRegNewified)
- return taken ? Hexagon::CMPLTrr_t_Jumpnv_t_V4
- : Hexagon::CMPLTrr_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmplt_t_jumpnv_t
+ : Hexagon::J4_cmplt_t_jumpnv_nt;
else
- return taken ? Hexagon::CMPGTrr_t_Jumpnv_t_V4
- : Hexagon::CMPGTrr_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpgt_t_jumpnv_t
+ : Hexagon::J4_cmpgt_t_jumpnv_nt;
}
- case Hexagon::CMPGTri: {
+ case Hexagon::C2_cmpgti: {
if (reg >= 0)
- return taken ? Hexagon::CMPGTri_t_Jumpnv_t_V4
- : Hexagon::CMPGTri_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpgti_t_jumpnv_t
+ : Hexagon::J4_cmpgti_t_jumpnv_nt;
else
- return taken ? Hexagon::CMPGTn1_t_Jumpnv_t_V4
- : Hexagon::CMPGTn1_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpgtn1_t_jumpnv_t
+ : Hexagon::J4_cmpgtn1_t_jumpnv_nt;
}
- case Hexagon::CMPGTUrr: {
+ case Hexagon::C2_cmpgtu: {
if (secondRegNewified)
- return taken ? Hexagon::CMPLTUrr_t_Jumpnv_t_V4
- : Hexagon::CMPLTUrr_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpltu_t_jumpnv_t
+ : Hexagon::J4_cmpltu_t_jumpnv_nt;
else
- return taken ? Hexagon::CMPGTUrr_t_Jumpnv_t_V4
- : Hexagon::CMPGTUrr_t_Jumpnv_nt_V4;
+ return taken ? Hexagon::J4_cmpgtu_t_jumpnv_t
+ : Hexagon::J4_cmpgtu_t_jumpnv_nt;
}
- case Hexagon::CMPGTUri:
- return taken ? Hexagon::CMPGTUri_t_Jumpnv_t_V4
- : Hexagon::CMPGTUri_t_Jumpnv_nt_V4;
+ case Hexagon::C2_cmpgtui:
+ return taken ? Hexagon::J4_cmpgtui_t_jumpnv_t
+ : Hexagon::J4_cmpgtui_t_jumpnv_nt;
default:
llvm_unreachable("Could not find matching New Value Jump instruction.");
@@ -356,19 +355,15 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
<< "********** Function: "
<< MF.getName() << "\n");
-#if 0
- // for now disable this, if we move NewValueJump before register
- // allocation we need this information.
- LiveVariables &LVs = getAnalysis<LiveVariables>();
-#endif
+ // If we move NewValueJump before register allocation we'll need live variable
+ // analysis here too.
QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
QRI = static_cast<const HexagonRegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
- if (!QRI->Subtarget.hasV4TOps() ||
- DisableNewValueJumps) {
+ if (DisableNewValueJumps) {
return false;
}
@@ -413,12 +408,12 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n");
if (!foundJump &&
- (MI->getOpcode() == Hexagon::JMP_t ||
- MI->getOpcode() == Hexagon::JMP_f ||
- MI->getOpcode() == Hexagon::JMP_tnew_t ||
- MI->getOpcode() == Hexagon::JMP_tnew_nt ||
- MI->getOpcode() == Hexagon::JMP_fnew_t ||
- MI->getOpcode() == Hexagon::JMP_fnew_nt)) {
+ (MI->getOpcode() == Hexagon::J2_jumpt ||
+ MI->getOpcode() == Hexagon::J2_jumpf ||
+ MI->getOpcode() == Hexagon::J2_jumptnewpt ||
+ MI->getOpcode() == Hexagon::J2_jumptnew ||
+ MI->getOpcode() == Hexagon::J2_jumpfnewpt ||
+ MI->getOpcode() == Hexagon::J2_jumpfnew)) {
 // This is where the compare and the instruction that
 // feeds it will be inserted.
jmpPos = MII;
@@ -454,9 +449,9 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
jmpTarget = MI->getOperand(1).getMBB();
foundJump = true;
- if (MI->getOpcode() == Hexagon::JMP_f ||
- MI->getOpcode() == Hexagon::JMP_fnew_t ||
- MI->getOpcode() == Hexagon::JMP_fnew_nt) {
+ if (MI->getOpcode() == Hexagon::J2_jumpf ||
+ MI->getOpcode() == Hexagon::J2_jumpfnewpt ||
+ MI->getOpcode() == Hexagon::J2_jumpfnew) {
invertPredicate = true;
}
continue;
@@ -545,7 +540,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
if (isSecondOpReg) {
// In case of CMPLT, or CMPLTU, or EQ with the second register
// to newify, swap the operands.
- if (cmpInstr->getOpcode() == Hexagon::CMPEQrr &&
+ if (cmpInstr->getOpcode() == Hexagon::C2_cmpeq &&
feederReg == (unsigned) cmpOp2) {
unsigned tmp = cmpReg1;
bool tmpIsKill = MO1IsKill;
@@ -612,8 +607,8 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
.addReg(cmpOp2, getKillRegState(MO2IsKill))
.addMBB(jmpTarget);
- else if ((cmpInstr->getOpcode() == Hexagon::CMPEQri ||
- cmpInstr->getOpcode() == Hexagon::CMPGTri) &&
+ else if ((cmpInstr->getOpcode() == Hexagon::C2_cmpeqi ||
+ cmpInstr->getOpcode() == Hexagon::C2_cmpgti) &&
cmpOp2 == -1 )
// Corresponding new-value compare jump instructions don't have the
// operand for -1 immediate value.
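The opcode mapping in getNewValueJumpOpcode earlier in this file's diff
relies on operand commutation: when the newly produced value is the second
compare operand, the pass picks the complementary J4_cmplt*/J4_cmpltu* form,
and for C2_cmpeq it simply swaps the operands, since equality is symmetric.
A trivial C++ check of the identities being relied on:

#include <cassert>

int main() {
  int a = 3, b = 7;
  assert((a > b) == (b < a));   // cmpgt with swapped operands is cmplt
  assert((a == b) == (b == a)); // eq is symmetric, only operands swap
  return 0;
}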
diff --git a/lib/Target/Hexagon/HexagonOperands.td b/lib/Target/Hexagon/HexagonOperands.td
index c79d78f..318ca72 100644
--- a/lib/Target/Hexagon/HexagonOperands.td
+++ b/lib/Target/Hexagon/HexagonOperands.td
@@ -39,6 +39,7 @@ let PrintMethod = "printImmOperand" in {
def u16_0Imm : Operand<i32>;
def u16_1Imm : Operand<i32>;
def u16_2Imm : Operand<i32>;
+ def u16_3Imm : Operand<i32>;
def u11_3Imm : Operand<i32>;
def u10Imm : Operand<i32>;
def u9Imm : Operand<i32>;
@@ -258,6 +259,19 @@ def u16_s8ImmPred : PatLeaf<(i32 imm), [{
return isShiftedUInt<16,8>(v);
}]>;
+def u16_0ImmPred : PatLeaf<(i32 imm), [{
+ // True if the immediate fits in a 16-bit unsigned field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<16>(v);
+}]>;
+
+def u11_3ImmPred : PatLeaf<(i32 imm), [{
+ // True if the immediate fits in a 14-bit unsigned field, and the lowest
+ // three bits are 0.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<11,3>(v);
+}]>;
+
def u9ImmPred : PatLeaf<(i32 imm), [{
// u9ImmPred predicate - True if the immediate fits in a 9-bit unsigned
// field.
@@ -329,6 +343,12 @@ def u5ImmPred : PatLeaf<(i32 imm), [{
return isUInt<5>(v);
}]>;
+def u4ImmPred : PatLeaf<(i32 imm), [{
+ // u4ImmPred predicate - True if the immediate fits in a 4-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<4>(v);
+}]>;
def u3ImmPred : PatLeaf<(i32 imm), [{
// u3ImmPred predicate - True if the immediate fits in a 3-bit unsigned
@@ -497,309 +517,218 @@ def u0AlwaysExt : Operand<i32>;
// Predicates for constant extendable operands
def s16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 16-bit sign extended field.
- return isInt<16>(v);
- else {
- if (isInt<16>(v))
- return true;
+ if (isInt<16>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 10-bit sign extended field.
- return isInt<10>(v);
- else {
- if (isInt<10>(v))
- return true;
+ if (isInt<10>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 9-bit sign extended field.
- return isInt<9>(v);
- else {
- if (isInt<9>(v))
- return true;
+ if (isInt<9>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 8-bit sign extended field.
- return isInt<8>(v);
- else {
- if (isInt<8>(v))
- return true;
+ if (isInt<8>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s8_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate fits in a 8-bit sign extended field.
- return isInt<8>(v);
- else {
- if (isInt<8>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 16-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<16>(v);
- }
+ if (isInt<8>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 16-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<16>(v);
}]>;
def s6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
+ if (isInt<6>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s6_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate fits in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 16-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<16>(v);
- }
+ if (isInt<6>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 16-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<16>(v);
}]>;
def s6_10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 10-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<10>(v);
- }
+ if (isInt<6>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 10-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<10>(v);
}]>;
def s11_0ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 11-bit sign extended field.
- return isShiftedInt<11,0>(v);
- else {
- if (isInt<11>(v))
- return true;
+ if (isInt<11>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s11_1ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 12-bit sign extended field and
- // is 2 byte aligned.
+ if (isInt<12>(v))
return isShiftedInt<11,1>(v);
- else {
- if (isInt<12>(v))
- return isShiftedInt<11,1>(v);
- // Return true if extending this immediate is profitable and the low 1 bit
- // is zero (2-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 2) == 0);
- }
+ // Return true if extending this immediate is profitable and the low 1 bit
+ // is zero (2-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 2) == 0);
}]>;
def s11_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 13-bit sign extended field and
- // is 4-byte aligned.
+ if (isInt<13>(v))
return isShiftedInt<11,2>(v);
- else {
- if (isInt<13>(v))
- return isShiftedInt<11,2>(v);
- // Return true if extending this immediate is profitable and the low 2-bits
- // are zero (4-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 4) == 0);
- }
+ // Return true if extending this immediate is profitable and the low 2-bits
+ // are zero (4-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 4) == 0);
}]>;
def s11_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 14-bit sign extended field and
- // is 8-byte aligned.
- return isShiftedInt<11,3>(v);
- else {
- if (isInt<14>(v))
- return isShiftedInt<11,3>(v);
-
- // Return true if extending this immediate is profitable and the low 3-bits
- // are zero (8-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 8) == 0);
- }
+ if (isInt<14>(v))
+ return isShiftedInt<11,3>(v);
+
+ // Return true if extending this immediate is profitable and the low 3-bits
+ // are zero (8-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 8) == 0);
}]>;
def u0AlwaysExtPred : PatLeaf<(i32 imm), [{
// Predicate for an unsigned 32-bit value that always needs to be extended.
- if (Subtarget.hasV4TOps()) {
- if (isConstExtProfitable(Node)) {
- int64_t v = (int64_t)N->getSExtValue();
- return isUInt<32>(v);
- }
+ if (isConstExtProfitable(Node)) {
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<32>(v);
}
return false;
}]>;
def u6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 6-bit unsigned field.
- return isUInt<6>(v);
- else {
- if (isUInt<6>(v))
- return true;
+ if (isUInt<6>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u7ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 7-bit unsigned field.
- return isUInt<7>(v);
- else {
- if (isUInt<7>(v))
- return true;
+ if (isUInt<7>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 8-bit unsigned field.
- return isUInt<8>(v);
- else {
- if (isUInt<8>(v))
- return true;
+ if (isUInt<8>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 9-bit unsigned field.
- return isUInt<9>(v);
- else {
- if (isUInt<9>(v))
- return true;
+ if (isUInt<9>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u6_1ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 7-bit unsigned field and
- // is 2-byte aligned.
+ if (isUInt<7>(v))
return isShiftedUInt<6,1>(v);
- else {
- if (isUInt<7>(v))
- return isShiftedUInt<6,1>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 2) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 2) == 0);
}]>;
def u6_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 8-bit unsigned field and
- // is 4-byte aligned.
+ if (isUInt<8>(v))
return isShiftedUInt<6,2>(v);
- else {
- if (isUInt<8>(v))
- return isShiftedUInt<6,2>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 4) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 4) == 0);
}]>;
def u6_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget.hasV4TOps())
- // Return true if the immediate can fit in a 9-bit unsigned field and
- // is 8-byte aligned.
+ if (isUInt<9>(v))
return isShiftedUInt<6,3>(v);
- else {
- if (isUInt<9>(v))
- return isShiftedUInt<6,3>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 8) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 8) == 0);
}]>;
+
+// This complex pattern exists only to create a machine instruction operand
+// of type "frame index". There doesn't seem to be a way to do that directly
+// in the patterns.
+def AddrFI : ComplexPattern<i32, 1, "SelectAddrFI", [frameindex], []>;
+
+// These complex patterns are not strictly necessary, since global address
+// folding will happen during DAG combining. For distinguishing between GA
+// and GP, pat frags with HexagonCONST32 and HexagonCONST32_GP can be used.
+def AddrGA : ComplexPattern<i32, 1, "SelectAddrGA", [], []>;
+def AddrGP : ComplexPattern<i32, 1, "SelectAddrGP", [], []>;
+
// Addressing modes.
def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
@@ -856,3 +785,12 @@ def symbolHi32 : Operand<i32> {
def symbolLo32 : Operand<i32> {
let PrintMethod = "printSymbolLo";
}
+
+// Return true for a 32-to-64-bit sign-extended load.
+def is_sext_i32 : PatLeaf<(i64 DoubleRegs:$src1), [{
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+ if (!LD)
+ return false;
+ return LD->getExtensionType() == ISD::SEXTLOAD &&
+ LD->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
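The immediate predicates in this file are built on llvm::isInt<N>,
isUInt<N>, isShiftedInt<N,S> and isShiftedUInt<N,S> from
llvm/Support/MathExtras.h. A self-contained sketch of what those helpers
check (a model for illustration, not LLVM's actual implementation):

#include <cassert>
#include <cstdint>

template <unsigned N> bool modelIsInt(int64_t v) {
  return v >= -(INT64_C(1) << (N - 1)) && v < (INT64_C(1) << (N - 1));
}
template <unsigned N> bool modelIsUInt(int64_t v) {
  return v >= 0 && v < (INT64_C(1) << N);
}
// Fits in N+S bits with the low S bits zero (2^S-byte aligned).
template <unsigned N, unsigned S> bool modelIsShiftedInt(int64_t v) {
  return modelIsInt<N + S>(v) && (v & ((INT64_C(1) << S) - 1)) == 0;
}

int main() {
  assert(modelIsShiftedInt<11, 2>(-4096) && !modelIsShiftedInt<11, 2>(-4095));
  assert(modelIsUInt<5>(31) && !modelIsUInt<5>(32));
  return 0;
}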
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 8912152..afd3a17 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -112,7 +112,7 @@ INITIALIZE_PASS(HexagonPeephole, "hexagon-peephole", "Hexagon Peephole",
bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
- QRI = MF.getTarget().getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ QRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
MRI = &MF.getRegInfo();
DenseMap<unsigned, unsigned> PeepholeMap;
@@ -133,7 +133,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
MachineInstr *MI = MII;
// Look for sign extends:
// %vreg170<def> = SXTW %vreg166
- if (!DisableOptSZExt && MI->getOpcode() == Hexagon::SXTW) {
+ if (!DisableOptSZExt && MI->getOpcode() == Hexagon::A2_sxtw) {
assert (MI->getNumOperands() == 2);
MachineOperand &Dst = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
@@ -152,7 +152,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Look for %vreg170<def> = COMBINE_ir_V4 (0, %vreg169)
// %vreg170:DoublRegs, %vreg169:IntRegs
if (!DisableOptExtTo64 &&
- MI->getOpcode () == Hexagon::COMBINE_Ir_V4) {
+ MI->getOpcode () == Hexagon::A4_combineir) {
assert (MI->getNumOperands() == 3);
MachineOperand &Dst = MI->getOperand(0);
MachineOperand &Src1 = MI->getOperand(1);
@@ -169,7 +169,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// %vregIntReg = COPY %vregDoubleReg1:subreg_loreg.
// and convert into
// %vregIntReg = COPY %vregDoubleReg0:subreg_hireg.
- if (MI->getOpcode() == Hexagon::LSRd_ri) {
+ if (MI->getOpcode() == Hexagon::S2_lsr_i_p) {
assert(MI->getNumOperands() == 3);
MachineOperand &Dst = MI->getOperand(0);
MachineOperand &Src1 = MI->getOperand(1);
@@ -184,7 +184,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Look for P=NOT(P).
if (!DisablePNotP &&
- (MI->getOpcode() == Hexagon::NOT_p)) {
+ (MI->getOpcode() == Hexagon::C2_not)) {
assert (MI->getNumOperands() == 2);
MachineOperand &Dst = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
@@ -269,10 +269,9 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
unsigned PR = 1, S1 = 2, S2 = 3; // Operand indices.
switch (Op) {
- case Hexagon::TFR_condset_rr:
+ case Hexagon::C2_mux:
+ case Hexagon::C2_muxii:
case Hexagon::TFR_condset_ii:
- case Hexagon::MUX_ii:
- case Hexagon::MUX_rr:
NewOp = Op;
break;
case Hexagon::TFR_condset_ri:
@@ -281,11 +280,11 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
case Hexagon::TFR_condset_ir:
NewOp = Hexagon::TFR_condset_ri;
break;
- case Hexagon::MUX_ri:
- NewOp = Hexagon::MUX_ir;
+ case Hexagon::C2_muxri:
+ NewOp = Hexagon::C2_muxir;
break;
- case Hexagon::MUX_ir:
- NewOp = Hexagon::MUX_ri;
+ case Hexagon::C2_muxir:
+ NewOp = Hexagon::C2_muxri;
break;
}
if (NewOp) {
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 2b6741c..3df98d6 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -45,9 +45,6 @@ HexagonRegisterInfo::HexagonRegisterInfo(HexagonSubtarget &st)
const MCPhysReg *
HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const MCPhysReg CalleeSavedRegsV2[] = {
- Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
- };
static const MCPhysReg CalleeSavedRegsV3[] = {
Hexagon::R16, Hexagon::R17, Hexagon::R18, Hexagon::R19,
Hexagon::R20, Hexagon::R21, Hexagon::R22, Hexagon::R23,
@@ -55,11 +52,6 @@ HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
};
switch(Subtarget.getHexagonArchVersion()) {
- case HexagonSubtarget::V1:
- break;
- case HexagonSubtarget::V2:
- return CalleeSavedRegsV2;
- case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegsV3;
@@ -88,10 +80,6 @@ BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF)
const TargetRegisterClass* const*
HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClassesV2[] = {
- &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
- &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
- };
static const TargetRegisterClass * const CalleeSavedRegClassesV3[] = {
&Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
&Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
@@ -102,11 +90,6 @@ HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
};
switch(Subtarget.getHexagonArchVersion()) {
- case HexagonSubtarget::V1:
- break;
- case HexagonSubtarget::V2:
- return CalleeSavedRegClassesV2;
- case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegClassesV3;
@@ -159,20 +142,18 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
//
// r0 = add(r30, #10000)
// r0 = memw(r0)
- if ( (MI.getOpcode() == Hexagon::LDriw) ||
- (MI.getOpcode() == Hexagon::LDrid) ||
- (MI.getOpcode() == Hexagon::LDrih) ||
- (MI.getOpcode() == Hexagon::LDriuh) ||
- (MI.getOpcode() == Hexagon::LDrib) ||
- (MI.getOpcode() == Hexagon::LDriub) ||
- (MI.getOpcode() == Hexagon::LDriw_f) ||
- (MI.getOpcode() == Hexagon::LDrid_f)) {
- unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ?
+ if ( (MI.getOpcode() == Hexagon::L2_loadri_io) ||
+ (MI.getOpcode() == Hexagon::L2_loadrd_io) ||
+ (MI.getOpcode() == Hexagon::L2_loadrh_io) ||
+ (MI.getOpcode() == Hexagon::L2_loadruh_io) ||
+ (MI.getOpcode() == Hexagon::L2_loadrb_io) ||
+ (MI.getOpcode() == Hexagon::L2_loadrub_io)) {
+ unsigned dstReg = (MI.getOpcode() == Hexagon::L2_loadrd_io) ?
getSubReg(MI.getOperand(0).getReg(), Hexagon::subreg_loreg) :
MI.getOperand(0).getReg();
// Check if offset can fit in addi.
- if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+ if (!TII.isValidOffset(Hexagon::A2_addi, Offset)) {
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
TII.get(Hexagon::CONST32_Int_Real), dstReg).addImm(Offset);
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
@@ -180,19 +161,16 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
dstReg).addReg(FrameReg).addReg(dstReg);
} else {
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::ADD_ri),
+ TII.get(Hexagon::A2_addi),
dstReg).addReg(FrameReg).addImm(Offset);
}
MI.getOperand(FIOperandNum).ChangeToRegister(dstReg, false, false,true);
MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
- } else if ((MI.getOpcode() == Hexagon::STriw_indexed) ||
- (MI.getOpcode() == Hexagon::STriw) ||
- (MI.getOpcode() == Hexagon::STrid) ||
- (MI.getOpcode() == Hexagon::STrih) ||
- (MI.getOpcode() == Hexagon::STrib) ||
- (MI.getOpcode() == Hexagon::STrid_f) ||
- (MI.getOpcode() == Hexagon::STriw_f)) {
+ } else if ((MI.getOpcode() == Hexagon::S2_storeri_io) ||
+ (MI.getOpcode() == Hexagon::S2_storerd_io) ||
+ (MI.getOpcode() == Hexagon::S2_storerh_io) ||
+ (MI.getOpcode() == Hexagon::S2_storerb_io)) {
// For stores, we need a reserved register. Change
// memw(r30 + #10000) = r0 to:
//
@@ -201,7 +179,7 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned resReg = HEXAGON_RESERVED_REG_1;
// Check if offset can fit in addi.
- if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+ if (!TII.isValidOffset(Hexagon::A2_addi, Offset)) {
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
TII.get(Hexagon::CONST32_Int_Real), resReg).addImm(Offset);
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
@@ -209,47 +187,19 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
resReg).addReg(FrameReg).addReg(resReg);
} else {
BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::ADD_ri),
+ TII.get(Hexagon::A2_addi),
resReg).addReg(FrameReg).addImm(Offset);
}
MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false,true);
MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
} else if (TII.isMemOp(&MI)) {
// use the constant extender if the instruction provides it
- // and we are V4TOps.
- if (Subtarget.hasV4TOps()) {
- if (TII.isConstExtended(&MI)) {
- MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
- TII.immediateExtend(&MI);
- } else {
- llvm_unreachable("Need to implement for memops");
- }
+ if (TII.isConstExtended(&MI)) {
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
+ MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
+ TII.immediateExtend(&MI);
} else {
- // Only V3 and older instructions here.
- unsigned ResReg = HEXAGON_RESERVED_REG_1;
- if (!MFI.hasVarSizedObjects() &&
- TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) {
- MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(),
- false, false, false);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset);
- } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::CONST32_Int_Real), ResReg).addImm(Offset);
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::A2_add), ResReg).addReg(FrameReg).
- addReg(ResReg);
- MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
- true);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
- } else {
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::ADD_ri), ResReg).addReg(FrameReg).
- addImm(Offset);
- MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
- true);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
- }
+ llvm_unreachable("Need to implement for memops");
}
} else {
unsigned dstReg = MI.getOperand(0).getReg();
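
The frame-index rewriting above follows one pattern for loads and stores alike: if the offset fits the add-immediate form, fold it into an A2_addi; otherwise materialize it with CONST32 and add registers. A standalone model, where the signed-16-bit range is only an assumption for illustration (the real check is TII.isValidOffset(Hexagon::A2_addi, Offset)):

    #include <cstdint>
    #include <cstdio>

    // Assumed s16 immediate range; the real check queries the instr info.
    bool fitsAddImm(int64_t Off) { return Off >= -32768 && Off <= 32767; }

    void lowerFrameLoad(int64_t Off) {
      if (!fitsAddImm(Off)) {
        std::printf("  r1 = #%lld\n", (long long)Off);           // CONST32_Int_Real
        std::printf("  r1 = add(r30, r1)\n");                    // A2_add
      } else {
        std::printf("  r1 = add(r30, #%lld)\n", (long long)Off); // A2_addi
      }
      std::printf("  r0 = memw(r1 + #0)\n");
    }

    int main() { lowerFrameLoad(10000); lowerFrameLoad(1000000); }
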
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td
index 9750984..edf1c25 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -13,20 +13,25 @@
let Namespace = "Hexagon" in {
- class HexagonReg<bits<5> num, string n> : Register<n> {
+ class HexagonReg<bits<5> num, string n, list<string> alt = [],
+ list<Register> alias = []> : Register<n> {
field bits<5> Num;
+ let Aliases = alias;
let HWEncoding{4-0} = num;
}
- class HexagonDoubleReg<bits<5> num, string n, list<Register> subregs> :
+ class HexagonDoubleReg<bits<5> num, string n, list<Register> subregs,
+ list<string> alt = []> :
RegisterWithSubRegs<n, subregs> {
field bits<5> Num;
+
+ let AltNames = alt;
let HWEncoding{4-0} = num;
}
// Registers are identified with 5-bit ID numbers.
// Ri - 32-bit integer registers.
- class Ri<bits<5> num, string n> : HexagonReg<num, n> {
+ class Ri<bits<5> num, string n, list<string> alt = []> : HexagonReg<num, n, alt> {
let Num = num;
}
@@ -49,27 +54,37 @@ let Namespace = "Hexagon" in {
}
// Rc - control registers
- class Rc<bits<5> num, string n> : HexagonReg<num, n> {
+ class Rc<bits<5> num, string n,
+ list<string> alt = [], list<Register> alias = []> :
+ HexagonReg<num, n, alt, alias> {
let Num = num;
}
- // Rj - aliased integer registers
- class Rj<string n, Ri R>: HexagonReg<R.Num, n> {
- let Num = R.Num;
- let Aliases = [R];
+ // Rcc - 64-bit control registers.
+ class Rcc<bits<5> num, string n, list<Register> subregs,
+ list<string> alt = []> :
+ HexagonDoubleReg<num, n, subregs, alt> {
+ let Num = num;
+ let SubRegs = subregs;
+ }
+
+ // Mx - address modifier registers
+ class Mx<bits<1> num, string n> : HexagonReg<{0b0000, num}, n> {
+ let Num = !cast<bits<5>>(num);
}
def subreg_loreg : SubRegIndex<32>;
def subreg_hireg : SubRegIndex<32, 32>;
+ def subreg_overflow : SubRegIndex<1, 0>;
// Integer registers.
- foreach I = 0-31 in {
- def R#I : Ri<I, "r"#I>, DwarfRegNum<[I]>;
+ foreach i = 0-28 in {
+ def R#i : Ri<i, "r"#i>, DwarfRegNum<[i]>;
}
- def SP : Rj<"sp", R29>, DwarfRegNum<[29]>;
- def FP : Rj<"fp", R30>, DwarfRegNum<[30]>;
- def LR : Rj<"lr", R31>, DwarfRegNum<[31]>;
+ def R29 : Ri<29, "r29", ["sp"]>, DwarfRegNum<[29]>;
+ def R30 : Ri<30, "r30", ["fp"]>, DwarfRegNum<[30]>;
+ def R31 : Ri<31, "r31", ["lr"]>, DwarfRegNum<[31]>;
// Aliases of the R* registers used to hold 64-bit int values (doubles).
let SubRegIndices = [subreg_loreg, subreg_hireg], CoveredBySubRegs = 1 in {
@@ -97,44 +112,98 @@ let Namespace = "Hexagon" in {
def P2 : Rp<2, "p2">, DwarfRegNum<[65]>;
def P3 : Rp<3, "p3">, DwarfRegNum<[66]>;
- // Control registers.
- def SA0 : Rc<0, "sa0">, DwarfRegNum<[67]>;
- def LC0 : Rc<1, "lc0">, DwarfRegNum<[68]>;
-
- def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>;
- def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>;
+ // Modifier registers.
+ // C6 and C7 can also be M0 and M1, but register names must be unique, even
+ // if belonging to different register classes.
+ def M0 : Mx<0, "m0">, DwarfRegNum<[72]>;
+ def M1 : Mx<1, "m1">, DwarfRegNum<[73]>;
- def M0 : Rc<6, "m0">, DwarfRegNum<[71]>;
- def M1 : Rc<7, "m1">, DwarfRegNum<[72]>;
+ // Fake register to represent USR.OVF bit. Artihmetic/saturating instruc-
+ // tions modify this bit, and multiple such instructions are allowed in the
+ // same packet. We need to ignore output dependencies on this bit, but not
+ // on the entire USR.
+ def USR_OVF : Rc<?, "usr.ovf">;
- def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct?
- def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct?
+ // Control registers.
+ def SA0 : Rc<0, "sa0", ["c0"]>, DwarfRegNum<[67]>;
+ def LC0 : Rc<1, "lc0", ["c1"]>, DwarfRegNum<[68]>;
+ def SA1 : Rc<2, "sa1", ["c2"]>, DwarfRegNum<[69]>;
+ def LC1 : Rc<3, "lc1", ["c3"]>, DwarfRegNum<[70]>;
+ def P3_0 : Rc<4, "p3:0", ["c4"], [P0, P1, P2, P3]>,
+ DwarfRegNum<[71]>;
+ def C6 : Rc<6, "c6", [], [M0]>, DwarfRegNum<[72]>;
+ def C7 : Rc<7, "c7", [], [M1]>, DwarfRegNum<[73]>;
+
+ def USR : Rc<8, "usr", ["c8"]>, DwarfRegNum<[74]> {
+ let SubRegIndices = [subreg_overflow];
+ let SubRegs = [USR_OVF];
+ }
+ def PC : Rc<9, "pc">, DwarfRegNum<[75]>;
+ def UGP : Rc<10, "ugp", ["c10"]>, DwarfRegNum<[76]>;
+ def GP : Rc<11, "gp">, DwarfRegNum<[77]>;
+ def CS0 : Rc<12, "cs0", ["c12"]>, DwarfRegNum<[78]>;
+ def CS1 : Rc<13, "cs1", ["c13"]>, DwarfRegNum<[79]>;
+ def UPCL : Rc<14, "upcyclelo", ["c14"]>, DwarfRegNum<[80]>;
+ def UPCH : Rc<15, "upcyclehi", ["c15"]>, DwarfRegNum<[81]>;
}
+ // Control registers pairs.
+ let SubRegIndices = [subreg_loreg, subreg_hireg], CoveredBySubRegs = 1 in {
+ def C1_0 : Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>;
+ def C3_2 : Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>;
+ def C7_6 : Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>;
+ def C9_8 : Rcc<8, "c9:8", [USR, PC]>, DwarfRegNum<[74]>;
+ def C11_10 : Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>;
+ def CS : Rcc<12, "c13:12", [CS0, CS1], ["cs1:0"]>, DwarfRegNum<[78]>;
+ def UPC : Rcc<14, "c15:14", [UPCL, UPCH]>, DwarfRegNum<[80]>;
+ }
+
// Register classes.
//
// FIXME: the register order should be defined in terms of the preferred
// allocation order...
//
-def IntRegs : RegisterClass<"Hexagon", [i32,f32], 32,
+def IntRegs : RegisterClass<"Hexagon", [i32, f32, v4i8, v2i16], 32,
(add (sequence "R%u", 0, 9),
(sequence "R%u", 12, 28),
R10, R11, R29, R30, R31)> {
}
-def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64,
+def DoubleRegs : RegisterClass<"Hexagon", [i64, f64, v8i8, v4i16, v2i32], 64,
(add (sequence "D%u", 0, 4),
(sequence "D%u", 6, 13), D5, D14, D15)>;
-def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
+def PredRegs : RegisterClass<"Hexagon",
+ [i1, v2i1, v4i1, v8i1, v4i8, v2i16, i32], 32,
+ (add (sequence "P%u", 0, 3))>
{
let Size = 32;
}
-def CRRegs : RegisterClass<"Hexagon", [i32], 32,
- (add (sequence "LC%u", 0, 1),
- (sequence "SA%u", 0, 1),
- (sequence "M%u", 0, 1), PC, GP)> {
- let Size = 32;
+let Size = 32 in
+def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>;
+
+let Size = 32, isAllocatable = 0 in
+def CtrRegs : RegisterClass<"Hexagon", [i32], 32,
+ (add LC0, SA0, LC1, SA1,
+ P3_0,
+ M0, M1, C6, C7, CS0, CS1, UPCL, UPCH,
+ USR, USR_OVF, UGP, GP, PC)>;
+
+let Size = 64, isAllocatable = 0 in
+def CtrRegs64 : RegisterClass<"Hexagon", [i64], 64,
+ (add C1_0, C3_2, C7_6, C9_8, C11_10, CS, UPC)>;
+
+def VolatileV3 {
+ list<Register> Regs = [D0, D1, D2, D3, D4, D5, D6, D7,
+ R28, R31,
+ P0, P1, P2, P3,
+ M0, M1,
+ LC0, LC1, SA0, SA1, USR, USR_OVF];
}
+
+def PositiveHalfWord : PatLeaf<(i32 IntRegs:$a),
+[{
+ return isPositiveHalfWord(N);
+}]>;
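
The new PositiveHalfWord PatLeaf defers to an isPositiveHalfWord(N) predicate defined elsewhere. As a rough standalone model of what it accepts, assuming it means "provably non-negative and within a halfword" (the in-tree predicate reasons about SDNodes, e.g. zero-extending halfword loads, not plain constants):

    #include <cassert>
    #include <cstdint>

    // Constant-only approximation of the predicate.
    bool isPositiveHalfWord(int32_t V) { return V >= 0 && V <= 0x7FFF; }

    int main() {
      assert(isPositiveHalfWord(32767));
      assert(!isPositiveHalfWord(-1) && !isPositiveHalfWord(0x8000));
    }
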
diff --git a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 2b459a4..0c24075 100644
--- a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -15,6 +15,7 @@
#include "Hexagon.h"
#include "HexagonTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
@@ -42,7 +43,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineFunctionAnalysis>();
AU.addPreserved<MachineFunctionAnalysis>();
- AU.addPreserved("stack-protector");
+ AU.addPreserved<StackProtector>();
FunctionPass::getAnalysisUsage(AU);
}
};
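
The switch from addPreserved("stack-protector") to addPreserved<StackProtector>() trades a run-time string lookup for a compile-time-checked type, which is why the StackProtector.h include appears. A standalone model of why the typed form is safer:

    #include <cassert>
    #include <set>
    #include <string>
    #include <typeindex>
    #include <typeinfo>

    struct StackProtector {}; // stand-in for the analysis pass type

    struct AnalysisUsage {
      std::set<std::string> PreservedByName;
      std::set<std::type_index> PreservedByType;
      // String form: a typo like "stack-protecter" only fails at run time.
      void addPreserved(const std::string &N) { PreservedByName.insert(N); }
      // Typed form: a misspelled type name fails to compile.
      template <typename T> void addPreserved() {
        PreservedByType.insert(typeid(T));
      }
    };

    int main() {
      AnalysisUsage AU;
      AU.addPreserved<StackProtector>();
      assert(AU.PreservedByType.count(typeid(StackProtector)));
    }
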
diff --git a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
index 8fdd493..ce6a39a 100644
--- a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
+++ b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
@@ -48,12 +48,9 @@ using namespace llvm;
namespace {
class HexagonSplitConst32AndConst64 : public MachineFunctionPass {
- const HexagonTargetMachine &QTM;
-
public:
static char ID;
- HexagonSplitConst32AndConst64(const HexagonTargetMachine &TM)
- : MachineFunctionPass(ID), QTM(TM) {}
+ HexagonSplitConst32AndConst64() : MachineFunctionPass(ID) {}
const char *getPassName() const override {
return "Hexagon Split Const32s and Const64s";
@@ -68,13 +65,12 @@ char HexagonSplitConst32AndConst64::ID = 0;
bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
const HexagonTargetObjectFile &TLOF =
- (const HexagonTargetObjectFile &)QTM.getSubtargetImpl()
- ->getTargetLowering()
- ->getObjFileLowering();
+ *static_cast<const HexagonTargetObjectFile *>(
+ Fn.getTarget().getObjFileLowering());
if (TLOF.IsSmallDataEnabled())
return true;
- const TargetInstrInfo *TII = QTM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
// Loop over all of the basic blocks
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
@@ -117,9 +113,9 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
MachineOperand &Symbol = MI->getOperand (1);
BuildMI (*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::LO_label), DestReg).addOperand(Symbol);
+ TII->get(Hexagon::LO_PIC), DestReg).addOperand(Symbol);
BuildMI (*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::HI_label), DestReg).addOperand(Symbol);
+ TII->get(Hexagon::HI_PIC), DestReg).addOperand(Symbol);
// MBB->erase returns the iterator to the next instruction, which is the
// one we want to process next
MII = MBB->erase (MI);
@@ -139,9 +135,9 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
else if (Opc == Hexagon::CONST64_Int_Real) {
int DestReg = MI->getOperand(0).getReg();
int64_t ImmValue = MI->getOperand(1).getImm ();
- unsigned DestLo = QTM.getSubtargetImpl()->getRegisterInfo()->getSubReg(
+ unsigned DestLo = Fn.getSubtarget().getRegisterInfo()->getSubReg(
DestReg, Hexagon::subreg_loreg);
- unsigned DestHi = QTM.getSubtargetImpl()->getRegisterInfo()->getSubReg(
+ unsigned DestHi = Fn.getSubtarget().getRegisterInfo()->getSubReg(
DestReg, Hexagon::subreg_hireg);
int32_t LowWord = (ImmValue & 0xFFFFFFFF);
@@ -176,6 +172,6 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
//===----------------------------------------------------------------------===//
FunctionPass *
-llvm::createHexagonSplitConst32AndConst64(const HexagonTargetMachine &TM) {
- return new HexagonSplitConst32AndConst64(TM);
+llvm::createHexagonSplitConst32AndConst64() {
+ return new HexagonSplitConst32AndConst64();
}
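
The CONST64 expansion in the hunk above splits a 64-bit immediate into two 32-bit transfers targeting the low and high subregisters. A worked standalone model of the arithmetic:

    #include <cstdint>
    #include <cstdio>

    void splitConst64(int64_t ImmValue) {
      int32_t LowWord  = (int32_t)(ImmValue & 0xFFFFFFFF);
      int32_t HighWord = (int32_t)(ImmValue >> 32);
      std::printf("  r0 = #%d  // subreg_loreg\n", LowWord);
      std::printf("  r1 = #%d  // subreg_hireg\n", HighWord);
    }

    int main() { splitConst64(0x123456789ABCDEF0LL); }
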
diff --git a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
index 1052b80..8873bb9 100644
--- a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
+++ b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
@@ -58,13 +58,9 @@ namespace llvm {
namespace {
class HexagonSplitTFRCondSets : public MachineFunctionPass {
- const HexagonTargetMachine &QTM;
- const HexagonSubtarget &QST;
-
public:
static char ID;
- HexagonSplitTFRCondSets(const HexagonTargetMachine& TM) :
- MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {
+ HexagonSplitTFRCondSets() : MachineFunctionPass(ID) {
initializeHexagonSplitTFRCondSetsPass(*PassRegistry::getPassRegistry());
}
@@ -80,7 +76,7 @@ char HexagonSplitTFRCondSets::ID = 0;
bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
- const TargetInstrInfo *TII = QTM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
// Loop over all of the basic blocks.
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
@@ -90,41 +86,8 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
++MII) {
MachineInstr *MI = MII;
- int Opc1, Opc2;
switch(MI->getOpcode()) {
- case Hexagon::TFR_condset_rr:
- case Hexagon::TFR_condset_rr_f:
- case Hexagon::TFR_condset_rr64_f: {
- int DestReg = MI->getOperand(0).getReg();
- int SrcReg1 = MI->getOperand(2).getReg();
- int SrcReg2 = MI->getOperand(3).getReg();
-
- if (MI->getOpcode() == Hexagon::TFR_condset_rr ||
- MI->getOpcode() == Hexagon::TFR_condset_rr_f) {
- Opc1 = Hexagon::TFR_cPt;
- Opc2 = Hexagon::TFR_cNotPt;
- }
- else if (MI->getOpcode() == Hexagon::TFR_condset_rr64_f) {
- Opc1 = Hexagon::TFR64_cPt;
- Opc2 = Hexagon::TFR64_cNotPt;
- }
-
- // Minor optimization: do not emit the predicated copy if the source
- // and the destination is the same register.
- if (DestReg != SrcReg1) {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc1),
- DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
- }
- if (DestReg != SrcReg2) {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc2),
- DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
- }
- MII = MBB->erase(MI);
- --MII;
- break;
- }
- case Hexagon::TFR_condset_ri:
- case Hexagon::TFR_condset_ri_f: {
+ case Hexagon::TFR_condset_ri: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(2).getReg();
@@ -132,77 +95,50 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
// is the same register.
if (DestReg != SrcReg1) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFR_cPt), DestReg).
+ TII->get(Hexagon::A2_tfrt), DestReg).
addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
}
- if (MI->getOpcode() == Hexagon::TFR_condset_ri ) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cNotPt), DestReg).
- addReg(MI->getOperand(1).getReg()).
- addImm(MI->getOperand(3).getImm());
- } else if (MI->getOpcode() == Hexagon::TFR_condset_ri_f ) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
- addReg(MI->getOperand(1).getReg()).
- addFPImm(MI->getOperand(3).getFPImm());
- }
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::C2_cmoveif), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addImm(MI->getOperand(3).getImm());
MII = MBB->erase(MI);
--MII;
break;
}
- case Hexagon::TFR_condset_ir:
- case Hexagon::TFR_condset_ir_f: {
+ case Hexagon::TFR_condset_ir: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg2 = MI->getOperand(3).getReg();
- if (MI->getOpcode() == Hexagon::TFR_condset_ir ) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cPt), DestReg).
- addReg(MI->getOperand(1).getReg()).
- addImm(MI->getOperand(2).getImm());
- } else if (MI->getOpcode() == Hexagon::TFR_condset_ir_f ) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cPt_f), DestReg).
- addReg(MI->getOperand(1).getReg()).
- addFPImm(MI->getOperand(2).getFPImm());
- }
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::C2_cmoveit), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addImm(MI->getOperand(2).getImm());
// Do not emit the predicated copy if the source and
// the destination is the same register.
if (DestReg != SrcReg2) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFR_cNotPt), DestReg).
+ TII->get(Hexagon::A2_tfrf), DestReg).
addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
}
MII = MBB->erase(MI);
--MII;
break;
}
- case Hexagon::TFR_condset_ii:
- case Hexagon::TFR_condset_ii_f: {
+ case Hexagon::TFR_condset_ii: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(1).getReg();
- if (MI->getOpcode() == Hexagon::TFR_condset_ii ) {
- int Immed1 = MI->getOperand(2).getImm();
- int Immed2 = MI->getOperand(3).getImm();
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cPt),
- DestReg).addReg(SrcReg1).addImm(Immed1);
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cNotPt),
- DestReg).addReg(SrcReg1).addImm(Immed2);
- } else if (MI->getOpcode() == Hexagon::TFR_condset_ii_f ) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cPt_f), DestReg).
- addReg(SrcReg1).
- addFPImm(MI->getOperand(2).getFPImm());
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
- addReg(SrcReg1).
- addFPImm(MI->getOperand(3).getFPImm());
- }
+ int Immed1 = MI->getOperand(2).getImm();
+ int Immed2 = MI->getOperand(3).getImm();
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::C2_cmoveit),
+ DestReg).addReg(SrcReg1).addImm(Immed1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::C2_cmoveif),
+ DestReg).addReg(SrcReg1).addImm(Immed2);
MII = MBB->erase(MI);
--MII;
break;
@@ -231,7 +167,6 @@ void llvm::initializeHexagonSplitTFRCondSetsPass(PassRegistry &Registry) {
CALL_ONCE_INITIALIZATION(initializePassOnce)
}
-FunctionPass*
-llvm::createHexagonSplitTFRCondSets(const HexagonTargetMachine &TM) {
- return new HexagonSplitTFRCondSets(TM);
+FunctionPass *llvm::createHexagonSplitTFRCondSets() {
+ return new HexagonSplitTFRCondSets();
}
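
After this cleanup, each TFR_condset_ii becomes a pair of immediate moves predicated on opposite senses of the same predicate register. A standalone model that prints the resulting pseudo-assembly:

    #include <cstdio>

    void expandCondsetII(int PredReg, int Imm1, int Imm2) {
      std::printf("  if (p%d) r0 = #%d\n", PredReg, Imm1);  // C2_cmoveit
      std::printf("  if (!p%d) r0 = #%d\n", PredReg, Imm2); // C2_cmoveif
    }

    int main() { expandCondsetII(0, 1, 0); }
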
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index 657893f..380f023 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -54,12 +54,7 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
if (CPUString.empty())
CPUString = "hexagonv4";
- if (CPUString == "hexagonv2") {
- HexagonArchVersion = V2;
- } else if (CPUString == "hexagonv3") {
- EnableV3 = true;
- HexagonArchVersion = V3;
- } else if (CPUString == "hexagonv4") {
+ if (CPUString == "hexagonv4") {
HexagonArchVersion = V4;
} else if (CPUString == "hexagonv5") {
HexagonArchVersion = V5;
@@ -74,9 +69,8 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
const TargetMachine &TM)
: HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU.str()),
- DL("e-m:e-p:32:32-i1:32-i64:64-a:0-n32"),
- InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
- TSInfo(DL), FrameLowering() {
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ TSInfo(*TM.getDataLayout()), FrameLowering() {
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
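
With V2/V3 gone, CPU dispatch reduces to two cases. A standalone model of the remaining mapping (unlike this sketch, the real code reports an error for unrecognized CPU strings rather than silently defaulting):

    #include <cassert>
    #include <string>

    enum HexagonArchEnum { V4, V5 };

    HexagonArchEnum archFromCPU(std::string CPU) {
      if (CPU.empty())
        CPU = "hexagonv4"; // default CPU
      if (CPU == "hexagonv5")
        return V5;
      return V4; // "hexagonv4"; real code errors out on anything else
    }

    int main() {
      assert(archFromCPU("") == V4);
      assert(archFromCPU("hexagonv5") == V5);
    }
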
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index 10776ae..57de546 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -15,8 +15,8 @@
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONSUBTARGET_H
#include "HexagonFrameLowering.h"
-#include "HexagonInstrInfo.h"
#include "HexagonISelLowering.h"
+#include "HexagonInstrInfo.h"
#include "HexagonSelectionDAGInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
@@ -39,13 +39,12 @@ class HexagonSubtarget : public HexagonGenSubtargetInfo {
public:
enum HexagonArchEnum {
- V1, V2, V3, V4, V5
+ V4, V5
};
HexagonArchEnum HexagonArchVersion;
private:
std::string CPUString;
- const DataLayout DL; // Calculates type size & alignment.
HexagonInstrInfo InstrInfo;
HexagonTargetLowering TLInfo;
HexagonSelectionDAGInfo TSInfo;
@@ -74,7 +73,6 @@ public:
const HexagonSelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
- const DataLayout *getDataLayout() const override { return &DL; }
HexagonSubtarget &initializeSubtargetDependencies(StringRef CPU,
StringRef FS);
@@ -83,18 +81,11 @@ public:
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool hasV2TOps () const { return HexagonArchVersion >= V2; }
- bool hasV2TOpsOnly () const { return HexagonArchVersion == V2; }
- bool hasV3TOps () const { return HexagonArchVersion >= V3; }
- bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; }
- bool hasV4TOps () const { return HexagonArchVersion >= V4; }
- bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; }
- bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; }
- bool hasV5TOps () const { return HexagonArchVersion >= V5; }
- bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; }
- bool modeIEEERndNear () const { return ModeIEEERndNear; }
-
- bool isSubtargetV2() const { return HexagonArchVersion == V2;}
+ bool useMemOps() const { return UseMemOps; }
+ bool hasV5TOps() const { return getHexagonArchVersion() >= V5; }
+ bool hasV5TOpsOnly() const { return getHexagonArchVersion() == V5; }
+ bool modeIEEERndNear() const { return ModeIEEERndNear; }
+
const std::string &getCPUString () const { return CPUString; }
// Threshold for small data section
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index cd18dfb..64f75a3 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -17,8 +17,8 @@
#include "HexagonMachineScheduler.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
@@ -71,7 +71,7 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(make_unique<HexagonTargetObjectFile>()),
- Subtarget(TT, CPU, FS, *this) {
+ DL("e-m:e-p:32:32-i1:32-i64:64-a:0-n32"), Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -103,10 +103,10 @@ public:
}
bool addInstSelector() override;
- bool addPreRegAlloc() override;
- bool addPostRegAlloc() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -131,51 +131,41 @@ bool HexagonPassConfig::addInstSelector() {
return false;
}
-bool HexagonPassConfig::addPreRegAlloc() {
+void HexagonPassConfig::addPreRegAlloc() {
if (getOptLevel() != CodeGenOpt::None)
if (!DisableHardwareLoops)
- addPass(createHexagonHardwareLoops());
- return false;
+ addPass(createHexagonHardwareLoops(), false);
}
-bool HexagonPassConfig::addPostRegAlloc() {
- const HexagonTargetMachine &TM = getHexagonTargetMachine();
+void HexagonPassConfig::addPostRegAlloc() {
if (getOptLevel() != CodeGenOpt::None)
if (!DisableHexagonCFGOpt)
- addPass(createHexagonCFGOptimizer(TM));
- return false;
+ addPass(createHexagonCFGOptimizer(), false);
}
-bool HexagonPassConfig::addPreSched2() {
- const HexagonTargetMachine &TM = getHexagonTargetMachine();
-
- addPass(createHexagonCopyToCombine());
+void HexagonPassConfig::addPreSched2() {
+ addPass(createHexagonCopyToCombine(), false);
if (getOptLevel() != CodeGenOpt::None)
- addPass(&IfConverterID);
- addPass(createHexagonSplitConst32AndConst64(TM));
- printAndVerify("After hexagon split const32/64 pass");
- return true;
+ addPass(&IfConverterID, false);
+ addPass(createHexagonSplitConst32AndConst64());
}
-bool HexagonPassConfig::addPreEmitPass() {
- const HexagonTargetMachine &TM = getHexagonTargetMachine();
+void HexagonPassConfig::addPreEmitPass() {
bool NoOpt = (getOptLevel() == CodeGenOpt::None);
if (!NoOpt)
- addPass(createHexagonNewValueJump());
+ addPass(createHexagonNewValueJump(), false);
// Expand Spill code for predicate registers.
- addPass(createHexagonExpandPredSpillCode(TM));
+ addPass(createHexagonExpandPredSpillCode(), false);
// Split up TFRcondsets into conditional transfers.
- addPass(createHexagonSplitTFRCondSets(TM));
+ addPass(createHexagonSplitTFRCondSets(), false);
// Create Packets.
if (!NoOpt) {
if (!DisableHardwareLoops)
- addPass(createHexagonFixupHwLoops());
- addPass(createHexagonPacketizer());
+ addPass(createHexagonFixupHwLoops(), false);
+ addPass(createHexagonPacketizer(), false);
}
-
- return false;
}
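
The pass-config hooks now return void, and addPass calls gain a trailing boolean. A sketch under the assumption that the flag means "verify the machine function after this pass", defaulted to true and passed as false here to skip verification for intermediate passes:

    #include <cstdio>

    // Toy stand-in for TargetPassConfig::addPass(Pass *, bool);
    // the semantics of the flag are an assumption for illustration.
    void addPass(const char *Name, bool VerifyAfter = true) {
      std::printf("run %s%s\n", Name, VerifyAfter ? ", then verify" : "");
    }

    void addPreSched2() {                     // was: bool addPreSched2()
      addPass("hexagon-copy-combine", false); // skip verification
      addPass("hexagon-split-const32-64");    // final pass keeps the default
    }

    int main() { addPreSched2(); }
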
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.h b/lib/Target/Hexagon/HexagonTargetMachine.h
index 4a9f447..e0b3a9b 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -24,6 +24,7 @@ class Module;
class HexagonTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL; // Calculates type size & alignment.
HexagonSubtarget Subtarget;
public:
@@ -32,7 +33,7 @@ public:
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~HexagonTargetMachine() override;
-
+ const DataLayout *getDataLayout() const override { return &DL; }
const HexagonSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index f4ab5e2..d8660d3 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -33,14 +33,10 @@ void HexagonTargetObjectFile::Initialize(MCContext &Ctx,
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(TM.Options.UseInitArray);
- SmallDataSection =
- getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getDataRel());
- SmallBSSSection =
- getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getBSS());
+ SmallDataSection = getContext().getELFSection(
+ ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
}
// sdata/sbss support taken largely from the MIPS Backend.
@@ -79,8 +75,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
if (Kind.isBSS() || Kind.isDataNoRel() || Kind.isCommon()) {
Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Ty));
+ return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
}
return false;
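
The small-data test boils down to comparing a type's allocated size against a threshold. A standalone model, assuming the default threshold of 8 bytes (configurable via -hexagon-small-data-threshold):

    #include <cassert>
    #include <cstdint>

    bool isInSmallSection(uint64_t TypeAllocSize, uint64_t Threshold = 8) {
      return TypeAllocSize > 0 && TypeAllocSize <= Threshold;
    }

    int main() {
      assert(isInSmallSection(4));   // -> .sdata/.sbss
      assert(!isInSmallSection(64)); // -> regular .data/.bss
    }
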
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index e7296d6..c123640 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -264,8 +264,7 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
static bool IsIndirectCall(MachineInstr* MI) {
- return ((MI->getOpcode() == Hexagon::CALLR) ||
- (MI->getOpcode() == Hexagon::CALLRv3));
+ return MI->getOpcode() == Hexagon::J2_callr;
}
// Reserve resources for constant extender. Trigger an assertion if
@@ -273,7 +272,7 @@ static bool IsIndirectCall(MachineInstr* MI) {
void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
MachineFunction *MF = MI->getParent()->getParent();
- MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT_i),
+ MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::A4_ext),
MI->getDebugLoc());
if (ResourceTracker->canReserveResources(PseudoMI)) {
@@ -291,7 +290,7 @@ bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
assert((QII->isExtended(MI) || QII->isConstExtended(MI)) &&
"Should only be called for constant extended instructions");
MachineFunction *MF = MI->getParent()->getParent();
- MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT_i),
+ MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::A4_ext),
MI->getDebugLoc());
bool CanReserve = ResourceTracker->canReserveResources(PseudoMI);
MF->DeleteMachineInstr(PseudoMI);
@@ -303,7 +302,7 @@ bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
bool HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
MachineFunction *MF = MI->getParent()->getParent();
- MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT_i),
+ MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::A4_ext),
MI->getDebugLoc());
if (ResourceTracker->canReserveResources(PseudoMI)) {
@@ -366,12 +365,12 @@ static bool IsRegDependence(const SDep::Kind DepType) {
}
static bool IsDirectJump(MachineInstr* MI) {
- return (MI->getOpcode() == Hexagon::JMP);
+ return (MI->getOpcode() == Hexagon::J2_jump);
}
static bool IsSchedBarrier(MachineInstr* MI) {
switch (MI->getOpcode()) {
- case Hexagon::BARRIER:
+ case Hexagon::Y2_barrier:
return true;
}
return false;
@@ -382,8 +381,8 @@ static bool IsControlFlow(MachineInstr* MI) {
}
static bool IsLoopN(MachineInstr *MI) {
- return (MI->getOpcode() == Hexagon::LOOP0_i ||
- MI->getOpcode() == Hexagon::LOOP0_r);
+ return (MI->getOpcode() == Hexagon::J2_loop0i ||
+ MI->getOpcode() == Hexagon::J2_loop0r);
}
/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a
@@ -563,8 +562,8 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore(
if (PacketSU->getInstr()->getDesc().mayStore() ||
// if we have mayStore = 1 set on ALLOCFRAME and DEALLOCFRAME,
// then we don't need this
- PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
- PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME)
+ PacketSU->getInstr()->getOpcode() == Hexagon::S2_allocframe ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::L2_deallocframe)
return false;
}
@@ -721,10 +720,7 @@ bool HexagonPacketizerList::CanPromoteToNewValue(
MachineBasicBlock::iterator &MII) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- const HexagonRegisterInfo *QRI =
- (const HexagonRegisterInfo *)MF.getSubtarget().getRegisterInfo();
- if (!QRI->Subtarget.hasV4TOps() ||
- !QII->mayBeNewStore(MI))
+ if (!QII->mayBeNewStore(MI))
return false;
MachineInstr *PacketMI = PacketSU->getInstr();
@@ -1055,84 +1051,82 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// first store is not in SLOT0. New value store, new value jump,
// dealloc_return and memop always take SLOT0.
// Arch spec 3.4.4.2
- if (QRI->Subtarget.hasV4TOps()) {
- if (MCIDI.mayStore() && MCIDJ.mayStore() &&
- (QII->isNewValueInst(J) || QII->isMemOp(J) || QII->isMemOp(I))) {
- Dependence = true;
- return false;
- }
+ if (MCIDI.mayStore() && MCIDJ.mayStore() &&
+ (QII->isNewValueInst(J) || QII->isMemOp(J) || QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
- if ((QII->isMemOp(J) && MCIDI.mayStore())
- || (MCIDJ.mayStore() && QII->isMemOp(I))
- || (QII->isMemOp(J) && QII->isMemOp(I))) {
- Dependence = true;
- return false;
- }
+ if ((QII->isMemOp(J) && MCIDI.mayStore())
+ || (MCIDJ.mayStore() && QII->isMemOp(I))
+ || (QII->isMemOp(J) && QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
- //if dealloc_return
- if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
- Dependence = true;
- return false;
- }
+ //if dealloc_return
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
+ Dependence = true;
+ return false;
+ }
- // If an instruction feeds new value jump, glue it.
- MachineBasicBlock::iterator NextMII = I;
- ++NextMII;
- if (NextMII != I->getParent()->end() && QII->isNewValueJump(NextMII)) {
- MachineInstr *NextMI = NextMII;
+ // If an instruction feeds new value jump, glue it.
+ MachineBasicBlock::iterator NextMII = I;
+ ++NextMII;
+ if (NextMII != I->getParent()->end() && QII->isNewValueJump(NextMII)) {
+ MachineInstr *NextMI = NextMII;
- bool secondRegMatch = false;
- bool maintainNewValueJump = false;
+ bool secondRegMatch = false;
+ bool maintainNewValueJump = false;
- if (NextMI->getOperand(1).isReg() &&
- I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
- secondRegMatch = true;
- maintainNewValueJump = true;
- }
+ if (NextMI->getOperand(1).isReg() &&
+ I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
+ secondRegMatch = true;
+ maintainNewValueJump = true;
+ }
- if (!secondRegMatch &&
- I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
- maintainNewValueJump = true;
- }
+ if (!secondRegMatch &&
+ I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
+ maintainNewValueJump = true;
+ }
- for (std::vector<MachineInstr*>::iterator
- VI = CurrentPacketMIs.begin(),
- VE = CurrentPacketMIs.end();
- (VI != VE && maintainNewValueJump); ++VI) {
- SUnit *PacketSU = MIToSUnit.find(*VI)->second;
+ for (std::vector<MachineInstr*>::iterator
+ VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE && maintainNewValueJump); ++VI) {
+ SUnit *PacketSU = MIToSUnit.find(*VI)->second;
- // NVJ can not be part of the dual jump - Arch Spec: section 7.8
- if (PacketSU->getInstr()->getDesc().isCall()) {
- Dependence = true;
- break;
- }
- // Validate
- // 1. Packet does not have a store in it.
- // 2. If the first operand of the nvj is newified, and the second
- // operand is also a reg, it (second reg) is not defined in
- // the same packet.
- // 3. If the second operand of the nvj is newified, (which means
- // first operand is also a reg), first reg is not defined in
- // the same packet.
- if (PacketSU->getInstr()->getDesc().mayStore() ||
- PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
- // Check #2.
- (!secondRegMatch && NextMI->getOperand(1).isReg() &&
- PacketSU->getInstr()->modifiesRegister(
- NextMI->getOperand(1).getReg(), QRI)) ||
- // Check #3.
- (secondRegMatch &&
- PacketSU->getInstr()->modifiesRegister(
- NextMI->getOperand(0).getReg(), QRI))) {
- Dependence = true;
- break;
- }
+ // NVJ can not be part of the dual jump - Arch Spec: section 7.8
+ if (PacketSU->getInstr()->getDesc().isCall()) {
+ Dependence = true;
+ break;
+ }
+ // Validate
+ // 1. Packet does not have a store in it.
+ // 2. If the first operand of the nvj is newified, and the second
+ // operand is also a reg, it (second reg) is not defined in
+ // the same packet.
+ // 3. If the second operand of the nvj is newified, (which means
+ // first operand is also a reg), first reg is not defined in
+ // the same packet.
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::S2_allocframe ||
+ // Check #2.
+ (!secondRegMatch && NextMI->getOperand(1).isReg() &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(1).getReg(), QRI)) ||
+ // Check #3.
+ (secondRegMatch &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(0).getReg(), QRI))) {
+ Dependence = true;
+ break;
}
- if (!Dependence)
- GlueToNewValueJump = true;
- else
- return false;
}
+ if (!Dependence)
+ GlueToNewValueJump = true;
+ else
+ return false;
}
if (SUJ->isSucc(SUI)) {
@@ -1254,9 +1248,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
else if ((DepType == SDep::Order) &&
!I->hasOrderedMemoryRef() &&
!J->hasOrderedMemoryRef()) {
- if (QRI->Subtarget.hasV4TOps() &&
- // hexagonv4 allows dual store.
- MCIDI.mayStore() && MCIDJ.mayStore()) {
+ if (MCIDI.mayStore() && MCIDJ.mayStore()) {
/* do nothing */
}
// store followed by store-- not OK on V2
@@ -1278,11 +1270,10 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// packetized in a same packet. This implies that the store is using
// caller's SP. Hence, offset needs to be updated accordingly.
else if (DepType == SDep::Data
- && QRI->Subtarget.hasV4TOps()
- && J->getOpcode() == Hexagon::ALLOCFRAME
- && (I->getOpcode() == Hexagon::STrid
- || I->getOpcode() == Hexagon::STriw
- || I->getOpcode() == Hexagon::STrib)
+ && J->getOpcode() == Hexagon::S2_allocframe
+ && (I->getOpcode() == Hexagon::S2_storerd_io
+ || I->getOpcode() == Hexagon::S2_storeri_io
+ || I->getOpcode() == Hexagon::S2_storerb_io)
&& I->getOperand(0).getReg() == QRI->getStackRegister()
&& QII->isValidOffset(I->getOpcode(),
I->getOperand(1).getImm() -
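
The de-indented block earlier in this file enforces the slot-0 rule cited from arch spec 3.4.4.2: new-value stores, new-value jumps, dealloc_return and memops all claim slot 0, so two stores cannot share a packet when one of those forms is involved. A standalone model of the core check:

    #include <cassert>

    bool canPacketizeStores(bool IMayStore, bool JMayStore,
                            bool EitherNewValueOrMemOp) {
      if (IMayStore && JMayStore && EitherNewValueOrMemOp)
        return false; // both would compete for slot 0
      return true;    // otherwise V4 dual stores are fine
    }

    int main() {
      assert(!canPacketizeStores(true, true, true));
      assert(canPacketizeStores(true, true, false)); // plain dual store
    }
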
diff --git a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
deleted file mode 100644
index edbe29a..0000000
--- a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
+++ /dev/null
@@ -1,149 +0,0 @@
-//===-- HexagonVarargsCallingConvention.h - Calling Conventions -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the functions that assign locations to outgoing function
-// arguments. Adapted from the target independent version but this handles
-// calls to varargs functions
-//
-//===----------------------------------------------------------------------===//
-//
-
-
-
-
-static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags,
- Hexagon_CCState &State,
- int NonVarArgsParams,
- int CurrentParam,
- bool ForceMem);
-
-
-static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags,
- Hexagon_CCState &State,
- int NonVarArgsParams,
- int CurrentParam,
- bool ForceMem) {
- unsigned ByValSize = 0;
- if (ArgFlags.isByVal() &&
- ((ByValSize = ArgFlags.getByValSize()) >
- (MVT(MVT::i64).getSizeInBits() / 8))) {
- ForceMem = true;
- }
-
-
- // Only assign registers for named (non-varargs) arguments
- if ( !ForceMem && ((NonVarArgsParams == -1) || (CurrentParam <=
- NonVarArgsParams))) {
-
- if (LocVT == MVT::i32 ||
- LocVT == MVT::i16 ||
- LocVT == MVT::i8 ||
- LocVT == MVT::f32) {
- static const unsigned RegList1[] = {
- Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
- Hexagon::R5
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 6)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
- LocVT.getSimpleVT(), LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i64 ||
- LocVT == MVT::f64) {
- static const unsigned RegList2[] = {
- Hexagon::D0, Hexagon::D1, Hexagon::D2
- };
- if (unsigned Reg = State.AllocateReg(RegList2, 3)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
- LocVT.getSimpleVT(), LocInfo));
- return false;
- }
- }
- }
-
- const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
- unsigned Alignment = State.getTarget()
- .getSubtargetImpl()
- ->getDataLayout()
- ->getABITypeAlignment(ArgTy);
- unsigned Size =
- State.getTarget().getSubtargetImpl()->getDataLayout()->getTypeSizeInBits(
- ArgTy) /
- 8;
-
- // If it's passed by value, then we need the size of the aggregate not of
- // the pointer.
- if (ArgFlags.isByVal()) {
- Size = ByValSize;
-
- // Hexagon_TODO: Get the alignment of the contained type here.
- Alignment = 8;
- }
-
- unsigned Offset3 = State.AllocateStack(Size, Alignment);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
- LocVT.getSimpleVT(), LocInfo));
- return false;
-}
-
-
-static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags,
- Hexagon_CCState &State,
- int NonVarArgsParams,
- int CurrentParam,
- bool ForceMem) {
-
- if (LocVT == MVT::i32 ||
- LocVT == MVT::f32) {
- static const unsigned RegList1[] = {
- Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
- Hexagon::R5
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 6)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
- LocVT.getSimpleVT(), LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i64 ||
- LocVT == MVT::f64) {
- static const unsigned RegList2[] = {
- Hexagon::D0, Hexagon::D1, Hexagon::D2
- };
- if (unsigned Reg = State.AllocateReg(RegList2, 3)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
- LocVT.getSimpleVT(), LocInfo));
- return false;
- }
- }
-
- const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
- unsigned Alignment = State.getTarget()
- .getSubtargetImpl()
- ->getDataLayout()
- ->getABITypeAlignment(ArgTy);
- unsigned Size =
- State.getTarget().getSubtargetImpl()->getDataLayout()->getTypeSizeInBits(
- ArgTy) /
- 8;
-
- unsigned Offset3 = State.AllocateStack(Size, Alignment);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
- LocVT.getSimpleVT(), LocInfo));
- return false;
-}
diff --git a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
index 2a6124e..4c987ed 100644
--- a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
@@ -4,7 +4,7 @@ add_llvm_library(LLVMHexagonDesc
HexagonInstPrinter.cpp
HexagonMCAsmInfo.cpp
HexagonMCCodeEmitter.cpp
- HexagonMCInst.cpp
+ HexagonMCInstrInfo.cpp
HexagonMCTargetDesc.cpp
)
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index c0a3fae..8e02f79 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -19,7 +19,6 @@
#include "HexagonMCTargetDesc.h"
#include "llvm/Support/ErrorHandling.h"
-
#include <stdint.h>
namespace llvm {
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
index 1fd8d70..6c87c9f 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
@@ -14,7 +14,7 @@
#include "HexagonAsmPrinter.h"
#include "Hexagon.h"
#include "HexagonInstPrinter.h"
-#include "MCTargetDesc/HexagonMCInst.h"
+#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
@@ -77,46 +77,41 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
return getRegisterName(RegNo);
}
-void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
- StringRef Annot) {
- printInst((const HexagonMCInst*)(MI), O, Annot);
-}
-
-void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
+void HexagonInstPrinter::printInst(MCInst const *MI, raw_ostream &O,
StringRef Annot) {
const char startPacket = '{',
endPacket = '}';
// TODO: add outer HW loop when it's supported too.
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
// Ending a hardware loop is different from ending a regular packet.
- assert(MI->isPacketEnd() && "Loop-end must also end the packet");
+ assert(HexagonMCInstrInfo::isPacketEnd(*MI) && "Loop-end must also end the packet");
- if (MI->isPacketStart()) {
+ if (HexagonMCInstrInfo::isPacketBegin(*MI)) {
// There must be a packet to end a loop.
// FIXME: when shuffling is always run, this shouldn't be needed.
- HexagonMCInst Nop;
+ MCInst Nop;
StringRef NoAnnot;
- Nop.setOpcode (Hexagon::NOP);
- Nop.setPacketStart (MI->isPacketStart());
+ Nop.setOpcode (Hexagon::A2_nop);
+ HexagonMCInstrInfo::setPacketBegin (Nop, HexagonMCInstrInfo::isPacketBegin(*MI));
printInst (&Nop, O, NoAnnot);
}
// Close the packet.
- if (MI->isPacketEnd())
+ if (HexagonMCInstrInfo::isPacketEnd(*MI))
O << PacketPadding << endPacket;
printInstruction(MI, O);
}
else {
// Prefix the insn opening the packet.
- if (MI->isPacketStart())
+ if (HexagonMCInstrInfo::isPacketBegin(*MI))
O << PacketPadding << startPacket << '\n';
printInstruction(MI, O);
// Suffix the insn closing the packet.
- if (MI->isPacketEnd())
+ if (HexagonMCInstrInfo::isPacketEnd(*MI))
// Suffix the packet in a new line always, since the GNU assembler has
// issues with a closing brace on the same line as CONST{32,64}.
O << '\n' << PacketPadding << endPacket;
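
The printer wraps packets in braces, opening before the first instruction and closing on a fresh line after the last (the GNU assembler objects to a closing brace sharing a line with CONST32/64). A standalone model:

    #include <cstdio>

    void printPacketInst(const char *Text, bool BeginsPacket, bool EndsPacket) {
      if (BeginsPacket)
        std::printf("\t{\n");
      std::printf("\t%s\n", Text);
      if (EndsPacket)
        std::printf("\t}\n"); // closing brace always on its own line
    }

    int main() {
      printPacketInst("r0 = add(r1, r2)", true, false);
      printPacketInst("r3 = memw(r4)", false, true);
    }
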
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h
index 55ae95c..d02243b 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h
@@ -18,17 +18,14 @@
#include "llvm/MC/MCInstrInfo.h"
namespace llvm {
- class HexagonMCInst;
-
class HexagonInstPrinter : public MCInstPrinter {
public:
- explicit HexagonInstPrinter(const MCAsmInfo &MAI,
- const MCInstrInfo &MII,
- const MCRegisterInfo &MRI)
+ explicit HexagonInstPrinter(MCAsmInfo const &MAI,
+ MCInstrInfo const &MII,
+ MCRegisterInfo const &MRI)
: MCInstPrinter(MAI, MII, MRI), MII(MII) {}
- void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
- void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(MCInst const *MI, raw_ostream &O, StringRef Annot) override;
virtual StringRef getOpcodeName(unsigned Opcode) const;
void printInstruction(const MCInst *MI, raw_ostream &O);
StringRef getRegName(unsigned RegNo) const;
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 4471977..a5a09ba 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -10,8 +10,8 @@
#include "Hexagon.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
#include "MCTargetDesc/HexagonMCCodeEmitter.h"
+#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
-#include "MCTargetDesc/HexagonMCInst.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -35,9 +35,9 @@ namespace {
/// Possible values for instruction packet parse field.
enum class ParseField { duplex = 0x0, last0 = 0x1, last1 = 0x2, end = 0x3 };
/// \brief Returns the packet bits based on instruction position.
-uint32_t getPacketBits(HexagonMCInst const &HMI) {
+uint32_t getPacketBits(MCInst const &HMI) {
unsigned const ParseFieldOffset = 14;
- ParseField Field = HMI.isPacketEnd() ? ParseField::end : ParseField::last0;
+ ParseField Field = HexagonMCInstrInfo::isPacketEnd(HMI) ? ParseField::end : ParseField::last0;
return static_cast <uint32_t> (Field) << ParseFieldOffset;
}
void emitLittleEndian(uint64_t Binary, raw_ostream &OS) {
@@ -51,14 +51,15 @@ void emitLittleEndian(uint64_t Binary, raw_ostream &OS) {
HexagonMCCodeEmitter::HexagonMCCodeEmitter(MCInstrInfo const &aMII,
MCSubtargetInfo const &aMST,
MCContext &aMCT)
- : MST(aMST), MCT(aMCT) {}
+ : MST(aMST), MCT(aMCT), MCII (aMII) {}
void HexagonMCCodeEmitter::EncodeInstruction(MCInst const &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
MCSubtargetInfo const &STI) const {
- HexagonMCInst const &HMB = static_cast<HexagonMCInst const &>(MI);
- uint64_t Binary = getBinaryCodeForInstr(HMB, Fixups, STI) | getPacketBits(HMB);
- assert(HMB.getDesc().getSize() == 4 && "All instructions should be 32bit");
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI) | getPacketBits(MI);
+ assert(HexagonMCInstrInfo::getDesc(MCII, MI).getSize() == 4 &&
+ "All instructions should be 32bit");
+ (void)&MCII;
emitLittleEndian(Binary, OS);
++MCNumEmitted;
}
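
getPacketBits above encodes the parse field, bits 15:14 of every instruction word: 0x3 marks the last word of a packet and 0x1 an interior word. A worked standalone copy of the computation:

    #include <cassert>
    #include <cstdint>

    enum class ParseField { duplex = 0x0, last0 = 0x1, last1 = 0x2, end = 0x3 };

    uint32_t getPacketBits(bool IsPacketEnd) {
      const unsigned ParseFieldOffset = 14;
      ParseField F = IsPacketEnd ? ParseField::end : ParseField::last0;
      return static_cast<uint32_t>(F) << ParseFieldOffset;
    }

    int main() {
      assert(getPacketBits(true)  == (0x3u << 14)); // last word of packet
      assert(getPacketBits(false) == (0x1u << 14)); // interior word
    }
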
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
index 96048ad..db1d707 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
@@ -28,6 +28,7 @@ namespace llvm {
class HexagonMCCodeEmitter : public MCCodeEmitter {
MCSubtargetInfo const &MST;
MCContext &MCT;
+ MCInstrInfo const &MCII;
public:
HexagonMCCodeEmitter(MCInstrInfo const &aMII, MCSubtargetInfo const &aMST,
@@ -51,8 +52,8 @@ public:
MCSubtargetInfo const &STI) const;
private:
- HexagonMCCodeEmitter(HexagonMCCodeEmitter const &) LLVM_DELETED_FUNCTION;
- void operator=(HexagonMCCodeEmitter const &) LLVM_DELETED_FUNCTION;
+ HexagonMCCodeEmitter(HexagonMCCodeEmitter const &) = delete;
+ void operator=(HexagonMCCodeEmitter const &) = delete;
}; // class HexagonMCCodeEmitter
} // namespace llvm
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.cpp
deleted file mode 100644
index c842b9b..0000000
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.cpp
+++ /dev/null
@@ -1,176 +0,0 @@
-//===- HexagonMCInst.cpp - Hexagon sub-class of MCInst --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class extends MCInst to allow some Hexagon VLIW annotations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "HexagonInstrInfo.h"
-#include "MCTargetDesc/HexagonBaseInfo.h"
-#include "MCTargetDesc/HexagonMCInst.h"
-#include "MCTargetDesc/HexagonMCTargetDesc.h"
-
-using namespace llvm;
-
-// Return the slots used by the insn.
-unsigned HexagonMCInst::getUnits(const HexagonTargetMachine* TM) const {
- const HexagonInstrInfo *QII = TM->getSubtargetImpl()->getInstrInfo();
- const InstrItineraryData *II =
- TM->getSubtargetImpl()->getInstrItineraryData();
- const InstrStage*
- IS = II->beginStage(QII->get(this->getOpcode()).getSchedClass());
-
- return (IS->getUnits());
-}
-
-// Return the Hexagon ISA class for the insn.
-unsigned HexagonMCInst::getType() const {
- const uint64_t F = MCID->TSFlags;
-
- return ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
-}
-
-// Return whether the insn is an actual insn.
-bool HexagonMCInst::isCanon() const {
- return (!MCID->isPseudo() &&
- !isPrefix() &&
- getType() != HexagonII::TypeENDLOOP);
-}
-
-// Return whether the insn is a prefix.
-bool HexagonMCInst::isPrefix() const {
- return (getType() == HexagonII::TypePREFIX);
-}
-
-// Return whether the insn is solo, i.e., cannot be in a packet.
-bool HexagonMCInst::isSolo() const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::SoloPos) & HexagonII::SoloMask);
-}
-
-// Return whether the insn is a new-value consumer.
-bool HexagonMCInst::isNewValue() const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
-}
-
-// Return whether the instruction is a legal new-value producer.
-bool HexagonMCInst::hasNewValue() const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::hasNewValuePos) & HexagonII::hasNewValueMask);
-}
-
-// Return the operand that consumes or produces a new value.
-const MCOperand& HexagonMCInst::getNewValue() const {
- const uint64_t F = MCID->TSFlags;
- const unsigned O = (F >> HexagonII::NewValueOpPos) &
- HexagonII::NewValueOpMask;
- const MCOperand& MCO = getOperand(O);
-
- assert ((isNewValue() || hasNewValue()) && MCO.isReg());
- return (MCO);
-}
-
-// Return whether the instruction needs to be constant extended.
-// 1) Always return true if the instruction has 'isExtended' flag set.
-//
-// isExtendable:
-// 2) For immediate extended operands, return true only if the value is
-// out-of-range.
-// 3) For global address, always return true.
-
-bool HexagonMCInst::isConstExtended(void) const {
- if (isExtended())
- return true;
-
- if (!isExtendable())
- return false;
-
- short ExtOpNum = getCExtOpNum();
- int MinValue = getMinValue();
- int MaxValue = getMaxValue();
- const MCOperand& MO = getOperand(ExtOpNum);
-
- // We could be using an instruction with an extendable immediate and shoehorn
- // a global address into it. If it is a global address it will be constant
- // extended. We do this for COMBINE.
- // We currently only handle isGlobal() because it is the only kind of
- // object we are going to end up with here for now.
- // In the future we probably should add isSymbol(), etc.
- if (MO.isExpr())
- return true;
-
- // If the extendable operand is not 'Immediate' type, the instruction should
- // have 'isExtended' flag set.
- assert(MO.isImm() && "Extendable operand must be Immediate type");
-
- int ImmValue = MO.getImm();
- return (ImmValue < MinValue || ImmValue > MaxValue);
-}
-
-// Return whether the instruction must be always extended.
-bool HexagonMCInst::isExtended(void) const {
- const uint64_t F = MCID->TSFlags;
- return (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
-}
-
-// Return true if the instruction may be extended based on the operand value.
-bool HexagonMCInst::isExtendable(void) const {
- const uint64_t F = MCID->TSFlags;
- return (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
-}
-
-// Return number of bits in the constant extended operand.
-unsigned HexagonMCInst::getBitCount(void) const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::ExtentBitsPos) & HexagonII::ExtentBitsMask);
-}
-
-// Return constant extended operand number.
-unsigned short HexagonMCInst::getCExtOpNum(void) const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
-}
-
-// Return whether the operand can be constant extended.
-bool HexagonMCInst::isOperandExtended(const unsigned short OperandNum) const {
- const uint64_t F = MCID->TSFlags;
- return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
- == OperandNum;
-}
-
-// Return the min value that a constant extendable operand can have
-// without being extended.
-int HexagonMCInst::getMinValue(void) const {
- const uint64_t F = MCID->TSFlags;
- unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
- & HexagonII::ExtentSignedMask;
- unsigned bits = (F >> HexagonII::ExtentBitsPos)
- & HexagonII::ExtentBitsMask;
-
- if (isSigned) // if value is signed
- return -1U << (bits - 1);
- else
- return 0;
-}
-
-// Return the max value that a constant extendable operand can have
-// without being extended.
-int HexagonMCInst::getMaxValue(void) const {
- const uint64_t F = MCID->TSFlags;
- unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
- & HexagonII::ExtentSignedMask;
- unsigned bits = (F >> HexagonII::ExtentBitsPos)
- & HexagonII::ExtentBitsMask;
-
- if (isSigned) // if value is signed
- return ~(-1U << (bits - 1));
- else
- return ~(-1U << bits);
-}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h
deleted file mode 100644
index 90fbbf3..0000000
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h
+++ /dev/null
@@ -1,100 +0,0 @@
-//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class extends MCInst to allow some VLIW annotations.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINST_H
-#define LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINST_H
-
-#include "HexagonTargetMachine.h"
-#include "llvm/MC/MCInst.h"
-
-namespace llvm {
- class MCOperand;
-
- class HexagonMCInst: public MCInst {
- // MCID is set during instruction lowering.
- // It is needed in order to access TSFlags for
- // use in checking MC instruction properties.
- const MCInstrDesc *MCID;
-
- // Packet start and end markers
- unsigned packetStart: 1, packetEnd: 1;
-
- public:
- explicit HexagonMCInst():
- MCInst(), MCID(nullptr), packetStart(0), packetEnd(0) {};
- HexagonMCInst(const MCInstrDesc& mcid):
- MCInst(), MCID(&mcid), packetStart(0), packetEnd(0) {};
-
- bool isPacketStart() const { return (packetStart); };
- bool isPacketEnd() const { return (packetEnd); };
- void setPacketStart(bool Y) { packetStart = Y; };
- void setPacketEnd(bool Y) { packetEnd = Y; };
- void resetPacket() { setPacketStart(false); setPacketEnd(false); };
-
- // Return the slots used by the insn.
- unsigned getUnits(const HexagonTargetMachine* TM) const;
-
- // Return the Hexagon ISA class for the insn.
- unsigned getType() const;
-
- void setDesc(const MCInstrDesc& mcid) { MCID = &mcid; };
- const MCInstrDesc& getDesc(void) const { return *MCID; };
-
- // Return whether the insn is an actual insn.
- bool isCanon() const;
-
- // Return whether the insn is a prefix.
- bool isPrefix() const;
-
- // Return whether the insn is solo, i.e., cannot be in a packet.
- bool isSolo() const;
-
- // Return whether the instruction needs to be constant extended.
- bool isConstExtended() const;
-
- // Return constant extended operand number.
- unsigned short getCExtOpNum(void) const;
-
- // Return whether the insn is a new-value consumer.
- bool isNewValue() const;
-
- // Return whether the instruction is a legal new-value producer.
- bool hasNewValue() const;
-
- // Return the operand that consumes or produces a new value.
- const MCOperand& getNewValue() const;
-
- // Return number of bits in the constant extended operand.
- unsigned getBitCount(void) const;
-
- private:
- // Return whether the instruction must be always extended.
- bool isExtended() const;
-
- // Return true if the insn may be extended based on the operand value.
- bool isExtendable() const;
-
- // Return true if the operand can be constant extended.
- bool isOperandExtended(const unsigned short OperandNum) const;
-
- // Return the min value that a constant extendable operand can have
- // without being extended.
- int getMinValue() const;
-
- // Return the max value that a constant extendable operand can have
- // without being extended.
- int getMaxValue() const;
- };
-}
-
-#endif
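(In short: the member queries on the deleted HexagonMCInst become the free functions introduced below. A before/after sketch of a call site, with names assumed rather than taken from this patch:)

    // Old: annotations and queries lived on a subclass of MCInst.
    //   HexagonMCInst HMI(MCID);
    //   bool CExt = HMI.isConstExtended();
    // New: a plain MCInst plus free functions keyed off the MCInstrInfo.
    //   bool CExt = HexagonMCInstrInfo::isConstExtended(MCII, MCI);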
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
new file mode 100644
index 0000000..33e7c81
--- /dev/null
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
@@ -0,0 +1,223 @@
+//===- HexagonMCInstrInfo.cpp - Utility functions on Hexagon MCInsts ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility functions for Hexagon-specific MCInst queries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonMCInstrInfo.h"
+#include "HexagonBaseInfo.h"
+
+namespace llvm {
+void HexagonMCInstrInfo::AppendImplicitOperands(MCInst &MCI) {
+ MCI.addOperand(MCOperand::CreateImm(0));
+ MCI.addOperand(MCOperand::CreateInst(nullptr));
+}
+
+unsigned HexagonMCInstrInfo::getBitCount(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::ExtentBitsPos) & HexagonII::ExtentBitsMask);
+}
+
+// Return constant extended operand number.
+unsigned short HexagonMCInstrInfo::getCExtOpNum(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
+}
+
+MCInstrDesc const &HexagonMCInstrInfo::getDesc(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ return (MCII.get(MCI.getOpcode()));
+}
+
+std::bitset<16> HexagonMCInstrInfo::GetImplicitBits(MCInst const &MCI) {
+ SanityCheckImplicitOperands(MCI);
+ std::bitset<16> Bits(MCI.getOperand(MCI.getNumOperands() - 2).getImm());
+ return Bits;
+}
+
+// Return the max value that a constant extendable operand can have
+// without being extended.
+int HexagonMCInstrInfo::getMaxValue(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ unsigned isSigned =
+ (F >> HexagonII::ExtentSignedPos) & HexagonII::ExtentSignedMask;
+ unsigned bits = (F >> HexagonII::ExtentBitsPos) & HexagonII::ExtentBitsMask;
+
+ if (isSigned) // if value is signed
+ return ~(-1U << (bits - 1));
+ else
+ return ~(-1U << bits);
+}
+
+// Return the min value that a constant extendable operand can have
+// without being extended.
+int HexagonMCInstrInfo::getMinValue(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ unsigned isSigned =
+ (F >> HexagonII::ExtentSignedPos) & HexagonII::ExtentSignedMask;
+ unsigned bits = (F >> HexagonII::ExtentBitsPos) & HexagonII::ExtentBitsMask;
+
+ if (isSigned) // if value is signed
+ return -1U << (bits - 1);
+ else
+ return 0;
+}
+
+// Return the operand that consumes or produces a new value.
+MCOperand const &HexagonMCInstrInfo::getNewValue(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ unsigned const O =
+ (F >> HexagonII::NewValueOpPos) & HexagonII::NewValueOpMask;
+ MCOperand const &MCO = MCI.getOperand(O);
+
+ assert((HexagonMCInstrInfo::isNewValue(MCII, MCI) ||
+ HexagonMCInstrInfo::hasNewValue(MCII, MCI)) &&
+ MCO.isReg());
+ return (MCO);
+}
+
+// Return the Hexagon ISA class for the insn.
+unsigned HexagonMCInstrInfo::getType(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+
+ return ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
+}
+
+// Return whether the instruction is a legal new-value producer.
+bool HexagonMCInstrInfo::hasNewValue(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::hasNewValuePos) & HexagonII::hasNewValueMask);
+}
+
+// Return whether the insn is an actual insn.
+bool HexagonMCInstrInfo::isCanon(MCInstrInfo const &MCII, MCInst const &MCI) {
+ return (!HexagonMCInstrInfo::getDesc(MCII, MCI).isPseudo() &&
+ !HexagonMCInstrInfo::isPrefix(MCII, MCI) &&
+ HexagonMCInstrInfo::getType(MCII, MCI) != HexagonII::TypeENDLOOP);
+}
+
+// Return whether the instruction needs to be constant extended:
+// 1) Always return true if the instruction has the 'isExtended' flag set.
+//
+// If the instruction is 'isExtendable':
+// 2) For an immediate extendable operand, return true only if the value is
+//    out of range.
+// 3) For a global address, always return true.
+
+bool HexagonMCInstrInfo::isConstExtended(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ if (HexagonMCInstrInfo::isExtended(MCII, MCI))
+ return true;
+
+ if (!HexagonMCInstrInfo::isExtendable(MCII, MCI))
+ return false;
+
+ short ExtOpNum = HexagonMCInstrInfo::getCExtOpNum(MCII, MCI);
+ int MinValue = HexagonMCInstrInfo::getMinValue(MCII, MCI);
+ int MaxValue = HexagonMCInstrInfo::getMaxValue(MCII, MCI);
+ MCOperand const &MO = MCI.getOperand(ExtOpNum);
+
+ // We could be using an instruction with an extendable immediate and shoehorn
+ // a global address into it. If it is a global address it will be constant
+ // extended. We do this for COMBINE.
+ // We currently only handle isGlobal() because it is the only kind of
+ // object we are going to end up with here for now.
+ // In the future we probably should add isSymbol(), etc.
+ if (MO.isExpr())
+ return true;
+
+ // If the extendable operand is not 'Immediate' type, the instruction should
+ // have 'isExtended' flag set.
+ assert(MO.isImm() && "Extendable operand must be Immediate type");
+
+ int ImmValue = MO.getImm();
+ return (ImmValue < MinValue || ImmValue > MaxValue);
+}
+
+// Return true if the instruction may be extended based on the operand value.
+bool HexagonMCInstrInfo::isExtendable(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
+}
+
+// Return whether the instruction must be always extended.
+bool HexagonMCInstrInfo::isExtended(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
+}
+
+// Return whether the insn is a new-value consumer.
+bool HexagonMCInstrInfo::isNewValue(MCInstrInfo const &MCII,
+ MCInst const &MCI) {
+ const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
+}
+
+// Return whether the operand can be constant extended.
+bool HexagonMCInstrInfo::isOperandExtended(MCInstrInfo const &MCII,
+ MCInst const &MCI,
+ unsigned short OperandNum) {
+ uint64_t const F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask) ==
+ OperandNum;
+}
+
+bool HexagonMCInstrInfo::isPacketBegin(MCInst const &MCI) {
+ std::bitset<16> Bits(GetImplicitBits(MCI));
+ return Bits.test(packetBeginIndex);
+}
+
+bool HexagonMCInstrInfo::isPacketEnd(MCInst const &MCI) {
+ std::bitset<16> Bits(GetImplicitBits(MCI));
+ return Bits.test(packetEndIndex);
+}
+
+// Return whether the insn is a prefix.
+bool HexagonMCInstrInfo::isPrefix(MCInstrInfo const &MCII, MCInst const &MCI) {
+ return (HexagonMCInstrInfo::getType(MCII, MCI) == HexagonII::TypePREFIX);
+}
+
+// Return whether the insn is solo, i.e., cannot be in a packet.
+bool HexagonMCInstrInfo::isSolo(MCInstrInfo const &MCII, MCInst const &MCI) {
+ const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
+ return ((F >> HexagonII::SoloPos) & HexagonII::SoloMask);
+}
+
+void HexagonMCInstrInfo::resetPacket(MCInst &MCI) {
+ setPacketBegin(MCI, false);
+ setPacketEnd(MCI, false);
+}
+
+void HexagonMCInstrInfo::SetImplicitBits(MCInst &MCI, std::bitset<16> Bits) {
+ SanityCheckImplicitOperands(MCI);
+ MCI.getOperand(MCI.getNumOperands() - 2).setImm(Bits.to_ulong());
+}
+
+void HexagonMCInstrInfo::setPacketBegin(MCInst &MCI, bool f) {
+ std::bitset<16> Bits(GetImplicitBits(MCI));
+ Bits.set(packetBeginIndex, f);
+ SetImplicitBits(MCI, Bits);
+}
+
+void HexagonMCInstrInfo::setPacketEnd(MCInst &MCI, bool f) {
+ std::bitset<16> Bits(GetImplicitBits(MCI));
+ Bits.set(packetEndIndex, f);
+ SetImplicitBits(MCI, Bits);
+}
+}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
new file mode 100644
index 0000000..10fc0f3
--- /dev/null
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
@@ -0,0 +1,106 @@
+//===- HexagonMCInstrInfo.h - Utility functions on Hexagon MCInsts --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility functions for Hexagon-specific MCInst queries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINSTRINFO_H
+#define LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINSTRINFO_H
+
+#include "llvm/MC/MCInstrInfo.h"
+
+#include <bitset>
+
+namespace llvm {
+class MCInstrDesc;
+class MCInstrInfo;
+class MCInst;
+class MCOperand;
+namespace HexagonMCInstrInfo {
+void AppendImplicitOperands(MCInst &MCI);
+
+// Return number of bits in the constant extended operand.
+unsigned getBitCount(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return constant extended operand number.
+unsigned short getCExtOpNum(MCInstrInfo const &MCII, MCInst const &MCI);
+
+MCInstrDesc const &getDesc(MCInstrInfo const &MCII, MCInst const &MCI);
+
+std::bitset<16> GetImplicitBits(MCInst const &MCI);
+
+// Return the max value that a constant extendable operand can have
+// without being extended.
+int getMaxValue(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return the min value that a constant extendable operand can have
+// without being extended.
+int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return the operand that consumes or produces a new value.
+MCOperand const &getNewValue(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return the Hexagon ISA class for the insn.
+unsigned getType(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the instruction is a legal new-value producer.
+bool hasNewValue(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the insn is an actual insn.
+bool isCanon(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the instruction needs to be constant extended.
+bool isConstExtended(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return true if the insn may be extended based on the operand value.
+bool isExtendable(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the instruction must be always extended.
+bool isExtended(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the insn is a new-value consumer.
+bool isNewValue(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return true if the operand can be constant extended.
+bool isOperandExtended(MCInstrInfo const &MCII, MCInst const &MCI,
+ unsigned short OperandNum);
+
+bool isPacketBegin(MCInst const &MCI);
+
+bool isPacketEnd(MCInst const &MCI);
+
+// Return whether the insn is a prefix.
+bool isPrefix(MCInstrInfo const &MCII, MCInst const &MCI);
+
+// Return whether the insn is solo, i.e., cannot be in a packet.
+bool isSolo(MCInstrInfo const &MCII, MCInst const &MCI);
+
+static const size_t packetBeginIndex = 0;
+static const size_t packetEndIndex = 1;
+
+void resetPacket(MCInst &MCI);
+
+inline void SanityCheckImplicitOperands(MCInst const &MCI) {
+ assert(MCI.getNumOperands() >= 2 && "At least the two implicit operands");
+  assert(MCI.getOperand(MCI.getNumOperands() - 1).isInst() &&
+         "Expected the parent-pointer sentinel");
+  assert(MCI.getOperand(MCI.getNumOperands() - 2).isImm() &&
+         "Expected the implicit bits and flags");
+}
+
+void SetImplicitBits(MCInst &MCI, std::bitset<16> Bits);
+
+void setPacketBegin(MCInst &MCI, bool Y);
+
+void setPacketEnd(MCInst &MCI, bool Y);
+}
+}
+
+#endif // LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINSTRINFO_H
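(The packet markers that were bitfields on HexagonMCInst now travel in two trailing implicit operands: a 16-bit flag immediate and an MCInst sentinel. A usage sketch, with the helper name as a placeholder and the operand order taken from AppendImplicitOperands above:)

    #include "MCTargetDesc/HexagonMCInstrInfo.h" // include path per this patch
    #include "llvm/MC/MCInst.h"
    #include <cassert>
    using namespace llvm;

    // Placeholder helper: after lowering, attach the implicit operands,
    // then toggle the packet-begin bit.
    void tagPacketStart(MCInst &MCI) {
      HexagonMCInstrInfo::AppendImplicitOperands(MCI); // Imm(0) + Inst(nullptr)
      HexagonMCInstrInfo::setPacketBegin(MCI, true);
      assert(HexagonMCInstrInfo::isPacketBegin(MCI));
    }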
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 14ddd9d..09a305b 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -35,7 +35,7 @@ using namespace llvm;
#define GET_REGINFO_MC_DESC
#include "HexagonGenRegisterInfo.inc"
-static MCInstrInfo *createHexagonMCInstrInfo() {
+MCInstrInfo *llvm::createHexagonMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitHexagonMCInstrInfo(X);
return X;
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
index 02fd516..f074b65 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
@@ -30,6 +30,8 @@ class raw_ostream;
extern Target TheHexagonTarget;
+MCInstrInfo *createHexagonMCInstrInfo();
+
MCCodeEmitter *createHexagonMCCodeEmitter(MCInstrInfo const &MCII,
MCRegisterInfo const &MRI,
MCSubtargetInfo const &MST,
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index 1b0837c..4112046 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = ARM AArch64 CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
+subdirectories = ARM AArch64 BPF CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
; This is a special group whose required libraries are extended (by llvm-build)
; with the best execution engine (the native JIT, if available, or the
@@ -45,7 +45,7 @@ parent = Libraries
type = Library
name = Target
parent = Libraries
-required_libraries = Core MC Support
+required_libraries = Analysis Core MC Support
; This is a special group whose required libraries are extended (by llvm-build)
; with every built target, which makes it easy for tools to include every
diff --git a/lib/Target/MSP430/MSP430AsmPrinter.cpp b/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 22a973e..fb7823e 100644
--- a/lib/Target/MSP430/MSP430AsmPrinter.cpp
+++ b/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -39,8 +39,8 @@ using namespace llvm;
namespace {
class MSP430AsmPrinter : public AsmPrinter {
public:
- MSP430AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {}
+ MSP430AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
const char *getPassName() const override {
return "MSP430 Assembly Printer";
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 81c176b..2f70cde 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -92,14 +92,9 @@ namespace {
///
namespace {
class MSP430DAGToDAGISel : public SelectionDAGISel {
- const MSP430TargetLowering &Lowering;
- const MSP430Subtarget &Subtarget;
-
public:
MSP430DAGToDAGISel(MSP430TargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel),
- Lowering(*TM.getSubtargetImpl()->getTargetLowering()),
- Subtarget(*TM.getSubtargetImpl()) {}
+ : SelectionDAGISel(TM, OptLevel) {}
const char *getPassName() const override {
return "MSP430 DAG->DAG Pattern Instruction Selection";
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 22936dd..18141a6 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -57,7 +57,8 @@ HWMultMode("msp430-hwmult-mode", cl::Hidden,
"Assume hardware multiplier cannot be used inside interrupts"),
clEnumValEnd));
-MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM)
+MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
+ const MSP430Subtarget &STI)
: TargetLowering(TM) {
// Set up the register classes.
@@ -65,7 +66,7 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM)
addRegisterClass(MVT::i16, &MSP430::GR16RegClass);
// Compute derived properties from the register classes
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
// Provide all sorts of operation actions
@@ -80,11 +81,13 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM)
setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
+ }
// We don't have any truncstores
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
@@ -222,10 +225,10 @@ MSP430TargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
-std::pair<unsigned, const TargetRegisterClass*>
-MSP430TargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+std::pair<unsigned, const TargetRegisterClass *>
+MSP430TargetLowering::getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
@@ -238,7 +241,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
//===----------------------------------------------------------------------===//
@@ -326,7 +329,7 @@ static void AnalyzeArguments(CCState &State,
if (!UseStack && Parts <= RegsLeft) {
unsigned FirstVal = ValNo;
for (unsigned j = 0; j < Parts; j++) {
- unsigned Reg = State.AllocateReg(RegList, NbRegs);
+ unsigned Reg = State.AllocateReg(RegList);
State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
RegsLeft--;
}
@@ -977,11 +980,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
} else {
SDValue Zero = DAG.getConstant(0, VT);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- SmallVector<SDValue, 4> Ops;
- Ops.push_back(One);
- Ops.push_back(Zero);
- Ops.push_back(TargetCC);
- Ops.push_back(Flag);
+ SDValue Ops[] = {One, Zero, TargetCC, Flag};
return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, Ops);
}
}
@@ -999,11 +998,7 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op,
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- SmallVector<SDValue, 4> Ops;
- Ops.push_back(TrueV);
- Ops.push_back(FalseV);
- Ops.push_back(TargetCC);
- Ops.push_back(Flag);
+ SDValue Ops[] = {TrueV, FalseV, TargetCC, Flag};
return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, Ops);
}
@@ -1199,8 +1194,7 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineRegisterInfo &RI = F->getRegInfo();
DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo &TII =
- *getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *F->getSubtarget().getInstrInfo();
unsigned Opc;
const TargetRegisterClass * RC;
@@ -1311,8 +1305,7 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
Opc == MSP430::Srl8 || Opc == MSP430::Srl16)
return EmitShiftInstr(MI, BB);
- const TargetInstrInfo &TII =
- *getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
assert((Opc == MSP430::Select16 || Opc == MSP430::Select8) &&
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index 073ddc9..9266c3b 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -66,9 +66,11 @@ namespace llvm {
};
}
+ class MSP430Subtarget;
class MSP430TargetLowering : public TargetLowering {
public:
- explicit MSP430TargetLowering(const TargetMachine &TM);
+ explicit MSP430TargetLowering(const TargetMachine &TM,
+ const MSP430Subtarget &STI);
MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
@@ -95,8 +97,9 @@ namespace llvm {
TargetLowering::ConstraintType
getConstraintType(const std::string &Constraint) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const override;
/// isTruncateFree - Return true if it's free to truncate a value of type
diff --git a/lib/Target/MSP430/MSP430InstrInfo.td b/lib/Target/MSP430/MSP430InstrInfo.td
index 7c5aa11..c0c29b9 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/lib/Target/MSP430/MSP430InstrInfo.td
@@ -153,7 +153,7 @@ let usesCustomInserter = 1 in {
}
}
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def NOP : Pseudo<(outs), (ins), "nop", []>;
//===----------------------------------------------------------------------===//
@@ -224,7 +224,7 @@ let isCall = 1 in
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
-let Defs = [SP], Uses = [SP], neverHasSideEffects=1 in {
+let Defs = [SP], Uses = [SP], hasSideEffects=0 in {
let mayLoad = 1 in
def POP16r : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$reg), (ins), "pop.w\t$reg", []>;
@@ -238,7 +238,7 @@ def PUSH16r : II16r<0x0,
// Move Instructions
// FIXME: Provide proper encoding!
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MOV8rr : I8rr<0x0,
(outs GR8:$dst), (ins GR8:$src),
"mov.b\t{$src, $dst}",
diff --git a/lib/Target/MSP430/MSP430MCInstLower.cpp b/lib/Target/MSP430/MSP430MCInstLower.cpp
index 77b91b7..05352a2 100644
--- a/lib/Target/MSP430/MSP430MCInstLower.cpp
+++ b/lib/Target/MSP430/MSP430MCInstLower.cpp
@@ -26,7 +26,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
MCSymbol *MSP430MCInstLower::
@@ -51,7 +50,7 @@ GetExternalSymbolSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetJumpTableSymbol(const MachineOperand &MO) const {
- const DataLayout *DL = Printer.TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = Printer.TM.getDataLayout();
SmallString<256> Name;
raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "JTI"
<< Printer.getFunctionNumber() << '_'
@@ -68,7 +67,7 @@ GetJumpTableSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
- const DataLayout *DL = Printer.TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = Printer.TM.getDataLayout();
SmallString<256> Name;
raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "CPI"
<< Printer.getFunctionNumber() << '_'
diff --git a/lib/Target/MSP430/MSP430Subtarget.cpp b/lib/Target/MSP430/MSP430Subtarget.cpp
index cb83b92..7468519 100644
--- a/lib/Target/MSP430/MSP430Subtarget.cpp
+++ b/lib/Target/MSP430/MSP430Subtarget.cpp
@@ -32,8 +32,6 @@ MSP430Subtarget &MSP430Subtarget::initializeSubtargetDependencies(StringRef CPU,
MSP430Subtarget::MSP430Subtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
- : MSP430GenSubtargetInfo(TT, CPU, FS),
- // FIXME: Check DataLayout string.
- DL("e-m:e-p:16:16-i32:16:32-a:16-n8:16"), FrameLowering(),
- InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
- TSInfo(DL) {}
+ : MSP430GenSubtargetInfo(TT, CPU, FS), FrameLowering(),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ TSInfo(*TM.getDataLayout()) {}
diff --git a/lib/Target/MSP430/MSP430Subtarget.h b/lib/Target/MSP430/MSP430Subtarget.h
index d1845db..30d46d3 100644
--- a/lib/Target/MSP430/MSP430Subtarget.h
+++ b/lib/Target/MSP430/MSP430Subtarget.h
@@ -15,8 +15,8 @@
#define LLVM_LIB_TARGET_MSP430_MSP430SUBTARGET_H
#include "MSP430FrameLowering.h"
-#include "MSP430InstrInfo.h"
#include "MSP430ISelLowering.h"
+#include "MSP430InstrInfo.h"
#include "MSP430RegisterInfo.h"
#include "MSP430SelectionDAGInfo.h"
#include "llvm/IR/DataLayout.h"
@@ -32,7 +32,6 @@ class StringRef;
class MSP430Subtarget : public MSP430GenSubtargetInfo {
virtual void anchor();
bool ExtendedInsts;
- const DataLayout DL; // Calculates type size & alignment
MSP430FrameLowering FrameLowering;
MSP430InstrInfo InstrInfo;
MSP430TargetLowering TLInfo;
@@ -55,7 +54,6 @@ public:
return &FrameLowering;
}
const MSP430InstrInfo *getInstrInfo() const override { return &InstrInfo; }
- const DataLayout *getDataLayout() const override { return &DL; }
const TargetRegisterInfo *getRegisterInfo() const override {
return &InstrInfo.getRegisterInfo();
}
diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp
index 8cee016..348e672 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -12,11 +12,11 @@
//===----------------------------------------------------------------------===//
#include "MSP430TargetMachine.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "MSP430.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -32,7 +32,8 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
- Subtarget(TT, CPU, FS, *this) {
+ // FIXME: Check DataLayout string.
+ DL("e-m:e-p:16:16-i32:16:32-a:16-n8:16"), Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -50,7 +51,7 @@ public:
}
bool addInstSelector() override;
- bool addPreEmitPass() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -64,8 +65,7 @@ bool MSP430PassConfig::addInstSelector() {
return false;
}
-bool MSP430PassConfig::addPreEmitPass() {
+void MSP430PassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
- addPass(createMSP430BranchSelectionPass());
- return false;
+ addPass(createMSP430BranchSelectionPass(), false);
}
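(This hunk reflects the updated TargetPassConfig interface in this rebase: addPreEmitPass now returns void, and addPass takes a verify-after flag, as the 'false' argument above shows. The shape an out-of-tree target would follow, with the MyTarget names as placeholders:)

    void MyTargetPassConfig::addPreEmitPass() {
      // Branch selection must run immediately before the asm printer;
      // the boolean disables the machine verifier after this pass.
      addPass(createMyTargetBranchSelectionPass(), /*verifyAfter=*/false);
    }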
diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h
index 0e54ed6..c6a6a70 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/lib/Target/MSP430/MSP430TargetMachine.h
@@ -25,6 +25,7 @@ namespace llvm {
///
class MSP430TargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL; // Calculates type size & alignment
MSP430Subtarget Subtarget;
public:
@@ -34,6 +35,7 @@ public:
CodeGenOpt::Level OL);
~MSP430TargetMachine() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const MSP430Subtarget *getSubtargetImpl() const override {
return &Subtarget;
}
diff --git a/lib/Target/MSP430/README.txt b/lib/Target/MSP430/README.txt
index 5b9634b..e989924 100644
--- a/lib/Target/MSP430/README.txt
+++ b/lib/Target/MSP430/README.txt
@@ -38,3 +38,4 @@ way (currently they emit explicit comparison).
10. Handle imm in comparisons in better way (see comment in MSP430InstrInfo.td)
11. Implement hooks for better memory op folding, etc.
+
diff --git a/lib/Target/Mips/Android.mk b/lib/Target/Mips/Android.mk
index 18d1177..235e788 100644
--- a/lib/Target/Mips/Android.mk
+++ b/lib/Target/Mips/Android.mk
@@ -20,7 +20,6 @@ mips_codegen_SRC_FILES := \
Mips16ISelLowering.cpp \
Mips16InstrInfo.cpp \
Mips16RegisterInfo.cpp \
- MipsABIInfo.cpp \
MipsAnalyzeImmediate.cpp \
MipsAsmPrinter.cpp \
MipsCCState.cpp \
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 0c5b41f..1040bf7 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -7,13 +7,14 @@
//
//===----------------------------------------------------------------------===//
+#include "MCTargetDesc/MipsABIInfo.h"
#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetStreamer.h"
#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -26,8 +27,8 @@
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
#include <memory>
using namespace llvm;
@@ -75,9 +76,10 @@ public:
Mips::FeatureMips3_32 | Mips::FeatureMips3_32r2 | Mips::FeatureMips4 |
Mips::FeatureMips4_32 | Mips::FeatureMips4_32r2 | Mips::FeatureMips5 |
Mips::FeatureMips5_32r2 | Mips::FeatureMips32 | Mips::FeatureMips32r2 |
- Mips::FeatureMips32r6 | Mips::FeatureMips64 | Mips::FeatureMips64r2 |
- Mips::FeatureMips64r6 | Mips::FeatureCnMips | Mips::FeatureFP64Bit |
- Mips::FeatureGP64Bit | Mips::FeatureNaN2008;
+ Mips::FeatureMips32r3 | Mips::FeatureMips32r5 | Mips::FeatureMips32r6 |
+ Mips::FeatureMips64 | Mips::FeatureMips64r2 | Mips::FeatureMips64r3 |
+ Mips::FeatureMips64r5 | Mips::FeatureMips64r6 | Mips::FeatureCnMips |
+ Mips::FeatureFP64Bit | Mips::FeatureGP64Bit | Mips::FeatureNaN2008;
private:
unsigned ATReg;
@@ -95,6 +97,7 @@ class MipsAsmParser : public MCTargetAsmParser {
}
MCSubtargetInfo &STI;
+ MipsABIInfo ABI;
SmallVector<std::unique_ptr<MipsAssemblerOptions>, 2> AssemblerOptions;
MCSymbol *CurrentFn; // Pointer to the function being parsed. It may be a
// nullptr, which indicates that no function is currently
@@ -147,6 +150,12 @@ class MipsAsmParser : public MCTargetAsmParser {
MipsAsmParser::OperandMatchResultTy parseLSAImm(OperandVector &Operands);
MipsAsmParser::OperandMatchResultTy
+  parseRegisterPair(OperandVector &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMovePRegPair(OperandVector &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
parseRegisterList (OperandVector &Operands);
bool searchSymbolAlias(OperandVector &Operands);
@@ -160,6 +169,9 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
+ bool expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
bool expandLoadImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -168,6 +180,8 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
+ bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
void expandLoadAddressSym(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -175,6 +189,10 @@ class MipsAsmParser : public MCTargetAsmParser {
void expandMemInst(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions, bool isLoad,
bool isImmOpnd);
+
+ bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
bool reportParseError(Twine ErrorMsg);
bool reportParseError(SMLoc Loc, Twine ErrorMsg);
@@ -310,7 +328,9 @@ public:
MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
- : MCTargetAsmParser(), STI(sti) {
+ : MCTargetAsmParser(), STI(sti),
+ ABI(MipsABIInfo::computeTargetABI(Triple(sti.getTargetTriple()),
+ sti.getCPU(), Options)) {
MCAsmParserExtension::Initialize(parser);
// Initialize the set of available features.
@@ -326,12 +346,6 @@ public:
getTargetStreamer().updateABIInfo(*this);
- // Assert exactly one ABI was chosen.
- assert((((STI.getFeatureBits() & Mips::FeatureO32) != 0) +
- ((STI.getFeatureBits() & Mips::FeatureEABI) != 0) +
- ((STI.getFeatureBits() & Mips::FeatureN32) != 0) +
- ((STI.getFeatureBits() & Mips::FeatureN64) != 0)) == 1);
-
if (!isABI_O32() && !useOddSPReg() != 0)
report_fatal_error("-mno-odd-spreg requires the O32 ABI");
@@ -343,9 +357,10 @@ public:
bool isGP64bit() const { return STI.getFeatureBits() & Mips::FeatureGP64Bit; }
bool isFP64bit() const { return STI.getFeatureBits() & Mips::FeatureFP64Bit; }
- bool isABI_N32() const { return STI.getFeatureBits() & Mips::FeatureN32; }
- bool isABI_N64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
- bool isABI_O32() const { return STI.getFeatureBits() & Mips::FeatureO32; }
+ const MipsABIInfo &getABI() const { return ABI; }
+ bool isABI_N32() const { return ABI.IsN32(); }
+ bool isABI_N64() const { return ABI.IsN64(); }
+ bool isABI_O32() const { return ABI.IsO32(); }
bool isABI_FPXX() const { return STI.getFeatureBits() & Mips::FeatureFPXX; }
bool useOddSPReg() const {
@@ -372,12 +387,27 @@ public:
bool hasMips64r2() const {
return (STI.getFeatureBits() & Mips::FeatureMips64r2);
}
+ bool hasMips32r3() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips32r3);
+ }
+ bool hasMips64r3() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64r3);
+ }
+ bool hasMips32r5() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips32r5);
+ }
+ bool hasMips64r5() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64r5);
+ }
bool hasMips32r6() const {
return (STI.getFeatureBits() & Mips::FeatureMips32r6);
}
bool hasMips64r6() const {
return (STI.getFeatureBits() & Mips::FeatureMips64r6);
}
+ bool hasCnMips() const {
+ return (STI.getFeatureBits() & Mips::FeatureCnMips);
+ }
bool hasDSP() const { return (STI.getFeatureBits() & Mips::FeatureDSP); }
bool hasDSPR2() const { return (STI.getFeatureBits() & Mips::FeatureDSPR2); }
bool hasMSA() const { return (STI.getFeatureBits() & Mips::FeatureMSA); }
@@ -428,7 +458,8 @@ private:
k_PhysRegister, /// A physical register from the Mips namespace
k_RegisterIndex, /// A register index in one or more RegKind.
k_Token, /// A simple token
- k_RegList /// A physical register list
+ k_RegList, /// A physical register list
+      k_RegPair /// A pair of physical registers
} Kind;
public:
@@ -663,6 +694,16 @@ public:
Inst.addOperand(MCOperand::CreateReg(getGPRMM16Reg()));
}
+ void addGPRMM16AsmRegZeroOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getGPRMM16Reg()));
+ }
+
+ void addGPRMM16AsmRegMovePOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getGPRMM16Reg()));
+ }
+
/// Render the operand to an MCInst as a GPR64
/// Asserts if the wrong number of operands are requested, or the operand
/// is not a k_RegisterIndex compatible with RegKind_GPR
@@ -760,6 +801,15 @@ public:
addExpr(Inst, Expr);
}
+ void addMicroMipsMemOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::CreateReg(getMemBase()->getGPRMM16Reg()));
+
+ const MCExpr *Expr = getMemOff();
+ addExpr(Inst, Expr);
+ }
+
void addRegListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
@@ -767,6 +817,19 @@ public:
Inst.addOperand(MCOperand::CreateReg(RegNo));
}
+ void addRegPairOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ unsigned RegNo = getRegPair();
+ Inst.addOperand(MCOperand::CreateReg(RegNo++));
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ }
+
+ void addMovePRegPairOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ for (auto RegNo : getRegList())
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ }
+
bool isReg() const override {
// As a special case until we sort out the definition of div/divu, pretend
// that $0/$zero are k_PhysRegister so that MCK_ZERO works correctly.
@@ -792,6 +855,37 @@ public:
template <unsigned Bits> bool isMemWithSimmOffset() const {
return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff());
}
+ bool isMemWithGRPMM16Base() const {
+ return isMem() && getMemBase()->isMM16AsmReg();
+ }
+ template <unsigned Bits> bool isMemWithUimmOffsetSP() const {
+ return isMem() && isConstantMemOff() && isUInt<Bits>(getConstantMemOff())
+ && getMemBase()->isRegIdx() && (getMemBase()->getGPR32Reg() == Mips::SP);
+ }
+ template <unsigned Bits> bool isMemWithUimmWordAlignedOffsetSP() const {
+ return isMem() && isConstantMemOff() && isUInt<Bits>(getConstantMemOff())
+ && (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx()
+ && (getMemBase()->getGPR32Reg() == Mips::SP);
+ }
+ bool isRegList16() const {
+ if (!isRegList())
+ return false;
+
+ int Size = RegList.List->size();
+ if (Size < 2 || Size > 5 || *RegList.List->begin() != Mips::S0 ||
+ RegList.List->back() != Mips::RA)
+ return false;
+
+ int PrevReg = *RegList.List->begin();
+ for (int i = 1; i < Size - 1; i++) {
+ int Reg = (*(RegList.List))[i];
+      if (Reg != PrevReg + 1)
+ return false;
+ PrevReg = Reg;
+ }
+
+ return true;
+ }
bool isInvNum() const { return Kind == k_Immediate; }
bool isLSAImm() const {
if (!isConstantImm())
@@ -800,11 +894,31 @@ public:
return 1 <= Val && Val <= 4;
}
bool isRegList() const { return Kind == k_RegList; }
+ bool isMovePRegPair() const {
+ if (Kind != k_RegList || RegList.List->size() != 2)
+ return false;
+
+ unsigned R0 = RegList.List->front();
+ unsigned R1 = RegList.List->back();
+
+ if ((R0 == Mips::A1 && R1 == Mips::A2) ||
+ (R0 == Mips::A1 && R1 == Mips::A3) ||
+ (R0 == Mips::A2 && R1 == Mips::A3) ||
+ (R0 == Mips::A0 && R1 == Mips::S5) ||
+ (R0 == Mips::A0 && R1 == Mips::S6) ||
+ (R0 == Mips::A0 && R1 == Mips::A1) ||
+ (R0 == Mips::A0 && R1 == Mips::A2) ||
+ (R0 == Mips::A0 && R1 == Mips::A3))
+ return true;
+
+ return false;
+ }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
+ bool isRegPair() const { return Kind == k_RegPair; }
unsigned getReg() const override {
// As a special case until we sort out the definition of div/divu, pretend
@@ -846,6 +960,11 @@ public:
return *(RegList.List);
}
+ unsigned getRegPair() const {
+ assert((Kind == k_RegPair) && "Invalid access!");
+ return RegIdx.Index;
+ }
+
static std::unique_ptr<MipsOperand> CreateToken(StringRef Str, SMLoc S,
MipsAsmParser &Parser) {
auto Op = make_unique<MipsOperand>(k_Token, Parser);
@@ -947,14 +1066,21 @@ public:
assert (Regs.size() > 0 && "Empty list not allowed");
auto Op = make_unique<MipsOperand>(k_RegList, Parser);
- Op->RegList.List = new SmallVector<unsigned, 10>();
- for (auto Reg : Regs)
- Op->RegList.List->push_back(Reg);
+ Op->RegList.List = new SmallVector<unsigned, 10>(Regs.begin(), Regs.end());
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
return Op;
}
+ static std::unique_ptr<MipsOperand>
+ CreateRegPair(unsigned RegNo, SMLoc S, SMLoc E, MipsAsmParser &Parser) {
+ auto Op = make_unique<MipsOperand>(k_RegPair, Parser);
+ Op->RegIdx.Index = RegNo;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
bool isGPRAsmReg() const {
return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index <= 31;
}
@@ -964,6 +1090,19 @@ public:
return ((RegIdx.Index >= 2 && RegIdx.Index <= 7)
|| RegIdx.Index == 16 || RegIdx.Index == 17);
}
+ bool isMM16AsmRegZero() const {
+ if (!(isRegIdx() && RegIdx.Kind))
+ return false;
+ return (RegIdx.Index == 0 ||
+ (RegIdx.Index >= 2 && RegIdx.Index <= 7) ||
+ RegIdx.Index == 17);
+ }
+ bool isMM16AsmRegMoveP() const {
+ if (!(isRegIdx() && RegIdx.Kind))
+ return false;
+ return (RegIdx.Index == 0 || (RegIdx.Index >= 2 && RegIdx.Index <= 3) ||
+ (RegIdx.Index >= 16 && RegIdx.Index <= 20));
+ }
bool isFGRAsmReg() const {
// AFGR64 is $0-$15 but we handle this in getAFGR64()
return isRegIdx() && RegIdx.Kind & RegKind_FGR && RegIdx.Index <= 31;
@@ -1014,6 +1153,7 @@ public:
case k_PhysRegister:
case k_RegisterIndex:
case k_Token:
+ case k_RegPair:
break;
}
}
@@ -1047,6 +1187,9 @@ public:
OS << Reg << " ";
OS << ">";
break;
+ case k_RegPair:
+ OS << "RegPair<" << RegIdx.Index << "," << RegIdx.Index + 1 << ">";
+ break;
}
}
}; // class MipsOperand
@@ -1085,6 +1228,13 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
switch (Opcode) {
default:
break;
+ case Mips::BBIT0:
+ case Mips::BBIT032:
+ case Mips::BBIT1:
+ case Mips::BBIT132:
+      assert(hasCnMips() && "instruction only valid for Octeon CPUs");
+ // Fall through
+
case Mips::BEQ:
case Mips::BNE:
case Mips::BEQ_MM:
@@ -1125,6 +1275,17 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
1LL << (inMicroMipsMode() ? 1 : 2)))
return Error(IDLoc, "branch to misaligned address");
break;
+ case Mips::BEQZ16_MM:
+ case Mips::BNEZ16_MM:
+ assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
+ Offset = Inst.getOperand(1);
+ if (!Offset.isImm())
+ break; // We'll deal with this situation later on when applying fixups.
+ if (!isIntN(8, Offset.getImm()))
+ return Error(IDLoc, "branch target out of range");
+ if (OffsetToAlignment(Offset.getImm(), 2LL))
+ return Error(IDLoc, "branch to misaligned address");
+ break;
}
}
@@ -1136,6 +1297,74 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
"nop instruction");
}
+ if (hasCnMips()) {
+ const unsigned Opcode = Inst.getOpcode();
+ MCOperand Opnd;
+ int Imm;
+
+ switch (Opcode) {
+ default:
+ break;
+
+ case Mips::BBIT0:
+ case Mips::BBIT032:
+ case Mips::BBIT1:
+ case Mips::BBIT132:
+ assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
+ // The offset is handled above
+ Opnd = Inst.getOperand(1);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+      if (Imm < 0 || Imm > ((Opcode == Mips::BBIT0 ||
+                             Opcode == Mips::BBIT1) ? 63 : 31))
+ return Error(IDLoc, "immediate operand value out of range");
+ if (Imm > 31) {
+ Inst.setOpcode(Opcode == Mips::BBIT0 ? Mips::BBIT032
+ : Mips::BBIT132);
+ Inst.getOperand(1).setImm(Imm - 32);
+ }
+ break;
+
+ case Mips::CINS:
+ case Mips::CINS32:
+ case Mips::EXTS:
+ case Mips::EXTS32:
+ assert(MCID.getNumOperands() == 4 && "unexpected number of operands");
+ // Check length
+ Opnd = Inst.getOperand(3);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (Imm < 0 || Imm > 31)
+ return Error(IDLoc, "immediate operand value out of range");
+ // Check position
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+      if (Imm < 0 || Imm > ((Opcode == Mips::CINS ||
+                             Opcode == Mips::EXTS) ? 63 : 31))
+ return Error(IDLoc, "immediate operand value out of range");
+ if (Imm > 31) {
+ Inst.setOpcode(Opcode == Mips::CINS ? Mips::CINS32 : Mips::EXTS32);
+ Inst.getOperand(2).setImm(Imm - 32);
+ }
+ break;
+
+ case Mips::SEQi:
+ case Mips::SNEi:
+ assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (!isInt<10>(Imm))
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ }
+ }
+
if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) {
// If this instruction has a delay slot and .set reorder is active,
// emit a NOP after it.
@@ -1189,8 +1418,38 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
} // for
} // if load/store
- // TODO: Handle this with the AsmOperandClass.PredicateMethod.
if (inMicroMipsMode()) {
+ if (MCID.mayLoad()) {
+      // Try to create a 16-bit GP-relative load instruction.
+ for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
+ const MCOperandInfo &OpInfo = MCID.OpInfo[i];
+ if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
+ (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
+ MCOperand &Op = Inst.getOperand(i);
+ if (Op.isImm()) {
+ int MemOffset = Op.getImm();
+ MCOperand &DstReg = Inst.getOperand(0);
+ MCOperand &BaseReg = Inst.getOperand(1);
+ if (isIntN(9, MemOffset) && (MemOffset % 4 == 0) &&
+ getContext().getRegisterInfo()->getRegClass(
+ Mips::GPRMM16RegClassID).contains(DstReg.getReg()) &&
+ BaseReg.getReg() == Mips::GP) {
+ MCInst TmpInst;
+ TmpInst.setLoc(IDLoc);
+ TmpInst.setOpcode(Mips::LWGP_MM);
+ TmpInst.addOperand(MCOperand::CreateReg(DstReg.getReg()));
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::CreateImm(MemOffset));
+ Instructions.push_back(TmpInst);
+ return false;
+ }
+ }
+ }
+ } // for
+ } // if load
+
+ // TODO: Handle this with the AsmOperandClass.PredicateMethod.
+
MCOperand Opnd;
int Imm;
@@ -1260,6 +1519,57 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
Imm == 64 || Imm == 255 || Imm == 32768 || Imm == 65535))
return Error(IDLoc, "immediate operand value out of range");
break;
+ case Mips::LBU16_MM:
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (Imm < -1 || Imm > 14)
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ case Mips::SB16_MM:
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (Imm < 0 || Imm > 15)
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ case Mips::LHU16_MM:
+ case Mips::SH16_MM:
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (Imm < 0 || Imm > 30 || (Imm % 2 != 0))
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ case Mips::LW16_MM:
+ case Mips::SW16_MM:
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (Imm < 0 || Imm > 60 || (Imm % 4 != 0))
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ case Mips::CACHE:
+ case Mips::PREF:
+ Opnd = Inst.getOperand(2);
+ if (!Opnd.isImm())
+ return Error(IDLoc, "expected immediate operand kind");
+ Imm = Opnd.getImm();
+ if (!isUInt<5>(Imm))
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
+ case Mips::ADDIUPC_MM:
+      Opnd = Inst.getOperand(1);
+      if (!Opnd.isImm())
+        return Error(IDLoc, "expected immediate operand kind");
+      Imm = Opnd.getImm();
+ if ((Imm % 4 != 0) || !isIntN(25, Imm))
+ return Error(IDLoc, "immediate operand value out of range");
+ break;
}
}
@@ -1278,6 +1588,11 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
case Mips::LoadAddr32Imm:
case Mips::LoadAddr32Reg:
case Mips::LoadImm64Reg:
+ case Mips::B_MM_Pseudo:
+ case Mips::LWM_MM:
+ case Mips::SWM_MM:
+ case Mips::JalOneReg:
+ case Mips::JalTwoReg:
return true;
default:
return false;
@@ -1287,9 +1602,7 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
switch (Inst.getOpcode()) {
- default:
- assert(0 && "unimplemented expansion");
- return true;
+ default: llvm_unreachable("unimplemented expansion");
case Mips::LoadImm32Reg:
return expandLoadImm(Inst, IDLoc, Instructions);
case Mips::LoadImm64Reg:
@@ -1302,6 +1615,14 @@ bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
return expandLoadAddressImm(Inst, IDLoc, Instructions);
case Mips::LoadAddr32Reg:
return expandLoadAddressReg(Inst, IDLoc, Instructions);
+ case Mips::B_MM_Pseudo:
+ return expandUncondBranchMMPseudo(Inst, IDLoc, Instructions);
+ case Mips::SWM_MM:
+ case Mips::LWM_MM:
+ return expandLoadStoreMultiple(Inst, IDLoc, Instructions);
+ case Mips::JalOneReg:
+ case Mips::JalTwoReg:
+ return expandJalWithRegs(Inst, IDLoc, Instructions);
}
}
@@ -1336,6 +1657,48 @@ void createShiftOr(int64_t Value, unsigned RegNo, SMLoc IDLoc,
}
}
+bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+  // Create a JALR instruction that will replace the pseudo-JAL.
+ MCInst JalrInst;
+ JalrInst.setLoc(IDLoc);
+ const MCOperand FirstRegOp = Inst.getOperand(0);
+ const unsigned Opcode = Inst.getOpcode();
+
+ if (Opcode == Mips::JalOneReg) {
+ // jal $rs => jalr $rs
+ if (inMicroMipsMode()) {
+ JalrInst.setOpcode(Mips::JALR16_MM);
+ JalrInst.addOperand(FirstRegOp);
+ } else {
+ JalrInst.setOpcode(Mips::JALR);
+ JalrInst.addOperand(MCOperand::CreateReg(Mips::RA));
+ JalrInst.addOperand(FirstRegOp);
+ }
+ } else if (Opcode == Mips::JalTwoReg) {
+ // jal $rd, $rs => jalr $rd, $rs
+ JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR);
+ JalrInst.addOperand(FirstRegOp);
+ const MCOperand SecondRegOp = Inst.getOperand(1);
+ JalrInst.addOperand(SecondRegOp);
+ }
+ Instructions.push_back(JalrInst);
+
+ // If .set reorder is active, emit a NOP after it.
+ if (AssemblerOptions.back()->isReorder()) {
+ // This is a 32-bit NOP because these 2 pseudo-instructions
+ // do not have a short delay slot.
+ MCInst NopInst;
+ NopInst.setOpcode(Mips::SLL);
+ NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ NopInst.addOperand(MCOperand::CreateImm(0));
+ Instructions.push_back(NopInst);
+ }
+
+ return false;
+}
+
bool MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
@@ -1587,6 +1950,49 @@ MipsAsmParser::expandLoadAddressSym(MCInst &Inst, SMLoc IDLoc,
}
}
+bool MipsAsmParser::expandUncondBranchMMPseudo(
+ MCInst &Inst, SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
+ assert(getInstDesc(Inst.getOpcode()).getNumOperands() == 1 &&
+ "unexpected number of operands");
+
+ MCOperand Offset = Inst.getOperand(0);
+ if (Offset.isExpr()) {
+ Inst.clear();
+ Inst.setOpcode(Mips::BEQ_MM);
+ Inst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ Inst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ Inst.addOperand(MCOperand::CreateExpr(Offset.getExpr()));
+ } else {
+ assert(Offset.isImm() && "expected immediate operand kind");
+ if (isIntN(11, Offset.getImm())) {
+      // If the offset fits into 11 bits, this instruction becomes the
+      // microMIPS 16-bit unconditional branch instruction.
+ Inst.setOpcode(Mips::B16_MM);
+ } else {
+ if (!isIntN(17, Offset.getImm()))
+ Error(IDLoc, "branch target out of range");
+ if (OffsetToAlignment(Offset.getImm(), 1LL << 1))
+ Error(IDLoc, "branch to misaligned address");
+ Inst.clear();
+ Inst.setOpcode(Mips::BEQ_MM);
+ Inst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ Inst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ Inst.addOperand(MCOperand::CreateImm(Offset.getImm()));
+ }
+ }
+ Instructions.push_back(Inst);
+
+ if (AssemblerOptions.back()->isReorder()) {
+ // If .set reorder is active, emit a NOP after the branch instruction.
+ MCInst NopInst;
+ NopInst.setOpcode(Mips::MOVE16_MM);
+ NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ Instructions.push_back(NopInst);
+ }
+ return false;
+}
+
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions,
bool isLoad, bool isImmOpnd) {
@@ -1703,6 +2109,29 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.clear();
}
+bool
+MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ unsigned OpNum = Inst.getNumOperands();
+ unsigned Opcode = Inst.getOpcode();
+ unsigned NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM32_MM : Mips::LWM32_MM;
+
+  assert(Inst.getOperand(OpNum - 1).isImm() &&
+         Inst.getOperand(OpNum - 2).isReg() &&
+         Inst.getOperand(OpNum - 3).isReg() && "Invalid instruction operand.");
+
+ if (OpNum < 8 && Inst.getOperand(OpNum - 1).getImm() <= 60 &&
+ Inst.getOperand(OpNum - 1).getImm() >= 0 &&
+ Inst.getOperand(OpNum - 2).getReg() == Mips::SP &&
+ Inst.getOperand(OpNum - 3).getReg() == Mips::RA)
+    // It can be implemented as a SWM16 or LWM16 instruction.
+ NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM16_MM : Mips::LWM16_MM;
+
+ Inst.setOpcode(NewOpcode);
+ Instructions.push_back(Inst);
+ return false;
+}
+
unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
// As described by the Mips32r2 spec, the registers Rd and Rs for
// jalr.hb must be different.
@@ -1727,8 +2156,6 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
switch (MatchResult) {
- default:
- break;
case Match_Success: {
if (processInstruction(Inst, IDLoc, Instructions))
return true;
@@ -1757,7 +2184,8 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_RequiresDifferentSrcAndDst:
return Error(IDLoc, "source and destination must be different");
}
- return true;
+
+ llvm_unreachable("Implement any new match types added!");
}
void MipsAsmParser::warnIfAssemblerTemporary(int RegIndex, SMLoc Loc) {
@@ -2642,6 +3070,61 @@ MipsAsmParser::parseRegisterList(OperandVector &Operands) {
return MatchOperand_Success;
}
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseRegisterPair(OperandVector &Operands) {
+ MCAsmParser &Parser = getParser();
+
+ SMLoc S = Parser.getTok().getLoc();
+ if (parseAnyRegister(Operands) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+
+ SMLoc E = Parser.getTok().getLoc();
+ MipsOperand &Op = static_cast<MipsOperand &>(*Operands.back());
+ unsigned Reg = Op.getGPR32Reg();
+ Operands.pop_back();
+ Operands.push_back(MipsOperand::CreateRegPair(Reg, S, E, *this));
+ return MatchOperand_Success;
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseMovePRegPair(OperandVector &Operands) {
+ MCAsmParser &Parser = getParser();
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> TmpOperands;
+ SmallVector<unsigned, 10> Regs;
+
+ if (Parser.getTok().isNot(AsmToken::Dollar))
+ return MatchOperand_ParseFail;
+
+ SMLoc S = Parser.getTok().getLoc();
+
+ if (parseAnyRegister(TmpOperands) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+
+ MipsOperand *Reg = &static_cast<MipsOperand &>(*TmpOperands.back());
+ unsigned RegNo = isGP64bit() ? Reg->getGPR64Reg() : Reg->getGPR32Reg();
+ Regs.push_back(RegNo);
+
+ SMLoc E = Parser.getTok().getLoc();
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Error(E, "',' expected");
+ return MatchOperand_ParseFail;
+ }
+
+ // Remove comma.
+ Parser.Lex();
+
+ if (parseAnyRegister(TmpOperands) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+
+ Reg = &static_cast<MipsOperand &>(*TmpOperands.back());
+ RegNo = isGP64bit() ? Reg->getGPR64Reg() : Reg->getGPR32Reg();
+ Regs.push_back(RegNo);
+
+ Operands.push_back(MipsOperand::CreateRegList(Regs, S, E, *this));
+
+ return MatchOperand_Success;
+}
+
MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
MCSymbolRefExpr::VariantKind VK =
@@ -2804,67 +3287,84 @@ bool MipsAsmParser::reportParseError(SMLoc Loc, Twine ErrorMsg) {
bool MipsAsmParser::parseSetNoAtDirective() {
MCAsmParser &Parser = getParser();
// Line should look like: ".set noat".
- // set at reg to 0.
+
+ // Set the $at register to $0.
AssemblerOptions.back()->setATReg(0);
- // eat noat
- Parser.Lex();
+
+ Parser.Lex(); // Eat "noat".
+
// If this is not the end of the statement, report an error.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
reportParseError("unexpected token, expected end of statement");
return false;
}
+
+ getTargetStreamer().emitDirectiveSetNoAt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
bool MipsAsmParser::parseSetAtDirective() {
+ // Line can be: ".set at", which sets $at to $1
+ // or ".set at=$reg", which sets $at to $reg.
MCAsmParser &Parser = getParser();
- // Line can be .set at - defaults to $1
- // or .set at=$reg
- int AtRegNo;
- getParser().Lex();
+ Parser.Lex(); // Eat "at".
+
if (getLexer().is(AsmToken::EndOfStatement)) {
+ // No register was specified, so we set $at to $1.
AssemblerOptions.back()->setATReg(1);
+
+ getTargetStreamer().emitDirectiveSetAt();
Parser.Lex(); // Consume the EndOfStatement.
return false;
- } else if (getLexer().is(AsmToken::Equal)) {
- getParser().Lex(); // Eat the '='.
- if (getLexer().isNot(AsmToken::Dollar)) {
- reportParseError("unexpected token, expected dollar sign '$'");
+ }
+
+ if (getLexer().isNot(AsmToken::Equal)) {
+ reportParseError("unexpected token, expected equals sign");
+ return false;
+ }
+ Parser.Lex(); // Eat "=".
+
+ if (getLexer().isNot(AsmToken::Dollar)) {
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ reportParseError("no register specified");
return false;
- }
- Parser.Lex(); // Eat the '$'.
- const AsmToken &Reg = Parser.getTok();
- if (Reg.is(AsmToken::Identifier)) {
- AtRegNo = matchCPURegisterName(Reg.getIdentifier());
- } else if (Reg.is(AsmToken::Integer)) {
- AtRegNo = Reg.getIntVal();
} else {
- reportParseError("unexpected token, expected identifier or integer");
- return false;
- }
-
- if (AtRegNo < 0 || AtRegNo > 31) {
- reportParseError("unexpected token in statement");
+ reportParseError("unexpected token, expected dollar sign '$'");
return false;
}
+ }
+ Parser.Lex(); // Eat "$".
- if (!AssemblerOptions.back()->setATReg(AtRegNo)) {
- reportParseError("invalid register");
- return false;
- }
- getParser().Lex(); // Eat the register.
+ // Find out what "reg" is.
+ unsigned AtRegNo;
+ const AsmToken &Reg = Parser.getTok();
+ if (Reg.is(AsmToken::Identifier)) {
+ AtRegNo = matchCPURegisterName(Reg.getIdentifier());
+ } else if (Reg.is(AsmToken::Integer)) {
+ AtRegNo = Reg.getIntVal();
+ } else {
+ reportParseError("unexpected token, expected identifier or integer");
+ return false;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- reportParseError("unexpected token, expected end of statement");
- return false;
- }
- Parser.Lex(); // Consume the EndOfStatement.
+ // Check if $reg is a valid register. If it is, set $at to $reg.
+ if (!AssemblerOptions.back()->setATReg(AtRegNo)) {
+ reportParseError("invalid register");
return false;
- } else {
- reportParseError("unexpected token in statement");
+ }
+ Parser.Lex(); // Eat "reg".
+
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
return false;
}
+
+ getTargetStreamer().emitDirectiveSetAtWithArg(AtRegNo);
+
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
}
bool MipsAsmParser::parseSetReorderDirective() {
@@ -3118,9 +3618,13 @@ bool MipsAsmParser::parseSetArchDirective() {
.Case("mips5", "mips5")
.Case("mips32", "mips32")
.Case("mips32r2", "mips32r2")
+ .Case("mips32r3", "mips32r3")
+ .Case("mips32r5", "mips32r5")
.Case("mips32r6", "mips32r6")
.Case("mips64", "mips64")
.Case("mips64r2", "mips64r2")
+ .Case("mips64r3", "mips64r3")
+ .Case("mips64r5", "mips64r5")
.Case("mips64r6", "mips64r6")
.Case("cnmips", "cnmips")
.Case("r4000", "mips3") // This is an implementation of Mips3.
@@ -3178,6 +3682,14 @@ bool MipsAsmParser::parseSetFeature(uint64_t Feature) {
selectArch("mips32r2");
getTargetStreamer().emitDirectiveSetMips32R2();
break;
+ case Mips::FeatureMips32r3:
+ selectArch("mips32r3");
+ getTargetStreamer().emitDirectiveSetMips32R3();
+ break;
+ case Mips::FeatureMips32r5:
+ selectArch("mips32r5");
+ getTargetStreamer().emitDirectiveSetMips32R5();
+ break;
case Mips::FeatureMips32r6:
selectArch("mips32r6");
getTargetStreamer().emitDirectiveSetMips32R6();
@@ -3190,6 +3702,14 @@ bool MipsAsmParser::parseSetFeature(uint64_t Feature) {
selectArch("mips64r2");
getTargetStreamer().emitDirectiveSetMips64R2();
break;
+ case Mips::FeatureMips64r3:
+ selectArch("mips64r3");
+ getTargetStreamer().emitDirectiveSetMips64R3();
+ break;
+ case Mips::FeatureMips64r5:
+ selectArch("mips64r5");
+ getTargetStreamer().emitDirectiveSetMips64R5();
+ break;
case Mips::FeatureMips64r6:
selectArch("mips64r6");
getTargetStreamer().emitDirectiveSetMips64R6();
@@ -3294,12 +3814,20 @@ bool MipsAsmParser::parseDirectiveCPSetup() {
if (!eatComma("unexpected token, expected comma"))
return true;
- StringRef Name;
- if (Parser.parseIdentifier(Name))
- reportParseError("expected identifier");
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ const MCExpr *Expr;
+ if (Parser.parseExpression(Expr)) {
+ reportParseError("expected expression");
+ return false;
+ }
+
+ if (Expr->getKind() != MCExpr::SymbolRef) {
+ reportParseError("expected symbol");
+ return false;
+ }
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
- getTargetStreamer().emitDirectiveCpsetup(FuncReg, Save, *Sym, SaveIsReg);
+ getTargetStreamer().emitDirectiveCpsetup(FuncReg, Save, Ref->getSymbol(),
+ SaveIsReg);
return false;
}
@@ -3375,12 +3903,20 @@ bool MipsAsmParser::parseDirectiveSet() {
return parseSetFeature(Mips::FeatureMips32);
} else if (Tok.getString() == "mips32r2") {
return parseSetFeature(Mips::FeatureMips32r2);
+ } else if (Tok.getString() == "mips32r3") {
+ return parseSetFeature(Mips::FeatureMips32r3);
+ } else if (Tok.getString() == "mips32r5") {
+ return parseSetFeature(Mips::FeatureMips32r5);
} else if (Tok.getString() == "mips32r6") {
return parseSetFeature(Mips::FeatureMips32r6);
} else if (Tok.getString() == "mips64") {
return parseSetFeature(Mips::FeatureMips64);
} else if (Tok.getString() == "mips64r2") {
return parseSetFeature(Mips::FeatureMips64r2);
+ } else if (Tok.getString() == "mips64r3") {
+ return parseSetFeature(Mips::FeatureMips64r3);
+ } else if (Tok.getString() == "mips64r5") {
+ return parseSetFeature(Mips::FeatureMips64r5);
} else if (Tok.getString() == "mips64r6") {
return parseSetFeature(Mips::FeatureMips64r6);
} else if (Tok.getString() == "dsp") {
@@ -3518,43 +4054,44 @@ bool MipsAsmParser::parseDirectiveModule() {
return false;
}
- if (Lexer.is(AsmToken::Identifier)) {
- StringRef Option = Parser.getTok().getString();
- Parser.Lex();
-
- if (Option == "oddspreg") {
- getTargetStreamer().emitDirectiveModuleOddSPReg(true, isABI_O32());
- clearFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+ StringRef Option;
+ if (Parser.parseIdentifier(Option)) {
+ reportParseError("expected .module option identifier");
+ return false;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- reportParseError("unexpected token, expected end of statement");
- return false;
- }
+ if (Option == "oddspreg") {
+ getTargetStreamer().emitDirectiveModuleOddSPReg(true, isABI_O32());
+ clearFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
return false;
- } else if (Option == "nooddspreg") {
- if (!isABI_O32()) {
- Error(L, "'.module nooddspreg' requires the O32 ABI");
- return false;
- }
+ }
- getTargetStreamer().emitDirectiveModuleOddSPReg(false, isABI_O32());
- setFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+ return false; // parseDirectiveModule has finished successfully.
+ } else if (Option == "nooddspreg") {
+ if (!isABI_O32()) {
+ Error(L, "'.module nooddspreg' requires the O32 ABI");
+ return false;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- reportParseError("unexpected token, expected end of statement");
- return false;
- }
+ getTargetStreamer().emitDirectiveModuleOddSPReg(false, isABI_O32());
+ setFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
return false;
- } else if (Option == "fp") {
- return parseDirectiveModuleFP();
}
+ return false; // parseDirectiveModule has finished successfully.
+ } else if (Option == "fp") {
+ return parseDirectiveModuleFP();
+ } else {
return Error(L, "'" + Twine(Option) + "' is not a valid .module option.");
}
-
- return false;
}
/// parseDirectiveModuleFP
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index 1f201b0..36ba8e5 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -21,7 +21,6 @@ add_llvm_target(MipsCodeGen
Mips16ISelDAGToDAG.cpp
Mips16ISelLowering.cpp
Mips16RegisterInfo.cpp
- MipsABIInfo.cpp
MipsAnalyzeImmediate.cpp
MipsAsmPrinter.cpp
MipsCCState.cpp
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 48904ce..8849366 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -30,34 +30,15 @@ typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
-/// A disasembler class for Mips.
-class MipsDisassemblerBase : public MCDisassembler {
+class MipsDisassembler : public MCDisassembler {
+ bool IsMicroMips;
+ bool IsBigEndian;
public:
- MipsDisassemblerBase(const MCSubtargetInfo &STI, MCContext &Ctx,
- bool IsBigEndian)
+ MipsDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool IsBigEndian)
: MCDisassembler(STI, Ctx),
- IsN64(STI.getFeatureBits() & Mips::FeatureN64),
+ IsMicroMips(STI.getFeatureBits() & Mips::FeatureMicroMips),
IsBigEndian(IsBigEndian) {}
- virtual ~MipsDisassemblerBase() {}
-
- bool isN64() const { return IsN64; }
-
-private:
- bool IsN64;
-protected:
- bool IsBigEndian;
-};
-
-/// A disasembler class for Mips32.
-class MipsDisassembler : public MipsDisassemblerBase {
- bool IsMicroMips;
-public:
- MipsDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool bigEndian)
- : MipsDisassemblerBase(STI, Ctx, bigEndian) {
- IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
- }
-
bool hasMips3() const { return STI.getFeatureBits() & Mips::FeatureMips3; }
bool hasMips32() const { return STI.getFeatureBits() & Mips::FeatureMips32; }
bool hasMips32r6() const {
@@ -77,19 +58,6 @@ public:
raw_ostream &CStream) const override;
};
-/// A disasembler class for Mips64.
-class Mips64Disassembler : public MipsDisassemblerBase {
-public:
- Mips64Disassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
- bool bigEndian) :
- MipsDisassemblerBase(STI, Ctx, bigEndian) {}
-
- DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
- ArrayRef<uint8_t> Bytes, uint64_t Address,
- raw_ostream &VStream,
- raw_ostream &CStream) const override;
-};
-
} // end anonymous namespace
// Forward declare these because the autogenerated code will reference them.
@@ -109,6 +77,16 @@ static DecodeStatus DecodeGPRMM16RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeGPRMM16ZeroRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeGPRMM16MovePRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -223,6 +201,20 @@ static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+// DecodeBranchTarget7MM - Decode microMIPS branch offset, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeBranchTarget7MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
+// DecodeBranchTarget10MM - Decode microMIPS branch offset, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeBranchTarget10MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
// DecodeBranchTargetMM - Decode microMIPS branch offset, which is
// shifted left by 1 bit.
static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
@@ -247,9 +239,44 @@ static DecodeStatus DecodeCacheOp(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeCacheOpR6(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeCacheOpMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeSyncI(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeMemMMImm4(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMemMMSPImm5Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMemMMGPImm7Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMemMMReglistImm4Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -272,11 +299,35 @@ static DecodeStatus DecodeFMem3(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFMemCop2R6(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeAddiur2Simm7(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeUImm6Lsl2(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeLiSimm7(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeSimm4(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeSimm16(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -305,6 +356,18 @@ static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeSimm18Lsl3(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeSimm9SP(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeANDI16Imm(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeUImm5lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeSimm23Lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
/// INSVE_[BHWD] have an implicit operand that the generated decoder doesn't
/// handle.
template <typename InsnType>
@@ -345,6 +408,14 @@ static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeRegListOperand16(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMovePRegPair(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
namespace llvm {
extern Target TheMipselTarget, TheMipsTarget, TheMips64Target,
TheMips64elTarget;
@@ -364,20 +435,6 @@ static MCDisassembler *createMipselDisassembler(
return new MipsDisassembler(STI, Ctx, false);
}
-static MCDisassembler *createMips64Disassembler(
- const Target &T,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new Mips64Disassembler(STI, Ctx, true);
-}
-
-static MCDisassembler *createMips64elDisassembler(
- const Target &T,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new Mips64Disassembler(STI, Ctx, false);
-}
-
extern "C" void LLVMInitializeMipsDisassembler() {
// Register the disassembler.
TargetRegistry::RegisterMCDisassembler(TheMipsTarget,
@@ -385,15 +442,15 @@ extern "C" void LLVMInitializeMipsDisassembler() {
TargetRegistry::RegisterMCDisassembler(TheMipselTarget,
createMipselDisassembler);
TargetRegistry::RegisterMCDisassembler(TheMips64Target,
- createMips64Disassembler);
+ createMipsDisassembler);
TargetRegistry::RegisterMCDisassembler(TheMips64elTarget,
- createMips64elDisassembler);
+ createMipselDisassembler);
}
#include "MipsGenDisassemblerTables.inc"
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
- const MipsDisassemblerBase *Dis = static_cast<const MipsDisassemblerBase*>(D);
+ const MipsDisassembler *Dis = static_cast<const MipsDisassembler*>(D);
const MCRegisterInfo *RegInfo = Dis->getContext().getRegisterInfo();
return *(RegInfo->getRegClass(RC).begin() + RegNo);
}
@@ -700,6 +757,26 @@ static DecodeStatus DecodeBlezGroupBranch(MCInst &MI, InsnType insn,
return MCDisassembler::Success;
}
+/// Read two bytes from the ArrayRef and return the 16-bit halfword
+/// assembled according to the given endianness.
+static DecodeStatus readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address,
+ uint64_t &Size, uint32_t &Insn,
+ bool IsBigEndian) {
+ // We want to read exactly 2 Bytes of data.
+ if (Bytes.size() < 2) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+
+ if (IsBigEndian) {
+ Insn = (Bytes[0] << 8) | Bytes[1];
+ } else {
+ Insn = (Bytes[1] << 8) | Bytes[0];
+ }
+
+ return MCDisassembler::Success;
+}
+
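For reference, the two byte orders handled by readInstruction16 above, as a pure function; a sketch, not patch code:

#include <cstdint>

// Assemble a 16-bit halfword from two consecutive stream bytes.
uint32_t assembleHalfword(uint8_t B0, uint8_t B1, bool IsBigEndian) {
  return IsBigEndian ? (uint32_t(B0) << 8) | B1  // stream order: hi, lo
                     : (uint32_t(B1) << 8) | B0; // stream order: lo, hi
}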
/// Read four bytes from the ArrayRef and return the 32-bit word
/// assembled according to the given endianness.
static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address,
@@ -711,15 +788,19 @@ static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address,
return MCDisassembler::Fail;
}
+ // The high 16 bits of a 32-bit microMIPS instruction (where the opcode
+ // lives) always precede the low 16 bits in the instruction stream, i.e.
+ // they are placed at lower addresses.
+ //
+ // microMIPS byte ordering:
+ // Big-endian: 0 | 1 | 2 | 3
+ // Little-endian: 1 | 0 | 3 | 2
+
if (IsBigEndian) {
// Encoded as a big-endian 32-bit word in the stream.
Insn =
(Bytes[3] << 0) | (Bytes[2] << 8) | (Bytes[1] << 16) | (Bytes[0] << 24);
} else {
- // Encoded as a small-endian 32-bit word in the stream.
- // Little-endian byte ordering:
- // mips32r2: 4 | 3 | 2 | 1
- // microMIPS: 2 | 1 | 4 | 3
if (IsMicroMips) {
Insn = (Bytes[2] << 0) | (Bytes[3] << 8) | (Bytes[0] << 16) |
(Bytes[1] << 24);
@@ -738,14 +819,25 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
raw_ostream &VStream,
raw_ostream &CStream) const {
uint32_t Insn;
-
- DecodeStatus Result =
- readInstruction32(Bytes, Address, Size, Insn, IsBigEndian, IsMicroMips);
- if (Result == MCDisassembler::Fail)
- return MCDisassembler::Fail;
+ DecodeStatus Result;
if (IsMicroMips) {
- DEBUG(dbgs() << "Trying MicroMips32 table (32-bit opcodes):\n");
+ Result = readInstruction16(Bytes, Address, Size, Insn, IsBigEndian);
+
+ DEBUG(dbgs() << "Trying MicroMips16 table (16-bit instructions):\n");
+ // Calling the auto-generated decoder function.
+ Result = decodeInstruction(DecoderTableMicroMips16, Instr, Insn, Address,
+ this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 2;
+ return Result;
+ }
+
+ Result = readInstruction32(Bytes, Address, Size, Insn, IsBigEndian, true);
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ DEBUG(dbgs() << "Trying MicroMips32 table (32-bit instructions):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMicroMips32, Instr, Insn, Address,
this, STI);
@@ -756,6 +848,10 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
return MCDisassembler::Fail;
}
+ Result = readInstruction32(Bytes, Address, Size, Insn, IsBigEndian, false);
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
if (hasCOP3()) {
DEBUG(dbgs() << "Trying COP3_ table (32-bit opcodes):\n");
Result =
@@ -786,39 +882,19 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
}
- DEBUG(dbgs() << "Trying Mips table (32-bit opcodes):\n");
- // Calling the auto-generated decoder function.
- Result =
- decodeInstruction(DecoderTableMips32, Instr, Insn, Address, this, STI);
- if (Result != MCDisassembler::Fail) {
- Size = 4;
- return Result;
+ if (isGP64()) {
+ DEBUG(dbgs() << "Trying Mips64 (GPR64) table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMips6432, Instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
}
- return MCDisassembler::Fail;
-}
-
-DecodeStatus Mips64Disassembler::getInstruction(MCInst &Instr, uint64_t &Size,
- ArrayRef<uint8_t> Bytes,
- uint64_t Address,
- raw_ostream &VStream,
- raw_ostream &CStream) const {
- uint32_t Insn;
-
- DecodeStatus Result =
- readInstruction32(Bytes, Address, Size, Insn, IsBigEndian, false);
- if (Result == MCDisassembler::Fail)
- return MCDisassembler::Fail;
-
+ DEBUG(dbgs() << "Trying Mips table (32-bit opcodes):\n");
// Calling the auto-generated decoder function.
Result =
- decodeInstruction(DecoderTableMips6432, Instr, Insn, Address, this, STI);
- if (Result != MCDisassembler::Fail) {
- Size = 4;
- return Result;
- }
- // If we fail to decode in Mips64 decoder space we can try in Mips32
- Result =
decodeInstruction(DecoderTableMips32, Instr, Insn, Address, this, STI);
if (Result != MCDisassembler::Fail) {
Size = 4;
@@ -854,7 +930,33 @@ static DecodeStatus DecodeGPRMM16RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- return MCDisassembler::Fail;
+ if (RegNo > 7)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::GPRMM16RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeGPRMM16ZeroRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::GPRMM16ZeroRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeGPRMM16MovePRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::GPRMM16MovePRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
}
static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
@@ -872,7 +974,7 @@ static DecodeStatus DecodePtrRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- if (static_cast<const MipsDisassembler *>(Decoder)->isN64())
+ if (static_cast<const MipsDisassembler *>(Decoder)->isGP64())
return DecodeGPR64RegisterClass(Inst, RegNo, Address, Decoder);
return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder);
@@ -953,7 +1055,8 @@ static DecodeStatus DecodeMem(MCInst &Inst,
Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
- if(Inst.getOpcode() == Mips::SC){
+ if(Inst.getOpcode() == Mips::SC ||
+ Inst.getOpcode() == Mips::SCD){
Inst.addOperand(MCOperand::CreateReg(Reg));
}
@@ -981,6 +1084,55 @@ static DecodeStatus DecodeCacheOp(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeCacheOpMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<12>(Insn & 0xfff);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+ unsigned Hint = fieldFromInstruction(Insn, 21, 5);
+
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+ Inst.addOperand(MCOperand::CreateImm(Hint));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCacheOpR6(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = fieldFromInstruction(Insn, 7, 9);
+ unsigned Hint = fieldFromInstruction(Insn, 16, 5);
+ unsigned Base = fieldFromInstruction(Insn, 21, 5);
+
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+ Inst.addOperand(MCOperand::CreateImm(Hint));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSyncI(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ unsigned Base = fieldFromInstruction(Insn, 21, 5);
+
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
int Offset = SignExtend32<10>(fieldFromInstruction(Insn, 16, 10));
@@ -1027,6 +1179,106 @@ static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeMemMMImm4(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Offset = Insn & 0xf;
+ unsigned Reg = fieldFromInstruction(Insn, 7, 3);
+ unsigned Base = fieldFromInstruction(Insn, 4, 3);
+
+ switch (Inst.getOpcode()) {
+ case Mips::LBU16_MM:
+ case Mips::LHU16_MM:
+ case Mips::LW16_MM:
+ if (DecodeGPRMM16RegisterClass(Inst, Reg, Address, Decoder)
+ == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+ break;
+ case Mips::SB16_MM:
+ case Mips::SH16_MM:
+ case Mips::SW16_MM:
+ if (DecodeGPRMM16ZeroRegisterClass(Inst, Reg, Address, Decoder)
+ == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+ break;
+ }
+
+ if (DecodeGPRMM16RegisterClass(Inst, Base, Address, Decoder)
+ == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ switch (Inst.getOpcode()) {
+ case Mips::LBU16_MM:
+ if (Offset == 0xf)
+ Inst.addOperand(MCOperand::CreateImm(-1));
+ else
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+ break;
+ case Mips::SB16_MM:
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+ break;
+ case Mips::LHU16_MM:
+ case Mips::SH16_MM:
+ Inst.addOperand(MCOperand::CreateImm(Offset << 1));
+ break;
+ case Mips::LW16_MM:
+ case Mips::SW16_MM:
+ Inst.addOperand(MCOperand::CreateImm(Offset << 2));
+ break;
+ }
+
+ return MCDisassembler::Success;
+}
+
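The opcode-dependent immediate handling above amounts to scaling the 4-bit field by the access size, plus one escape code for LBU16. A sketch with illustrative names:

#include <cstdint>

// AccessBytes is 1 for byte ops, 2 for halfword ops, 4 for word ops,
// so the multiply matches the << 1 / << 2 cases above.
int32_t scaleMMImm4(unsigned RawOffset, unsigned AccessBytes, bool IsLBU16) {
  if (IsLBU16 && RawOffset == 0xf)
    return -1; // 0xf is repurposed as offset -1 for LBU16_MM only
  return int32_t(RawOffset * AccessBytes);
}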
+static DecodeStatus DecodeMemMMSPImm5Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Offset = Insn & 0x1F;
+ unsigned Reg = fieldFromInstruction(Insn, 5, 5);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Mips::SP));
+ Inst.addOperand(MCOperand::CreateImm(Offset << 2));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMGPImm7Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Offset = Insn & 0x7F;
+ unsigned Reg = fieldFromInstruction(Insn, 7, 3);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateImm(Offset << 2));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMReglistImm4Lsl2(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<4>(Insn & 0xf);
+
+ if (DecodeRegListOperand16(Inst, Insn, Address, Decoder)
+ == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(Mips::SP));
+ Inst.addOperand(MCOperand::CreateImm(Offset << 2));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1052,6 +1304,9 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
// fallthrough
default:
Inst.addOperand(MCOperand::CreateReg(Reg));
+ if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM)
+ Inst.addOperand(MCOperand::CreateReg(Reg+1));
+
Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));
}
@@ -1131,6 +1386,23 @@ static DecodeStatus DecodeFMem3(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFMemCop2R6(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<11>(Insn & 0x07ff);
+ unsigned Reg = fieldFromInstruction(Insn, 16, 5);
+ unsigned Base = fieldFromInstruction(Insn, 11, 5);
+
+ Reg = getReg(Decoder, Mips::COP2RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1324,6 +1596,24 @@ static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeBranchTarget7MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = SignExtend32<7>(Offset) << 1;
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeBranchTarget10MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = SignExtend32<10>(Offset) << 1;
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
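Both decoders above are SignExtend32<N> followed by a 1-bit left shift (the hardware drops the always-zero low bit of the byte offset). Spelled out as a sketch:

#include <cstdint>

// Sign-extend the low N bits of X (what llvm::SignExtend32<N> does).
template <unsigned N> int32_t signExtend32(uint32_t X) {
  return int32_t(X << (32 - N)) >> (32 - N);
}

// Recover the byte offset from the encoded field.
template <unsigned N> int32_t decodeBranchOffsetMM(uint32_t Field) {
  return signExtend32<N>(Field) << 1;
}
// e.g. decodeBranchOffsetMM<7>(0x40) == -128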
static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
unsigned Offset,
uint64_t Address,
@@ -1342,6 +1632,46 @@ static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeAddiur2Simm7(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Value == 0)
+ Inst.addOperand(MCOperand::CreateImm(1));
+ else if (Value == 0x7)
+ Inst.addOperand(MCOperand::CreateImm(-1));
+ else
+ Inst.addOperand(MCOperand::CreateImm(Value << 2));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeUImm6Lsl2(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(Value << 2));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeLiSimm7(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Value == 0x7F)
+ Inst.addOperand(MCOperand::CreateImm(-1));
+ else
+ Inst.addOperand(MCOperand::CreateImm(Value));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSimm4(MCInst &Inst,
+ unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<4>(Value)));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeSimm16(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1391,6 +1721,36 @@ static DecodeStatus DecodeSimm18Lsl3(MCInst &Inst, unsigned Insn,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeSimm9SP(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ int32_t DecodedValue;
+ switch (Insn) {
+ case 0: DecodedValue = 256; break;
+ case 1: DecodedValue = 257; break;
+ case 510: DecodedValue = -258; break;
+ case 511: DecodedValue = -257; break;
+ default: DecodedValue = SignExtend32<9>(Insn); break;
+ }
+ Inst.addOperand(MCOperand::CreateImm(DecodedValue * 4));
+ return MCDisassembler::Success;
+}
+
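The four escape codes above trade the values nearest the limits of a plain 9-bit encoding for slightly larger reach. A standalone sketch of the mapping:

#include <cstdint>

int32_t decodeSimm9SP(uint32_t Insn) {
  int32_t V;
  switch (Insn) {
  case 0:   V = 256;  break; // plain decoding would give 0
  case 1:   V = 257;  break; // plain decoding would give 1
  case 510: V = -258; break; // plain decoding would give -2
  case 511: V = -257; break; // plain decoding would give -1
  default:  V = int32_t(Insn << 23) >> 23; break; // SignExtend32<9>
  }
  return V * 4; // the operand is a word-granular stack offset
}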
+static DecodeStatus DecodeANDI16Imm(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ // Insn must be >= 0; since it is unsigned, that condition always holds.
+ assert(Insn < 16);
+ int32_t DecodedValues[] = {128, 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64,
+ 255, 32768, 65535};
+ Inst.addOperand(MCOperand::CreateImm(DecodedValues[Insn]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeUImm5lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(Insn << 2));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeRegListOperand(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1413,3 +1773,69 @@ static DecodeStatus DecodeRegListOperand(MCInst &Inst,
return MCDisassembler::Success;
}
+
+static DecodeStatus DecodeRegListOperand16(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned Regs[] = {Mips::S0, Mips::S1, Mips::S2, Mips::S3};
+ unsigned RegLst = fieldFromInstruction(Insn, 4, 2);
+ unsigned RegNum = RegLst & 0x3;
+
+ for (unsigned i = 0; i <= RegNum; i++)
+ Inst.addOperand(MCOperand::CreateReg(Regs[i]));
+
+ Inst.addOperand(MCOperand::CreateReg(Mips::RA));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMovePRegPair(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+
+ unsigned RegPair = fieldFromInstruction(Insn, 7, 3);
+
+ switch (RegPair) {
+ default:
+ return MCDisassembler::Fail;
+ case 0:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A1));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A2));
+ break;
+ case 1:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A1));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A3));
+ break;
+ case 2:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A2));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A3));
+ break;
+ case 3:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A0));
+ Inst.addOperand(MCOperand::CreateReg(Mips::S5));
+ break;
+ case 4:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A0));
+ Inst.addOperand(MCOperand::CreateReg(Mips::S6));
+ break;
+ case 5:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A0));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A1));
+ break;
+ case 6:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A0));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A2));
+ break;
+ case 7:
+ Inst.addOperand(MCOperand::CreateReg(Mips::A0));
+ Inst.addOperand(MCOperand::CreateReg(Mips::A3));
+ break;
+ }
+
+ return MCDisassembler::Success;
+}
+
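The switch above is a straight 3-bit lookup; the table form below may be easier to audit against getMovePRegPairOpValue, the matching encoder later in this patch. The enum stands in for the Mips::* registers; this is a sketch, not patch code.

#include <utility>

enum Reg { A0, A1, A2, A3, S5, S6 }; // placeholders for Mips::A0 etc.

// Index = the 3-bit RegPair field. Must stay the exact inverse of the
// if/else chain in getMovePRegPairOpValue.
static const std::pair<Reg, Reg> MovePRegPairs[8] = {
    {A1, A2}, {A1, A3}, {A2, A3}, {A0, S5},
    {A0, S6}, {A0, A1}, {A0, A2}, {A0, A3}};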
+static DecodeStatus DecodeSimm23Lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<23>(Insn) << 2));
+ return MCDisassembler::Success;
+}
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index ab6b225..aad549d 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -134,8 +134,8 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
} else if (const MipsMCExpr *ME = dyn_cast<MipsMCExpr>(Expr)) {
ME->print(OS);
return;
- } else if (!(SRE = dyn_cast<MCSymbolRefExpr>(Expr)))
- assert(false && "Unexpected MCExpr type.");
+ } else
+ SRE = cast<MCSymbolRefExpr>(Expr);
MCSymbolRefExpr::VariantKind Kind = SRE->getKind();
@@ -233,6 +233,8 @@ printMemOperand(const MCInst *MI, int opNum, raw_ostream &O) {
break;
case Mips::SWM32_MM:
case Mips::LWM32_MM:
+ case Mips::SWM16_MM:
+ case Mips::LWM16_MM:
opNum = MI->getNumOperands() - 2;
break;
}
@@ -260,6 +262,11 @@ printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O) {
}
void MipsInstPrinter::
+printRegisterPair(const MCInst *MI, int opNum, raw_ostream &O) {
+ printRegName(O, MI->getOperand(opNum).getReg());
+}
+
+void MipsInstPrinter::
printSHFMask(const MCInst *MI, int opNum, raw_ostream &O) {
llvm_unreachable("TODO");
}
@@ -283,6 +290,7 @@ bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI,
bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) {
switch (MI.getOpcode()) {
case Mips::BEQ:
+ case Mips::BEQ_MM:
// beq $zero, $zero, $L2 => b $L2
// beq $r0, $zero, $L2 => beqz $r0, $L2
return (isReg<Mips::ZERO>(MI, 0) && isReg<Mips::ZERO>(MI, 1) &&
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index 42df013..468dc07 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -99,6 +99,7 @@ private:
void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O);
+ void printRegisterPair(const MCInst *MI, int opNum, raw_ostream &O);
void printSHFMask(const MCInst *MI, int opNum, raw_ostream &O);
bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo,
diff --git a/lib/Target/Mips/MCTargetDesc/Android.mk b/lib/Target/Mips/MCTargetDesc/Android.mk
index 89e132d..7f462d3 100644
--- a/lib/Target/Mips/MCTargetDesc/Android.mk
+++ b/lib/Target/Mips/MCTargetDesc/Android.mk
@@ -8,6 +8,7 @@ mips_mc_desc_TBLGEN_TABLES := \
mips_mc_desc_SRC_FILES := \
MipsABIFlagsSection.cpp \
+ MipsABIInfo.cpp \
MipsAsmBackend.cpp \
MipsELFObjectWriter.cpp \
MipsELFStreamer.cpp \
diff --git a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
index 6b3788c..c63af7c 100644
--- a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
@@ -1,4 +1,5 @@
add_llvm_library(LLVMMipsDesc
+ MipsABIInfo.cpp
MipsABIFlagsSection.cpp
MipsAsmBackend.cpp
MipsELFObjectWriter.cpp
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
index 8bcfb0f..473f4f2 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
@@ -145,6 +145,10 @@ public:
ISALevel = 64;
if (P.hasMips64r6())
ISARevision = 6;
+ else if (P.hasMips64r5())
+ ISARevision = 5;
+ else if (P.hasMips64r3())
+ ISARevision = 3;
else if (P.hasMips64r2())
ISARevision = 2;
else
@@ -153,6 +157,10 @@ public:
ISALevel = 32;
if (P.hasMips32r6())
ISARevision = 6;
+ else if (P.hasMips32r5())
+ ISARevision = 5;
+ else if (P.hasMips32r3())
+ ISARevision = 3;
else if (P.hasMips32r2())
ISARevision = 2;
else
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
new file mode 100644
index 0000000..faf9741
--- /dev/null
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
@@ -0,0 +1,92 @@
+//===---- MipsABIInfo.cpp - Information about MIPS ABIs -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsABIInfo.h"
+#include "MipsRegisterInfo.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCTargetOptions.h"
+
+using namespace llvm;
+
+namespace {
+static const MCPhysReg O32IntRegs[4] = {Mips::A0, Mips::A1, Mips::A2, Mips::A3};
+
+static const MCPhysReg Mips64IntRegs[8] = {
+ Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
+}
+
+const ArrayRef<MCPhysReg> MipsABIInfo::GetByValArgRegs() const {
+ if (IsO32())
+ return makeArrayRef(O32IntRegs);
+ if (IsN32() || IsN64())
+ return makeArrayRef(Mips64IntRegs);
+ llvm_unreachable("Unhandled ABI");
+}
+
+const ArrayRef<MCPhysReg> MipsABIInfo::GetVarArgRegs() const {
+ if (IsO32())
+ return makeArrayRef(O32IntRegs);
+ if (IsN32() || IsN64())
+ return makeArrayRef(Mips64IntRegs);
+ llvm_unreachable("Unhandled ABI");
+}
+
+unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
+ if (IsO32())
+ return CC != CallingConv::Fast ? 16 : 0;
+ if (IsN32() || IsN64() || IsEABI())
+ return 0;
+ llvm_unreachable("Unhandled ABI");
+}
+
+MipsABIInfo MipsABIInfo::computeTargetABI(Triple TT, StringRef CPU,
+ const MCTargetOptions &Options) {
+ if (Options.getABIName().startswith("o32"))
+ return MipsABIInfo::O32();
+ else if (Options.getABIName().startswith("n32"))
+ return MipsABIInfo::N32();
+ else if (Options.getABIName().startswith("n64"))
+ return MipsABIInfo::N64();
+ else if (Options.getABIName().startswith("eabi"))
+ return MipsABIInfo::EABI();
+ else if (!Options.getABIName().empty())
+ llvm_unreachable("Unknown ABI option for MIPS");
+
+ // FIXME: This shares code with the selectMipsCPU routine that's
+ // used and not shared in a couple of other places. This needs unifying
+ // at some level.
+ if (CPU.empty() || CPU == "generic") {
+ if (TT.getArch() == Triple::mips || TT.getArch() == Triple::mipsel)
+ CPU = "mips32";
+ else
+ CPU = "mips64";
+ }
+
+ return StringSwitch<MipsABIInfo>(CPU)
+ .Case("mips1", MipsABIInfo::O32())
+ .Case("mips2", MipsABIInfo::O32())
+ .Case("mips32", MipsABIInfo::O32())
+ .Case("mips32r2", MipsABIInfo::O32())
+ .Case("mips32r3", MipsABIInfo::O32())
+ .Case("mips32r5", MipsABIInfo::O32())
+ .Case("mips32r6", MipsABIInfo::O32())
+ .Case("mips16", MipsABIInfo::O32())
+ .Case("mips3", MipsABIInfo::N64())
+ .Case("mips4", MipsABIInfo::N64())
+ .Case("mips5", MipsABIInfo::N64())
+ .Case("mips64", MipsABIInfo::N64())
+ .Case("mips64r2", MipsABIInfo::N64())
+ .Case("mips64r3", MipsABIInfo::N64())
+ .Case("mips64r5", MipsABIInfo::N64())
+ .Case("mips64r6", MipsABIInfo::N64())
+ .Case("octeon", MipsABIInfo::N64())
+ .Default(MipsABIInfo::Unknown());
+}
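To see the fallback in action: with an empty --target-abi and no CPU, the triple decides. A usage sketch assuming the headers added by this patch; it is not an in-tree test.

#include "MCTargetDesc/MipsABIInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCTargetOptions.h"
#include <cassert>
using namespace llvm;

void checkDefaultABI() {
  MCTargetOptions Opts; // ABIName left empty
  MipsABIInfo ABI = MipsABIInfo::computeTargetABI(
      Triple("mips64el-unknown-linux"), /*CPU=*/"", Opts);
  // CPU defaults to "mips64" for 64-bit triples, which maps to N64.
  assert(ABI.IsN64());
}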
diff --git a/lib/Target/Mips/MipsABIInfo.h b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
index bea585e..008e08e 100644
--- a/lib/Target/Mips/MipsABIInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
@@ -7,15 +7,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MIPSABIINFO_H
-#define MIPSABIINFO_H
+#ifndef LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSABIINFO_H
+#define LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSABIINFO_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
+class MCTargetOptions;
+class StringRef;
+
class MipsABIInfo {
public:
enum class ABI { Unknown, O32, N32, N64, EABI };
@@ -31,6 +35,8 @@ public:
static MipsABIInfo N32() { return MipsABIInfo(ABI::N32); }
static MipsABIInfo N64() { return MipsABIInfo(ABI::N64); }
static MipsABIInfo EABI() { return MipsABIInfo(ABI::EABI); }
+ static MipsABIInfo computeTargetABI(Triple TT, StringRef CPU,
+ const MCTargetOptions &Options);
bool IsKnown() const { return ThisABI != ABI::Unknown; }
bool IsO32() const { return ThisABI == ABI::O32; }
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index efeb54d..acf6f21 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -103,6 +103,22 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case Mips::fixup_MICROMIPS_26_S1:
Value >>= 1;
break;
+ case Mips::fixup_MICROMIPS_PC7_S1:
+ Value -= 4;
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t) Value / 2;
+ // We now check if Value can be encoded as a 7-bit signed immediate.
+ if (!isIntN(7, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC7 fixup");
+ break;
+ case Mips::fixup_MICROMIPS_PC10_S1:
+ Value -= 2;
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t) Value / 2;
+ // We now check if Value can be encoded as a 10-bit signed immediate.
+ if (!isIntN(10, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC10 fixup");
+ break;
case Mips::fixup_MICROMIPS_PC16_S1:
Value -= 4;
// Forcing a signed division because Value can be negative.
@@ -149,7 +165,8 @@ MCObjectWriter *MipsAsmBackend::createObjectWriter(raw_ostream &OS) const {
// microMIPS: x | x | a | b
static bool needsMMLEByteOrder(unsigned Kind) {
- return Kind >= Mips::fixup_MICROMIPS_26_S1 &&
+ return Kind != Mips::fixup_MICROMIPS_PC10_S1 &&
+ Kind >= Mips::fixup_MICROMIPS_26_S1 &&
Kind < Mips::LastTargetFixupKind;
}
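The writer-side arithmetic for the new PC10 fixup in the adjustFixupValue hunk above, in isolation (PC7 is identical except it subtracts 4). A sketch, with fitsSignedBits standing in for isIntN:

#include <cstdint>

static bool fitsSignedBits(int64_t V, unsigned N) {
  return V >= -(int64_t(1) << (N - 1)) && V < (int64_t(1) << (N - 1));
}

// Returns false when the branch distance cannot be encoded; the backend
// reports "out of range PC10 fixup" in that case.
bool encodePC10(int64_t ByteDistance, int64_t &Field) {
  Field = (ByteDistance - 2) / 2; // signed division, as in the patch
  return fitsSignedBits(Field, 10);
}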
@@ -182,6 +199,7 @@ void MipsAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
switch ((unsigned)Kind) {
case FK_Data_2:
case Mips::fixup_Mips_16:
+ case Mips::fixup_MICROMIPS_PC10_S1:
FullSize = 2;
break;
case FK_Data_8:
@@ -271,6 +289,8 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_MICROMIPS_HI16", 0, 16, 0 },
{ "fixup_MICROMIPS_LO16", 0, 16, 0 },
{ "fixup_MICROMIPS_GOT16", 0, 16, 0 },
+ { "fixup_MICROMIPS_PC7_S1", 0, 7, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC10_S1", 0, 10, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_CALL16", 0, 16, 0 },
{ "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
@@ -334,6 +354,8 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_MICROMIPS_HI16", 16, 16, 0 },
{ "fixup_MICROMIPS_LO16", 16, 16, 0 },
{ "fixup_MICROMIPS_GOT16", 16, 16, 0 },
+ { "fixup_MICROMIPS_PC7_S1", 9, 7, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC10_S1", 6, 10, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC16_S1",16, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_CALL16", 16, 16, 0 },
{ "fixup_MICROMIPS_GOT_DISP", 16, 16, 0 },
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
index d4f4983..dd0e54c 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -16,8 +16,8 @@
#define LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSASMBACKEND_H
#include "MCTargetDesc/MipsFixupKinds.h"
-#include "llvm/MC/MCAsmBackend.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCAsmBackend.h"
namespace llvm {
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 4ea7846..e14dc8d 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -11,6 +11,7 @@
#include "MCTargetDesc/MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
@@ -161,6 +162,12 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_MICROMIPS_GOT16:
Type = ELF::R_MICROMIPS_GOT16;
break;
+ case Mips::fixup_MICROMIPS_PC7_S1:
+ Type = ELF::R_MICROMIPS_PC7_S1;
+ break;
+ case Mips::fixup_MICROMIPS_PC10_S1:
+ Type = ELF::R_MICROMIPS_PC10_S1;
+ break;
case Mips::fixup_MICROMIPS_PC16_S1:
Type = ELF::R_MICROMIPS_PC16_S1;
break;
@@ -219,7 +226,7 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
bool
MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
unsigned Type) const {
- // FIXME: This is extremelly conservative. This really needs to use a
+ // FIXME: This is extremely conservative. This really needs to use a
// whitelist with a clear explanation of why each relocation needs to
// point to the symbol, not to the section.
switch (Type) {
@@ -244,8 +251,11 @@ MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
case ELF::R_MICROMIPS_LO16:
return true;
- case ELF::R_MIPS_26:
case ELF::R_MIPS_32:
+ if (MCELF::getOther(SD) & (ELF::STO_MIPS_MICROMIPS >> 2))
+ return true;
+ // fallthrough
+ case ELF::R_MIPS_26:
case ELF::R_MIPS_64:
case ELF::R_MIPS_GPREL16:
return false;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
index 136146b..bc76d8a 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
@@ -37,7 +37,7 @@ public:
MCCodeEmitter *Emitter, const MCSubtargetInfo &STI)
: MCELFStreamer(Context, MAB, OS, Emitter) {
- RegInfoRecord = new MipsRegInfoRecord(this, Context, STI);
+ RegInfoRecord = new MipsRegInfoRecord(this, Context);
MipsOptionRecords.push_back(
std::unique_ptr<MipsRegInfoRecord>(RegInfoRecord));
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index 317db16..fa8d6a6 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -158,6 +158,12 @@ namespace Mips {
// resulting in - R_MICROMIPS_GOT16
fixup_MICROMIPS_GOT16,
+ // resulting in - R_MICROMIPS_PC7_S1
+ fixup_MICROMIPS_PC7_S1,
+
+ // resulting in - R_MICROMIPS_PC10_S1
+ fixup_MICROMIPS_PC10_S1,
+
// resulting in - R_MICROMIPS_PC16_S1
fixup_MICROMIPS_PC16_S1,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 2f5d196..e2bd5a8 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -34,6 +34,7 @@ MipsMCAsmInfo::MipsMCAsmInfo(StringRef TT) {
Data32bitsDirective = "\t.4byte\t";
Data64bitsDirective = "\t.8byte\t";
PrivateGlobalPrefix = "$";
+ PrivateLabelPrefix = "$";
CommentString = "#";
ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index d632c27..8208725 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -20,9 +20,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"
@@ -173,7 +173,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// Unfortunately in MIPS both NOP and SLL will come in with Binary == 0
// so we have to special check for them.
unsigned Opcode = TmpInst.getOpcode();
- if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary)
+ if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) &&
+ (Opcode != Mips::SLL_MM) && !Binary)
llvm_unreachable("unimplemented opcode in EncodeInstruction()");
if (STI.getFeatureBits() & Mips::FeatureMicroMips) {
@@ -219,6 +220,50 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTarget7OpValueMM - Return binary encoding of the microMIPS branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTarget7OpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTarget7OpValueMM expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MICROMIPS_PC7_S1)));
+ return 0;
+}
+
+/// getBranchTargetOpValueMMPC10 - Return binary encoding of the microMIPS
+/// 10-bit branch target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValueMMPC10(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValueMMPC10 expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MICROMIPS_PC10_S1)));
+ return 0;
+}
+
/// getBranchTargetOpValue - Return binary encoding of the microMIPS branch
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
@@ -635,6 +680,77 @@ MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
}
unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm4(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Base register is encoded in bits 6-4, offset is encoded in bits 3-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),
+ Fixups, STI) << 4;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1),
+ Fixups, STI);
+
+ return (OffBits & 0xF) | RegBits;
+}
+
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm4Lsl1(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Base register is encoded in bits 6-4, offset is encoded in bits 3-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),
+ Fixups, STI) << 4;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1),
+ Fixups, STI) >> 1;
+
+ return (OffBits & 0xF) | RegBits;
+}
+
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm4Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Base register is encoded in bits 6-4, offset is encoded in bits 3-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),
+ Fixups, STI) << 4;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1),
+ Fixups, STI) >> 2;
+
+ return (OffBits & 0xF) | RegBits;
+}
+
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMSPImm5Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Register is encoded in bits 9-5, offset is encoded in bits 4-0.
+ assert(MI.getOperand(OpNo).isReg() &&
+ MI.getOperand(OpNo).getReg() == Mips::SP &&
+ "Unexpected base register!");
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1),
+ Fixups, STI) >> 2;
+
+ return OffBits & 0x1F;
+}
+
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMGPImm7Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Register is encoded in bits 9-7, offset is encoded in bits 6-0.
+ assert(MI.getOperand(OpNo).isReg() &&
+ MI.getOperand(OpNo).getReg() == Mips::GP &&
+ "Unexpected base register!");
+
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1),
+ Fixups, STI) >> 2;
+
+ return OffBits & 0x7F;
+}
+
+unsigned MipsMCCodeEmitter::
getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -657,6 +773,30 @@ getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
return (OffBits & 0x0FFF) | RegBits;
}
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm4sp(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // OpNo can be invalid if the instruction has a register list as an operand.
+ // The memory operand (base + offset) is always the last operand.
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case Mips::SWM16_MM:
+ case Mips::LWM16_MM:
+ OpNo = MI.getNumOperands() - 2;
+ break;
+ }
+
+ // Offset is encoded in bits 3-0.
+ assert(MI.getOperand(OpNo).isReg());
+ // The base register is always SP, so it is not encoded.
+ assert(MI.getOperand(OpNo+1).isImm());
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
+
+ return ((OffBits >> 2) & 0x0F);
+}
+
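The OpNo rewrite is easiest to see on a concrete instruction; assuming the usual MCInst layout for these opcodes (register list first, memory operand last):

    // swm16 $16, $17, $ra, 8($sp)
    // operands: [S0, S1, RA, SP, 8] -- the list is variable-length, so the
    // base register sits at getNumOperands()-2 with the offset after it;
    // the emitted field is (8 >> 2) & 0xF == 2.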
unsigned
MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
@@ -788,4 +928,64 @@ MipsMCCodeEmitter::getRegisterListOpValue(const MCInst &MI, unsigned OpNo,
return res;
}
+unsigned
+MipsMCCodeEmitter::getRegisterListOpValue16(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ return (MI.getNumOperands() - 4);
+}
+
+unsigned
+MipsMCCodeEmitter::getRegisterPairOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ return getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI);
+}
+
+unsigned
+MipsMCCodeEmitter::getMovePRegPairOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned res = 0;
+
+ if (MI.getOperand(0).getReg() == Mips::A1 &&
+ MI.getOperand(1).getReg() == Mips::A2)
+ res = 0;
+ else if (MI.getOperand(0).getReg() == Mips::A1 &&
+ MI.getOperand(1).getReg() == Mips::A3)
+ res = 1;
+ else if (MI.getOperand(0).getReg() == Mips::A2 &&
+ MI.getOperand(1).getReg() == Mips::A3)
+ res = 2;
+ else if (MI.getOperand(0).getReg() == Mips::A0 &&
+ MI.getOperand(1).getReg() == Mips::S5)
+ res = 3;
+ else if (MI.getOperand(0).getReg() == Mips::A0 &&
+ MI.getOperand(1).getReg() == Mips::S6)
+ res = 4;
+ else if (MI.getOperand(0).getReg() == Mips::A0 &&
+ MI.getOperand(1).getReg() == Mips::A1)
+ res = 5;
+ else if (MI.getOperand(0).getReg() == Mips::A0 &&
+ MI.getOperand(1).getReg() == Mips::A2)
+ res = 6;
+ else if (MI.getOperand(0).getReg() == Mips::A0 &&
+ MI.getOperand(1).getReg() == Mips::A3)
+ res = 7;
+
+ return res;
+}
+
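The if/else ladder is a direct transcription of the MOVEP destination-pair table; for instance, assuming the operand order (dst1, dst2, src1, src2):

    // movep $a0, $a1, $s5, $s6  -> destination pair (a0, a1) -> encoding 5
    // An unlisted pair falls through to 0, which collides with the (a1, a2)
    // encoding; the parser-side isMovePRegPair predicate is presumably what
    // keeps such operands from reaching this emitter.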
+unsigned
+MipsMCCodeEmitter::getSimm23Lsl2Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isImm() && "getSimm23Lsl2Encoding expects only an immediate");
+ // The immediate is encoded as 'immediate >> 2'.
+ unsigned Res = static_cast<unsigned>(MO.getImm());
+ assert((Res & 3) == 0 && "Simm23Lsl2 immediate must be word-aligned");
+ return Res >> 2;
+}
+
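A quick worked value for the encoder above:

    // addiupc immediates are word-aligned; for imm = 0x40000 the emitted
    // field is 0x40000 >> 2 == 0x10000, and DecodeSimm23Lsl2 restores the
    // original value by shifting left two bits.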
#include "MipsGenMCCodeEmitter.inc"
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
index 9016fcf..b01726d 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
@@ -31,8 +31,8 @@ class MCSubtargetInfo;
class raw_ostream;
class MipsMCCodeEmitter : public MCCodeEmitter {
- MipsMCCodeEmitter(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ MipsMCCodeEmitter(const MipsMCCodeEmitter &) = delete;
+ void operator=(const MipsMCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
MCContext &Ctx;
bool IsLittleEndian;
@@ -101,6 +101,20 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ // getBranchTarget7OpValueMM - Return binary encoding of the microMIPS
+ // 7-bit branch target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTarget7OpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getBranchTargetOpValueMMPC10 - Return binary encoding of the microMIPS
+ // 10-bit branch target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValueMMPC10(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
// getBranchTargetOpValue - Return binary encoding of the microMIPS branch
// target operand. If the machine operand requires relocation,
// record the relocation and return zero.
@@ -142,9 +156,27 @@ public:
unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm4(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm4Lsl1(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm4Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMSPImm5Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMGPImm7Lsl2(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm4sp(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
@@ -172,12 +204,28 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ unsigned getRegisterPairOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getMovePRegPairOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getSimm23Lsl2Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
unsigned getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
unsigned getRegisterListOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+
+ unsigned getRegisterListOpValue16(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
}; // class MipsMCCodeEmitter
} // namespace llvm.
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index bab4254..9b56067 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
/// Select the Mips CPU for the given triple and cpu name.
/// FIXME: Merge with the copy in MipsSubtarget.cpp
-static inline StringRef selectMipsCPU(StringRef TT, StringRef CPU) {
+StringRef MIPS_MC::selectMipsCPU(StringRef TT, StringRef CPU) {
if (CPU.empty() || CPU == "generic") {
Triple TheTriple(TT);
if (TheTriple.getArch() == Triple::mips ||
@@ -69,7 +69,7 @@ static MCRegisterInfo *createMipsMCRegisterInfo(StringRef TT) {
static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
StringRef FS) {
- CPU = selectMipsCPU(TT, CPU);
+ CPU = MIPS_MC::selectMipsCPU(TT, CPU);
MCSubtargetInfo *X = new MCSubtargetInfo();
InitMipsMCSubtargetInfo(X, TT, CPU, FS);
return X;
@@ -130,10 +130,8 @@ createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
return S;
}
-static MCStreamer *createMipsNullStreamer(MCContext &Ctx) {
- MCStreamer *S = llvm::createNullStreamer(Ctx);
- new MipsTargetStreamer(*S);
- return S;
+static MCTargetStreamer *createMipsNullTargetStreamer(MCStreamer &S) {
+ return new MipsTargetStreamer(S);
}
extern "C" void LLVMInitializeMipsTargetMC() {
@@ -190,11 +188,14 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterAsmStreamer(TheMips64Target, createMCAsmStreamer);
TargetRegistry::RegisterAsmStreamer(TheMips64elTarget, createMCAsmStreamer);
- TargetRegistry::RegisterNullStreamer(TheMipsTarget, createMipsNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheMipselTarget, createMipsNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheMips64Target, createMipsNullStreamer);
- TargetRegistry::RegisterNullStreamer(TheMips64elTarget,
- createMipsNullStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheMipsTarget,
+ createMipsNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheMipselTarget,
+ createMipsNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheMips64Target,
+ createMipsNullTargetStreamer);
+ TargetRegistry::RegisterNullTargetStreamer(TheMips64elTarget,
+ createMipsNullTargetStreamer);
// Register the asm backend.
TargetRegistry::RegisterMCAsmBackend(TheMipsTarget,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index f08a8f4..9528b4e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -55,10 +55,13 @@ MCAsmBackend *createMipsAsmBackendEL64(const Target &T,
const MCRegisterInfo &MRI, StringRef TT,
StringRef CPU);
-MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI,
- bool IsLittleEndian,
- bool Is64Bit);
+MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS, uint8_t OSABI,
+ bool IsLittleEndian, bool Is64Bit);
+
+namespace MIPS_MC {
+StringRef selectMipsCPU(StringRef TT, StringRef CPU);
+}
+
} // End llvm namespace
// Defines symbolic names for Mips registers. This defines a mapping from
diff --git a/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
index 0ef2208..188e3e8 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
@@ -9,14 +9,15 @@
#include "MipsOptionRecord.h"
#include "MipsELFStreamer.h"
+#include "MipsTargetStreamer.h"
#include "llvm/MC/MCSectionELF.h"
using namespace llvm;
void MipsRegInfoRecord::EmitMipsOptionRecord() {
MCAssembler &MCA = Streamer->getAssembler();
- Triple T(STI.getTargetTriple());
- uint64_t Features = STI.getFeatureBits();
+ MipsTargetStreamer *MTS =
+ static_cast<MipsTargetStreamer *>(Streamer->getTargetStreamer());
Streamer->PushSection();
@@ -24,17 +25,16 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
// we don't emit .Mips.options for ELFs other than N64.
// Since .reginfo has the same information as .Mips.options (ODK_REGINFO),
// we can use the same abstraction (MipsRegInfoRecord class) to handle both.
- if (Features & Mips::FeatureN64) {
+ if (MTS->getABI().IsN64()) {
// The EntrySize value of 1 seems strange since the records are neither
// 1-byte long nor fixed length but it matches the value GAS emits.
const MCSectionELF *Sec =
Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS,
- ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP,
- SectionKind::getMetadata(), 1, "");
+ ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP, 1, "");
MCA.getOrCreateSectionData(*Sec).setAlignment(8);
Streamer->SwitchSection(Sec);
- Streamer->EmitIntValue(1, 1); // kind
+ Streamer->EmitIntValue(ELF::ODK_REGINFO, 1); // kind
Streamer->EmitIntValue(40, 1); // size
Streamer->EmitIntValue(0, 2); // section
Streamer->EmitIntValue(0, 4); // info
@@ -46,11 +46,10 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
Streamer->EmitIntValue(ri_cprmask[3], 4);
Streamer->EmitIntValue(ri_gp_value, 8);
} else {
- const MCSectionELF *Sec =
- Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO, ELF::SHF_ALLOC,
- SectionKind::getMetadata(), 24, "");
+ const MCSectionELF *Sec = Context.getELFSection(
+ ".reginfo", ELF::SHT_MIPS_REGINFO, ELF::SHF_ALLOC, 24, "");
MCA.getOrCreateSectionData(*Sec)
- .setAlignment(Features & Mips::FeatureN32 ? 8 : 4);
+ .setAlignment(MTS->getABI().IsN32() ? 8 : 4);
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ri_gprmask, 4);
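The magic numbers 40 (size) and 1 (EntrySize) line up with the ODK_REGINFO record from the MIPS ELF specification; a sketch of the layout the EmitIntValue calls above produce (field names follow the spec; the excerpt elides the 4-byte pad emitted after ri_gprmask):

    #include <cstdint>
    struct OdkRegInfo64 {     // 40 bytes, matching the emitted 'size' field
      uint8_t  kind;          // ELF::ODK_REGINFO == 1
      uint8_t  size;          // 40
      uint16_t section;       // 0
      uint32_t info;          // 0
      uint32_t ri_gprmask;    // general-purpose registers used
      uint32_t ri_pad;        // reserved
      uint32_t ri_cprmask[4]; // coprocessor registers used
      uint64_t ri_gp_value;   // gp register value
    };                        // 8 + 4 + 4 + 16 + 8 == 40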
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 1e092f2..64d7cab 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -43,6 +43,9 @@ void MipsTargetStreamer::emitDirectiveSetNoMacro() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMsa() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetNoMsa() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetAt() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetAtWithArg(unsigned RegNo) {
+ forbidModuleDirective();
+}
void MipsTargetStreamer::emitDirectiveSetNoAt() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveEnd(StringRef Name) {}
void MipsTargetStreamer::emitDirectiveEnt(const MCSymbol &Symbol) {}
@@ -67,9 +70,13 @@ void MipsTargetStreamer::emitDirectiveSetMips4() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips5() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips32() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips32R2() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetMips32R3() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetMips32R5() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips32R6() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips64() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips64R2() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetMips64R3() { forbidModuleDirective(); }
+void MipsTargetStreamer::emitDirectiveSetMips64R5() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetMips64R6() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetPop() {}
void MipsTargetStreamer::emitDirectiveSetPush() {}
@@ -144,6 +151,11 @@ void MipsTargetAsmStreamer::emitDirectiveSetAt() {
MipsTargetStreamer::emitDirectiveSetAt();
}
+void MipsTargetAsmStreamer::emitDirectiveSetAtWithArg(unsigned RegNo) {
+ OS << "\t.set\tat=$" << Twine(RegNo) << "\n";
+ MipsTargetStreamer::emitDirectiveSetAtWithArg(RegNo);
+}
+
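For example, emitDirectiveSetAtWithArg(3) writes:

    .set	at=$3

which tells the assembler to use $3 as the assembler-temporary register.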
void MipsTargetAsmStreamer::emitDirectiveSetNoAt() {
OS << "\t.set\tnoat\n";
MipsTargetStreamer::emitDirectiveSetNoAt();
@@ -223,6 +235,16 @@ void MipsTargetAsmStreamer::emitDirectiveSetMips32R2() {
MipsTargetStreamer::emitDirectiveSetMips32R2();
}
+void MipsTargetAsmStreamer::emitDirectiveSetMips32R3() {
+ OS << "\t.set\tmips32r3\n";
+ MipsTargetStreamer::emitDirectiveSetMips32R3();
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMips32R5() {
+ OS << "\t.set\tmips32r5\n";
+ MipsTargetStreamer::emitDirectiveSetMips32R5();
+}
+
void MipsTargetAsmStreamer::emitDirectiveSetMips32R6() {
OS << "\t.set\tmips32r6\n";
MipsTargetStreamer::emitDirectiveSetMips32R6();
@@ -238,6 +260,16 @@ void MipsTargetAsmStreamer::emitDirectiveSetMips64R2() {
MipsTargetStreamer::emitDirectiveSetMips64R2();
}
+void MipsTargetAsmStreamer::emitDirectiveSetMips64R3() {
+ OS << "\t.set\tmips64r3\n";
+ MipsTargetStreamer::emitDirectiveSetMips64R3();
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMips64R5() {
+ OS << "\t.set\tmips64r5\n";
+ MipsTargetStreamer::emitDirectiveSetMips64R5();
+}
+
void MipsTargetAsmStreamer::emitDirectiveSetMips64R6() {
OS << "\t.set\tmips64r6\n";
MipsTargetStreamer::emitDirectiveSetMips64R6();
@@ -335,19 +367,32 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
const MCSubtargetInfo &STI)
: MipsTargetStreamer(S), MicroMipsEnabled(false), STI(STI) {
MCAssembler &MCA = getStreamer().getAssembler();
- uint64_t Features = STI.getFeatureBits();
Triple T(STI.getTargetTriple());
Pic = (MCA.getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_)
? true
: false;
- // Update e_header flags
- unsigned EFlags = 0;
+ uint64_t Features = STI.getFeatureBits();
+
+ // Set the header flags that we can in the constructor.
+ // FIXME: This is a fairly terrible hack. We set the rest
+ // of these in the destructor. The problem here is two-fold:
+ //
+ // a: Some of the eflags can be set/reset by directives.
+ // b: There aren't any usage paths that initialize the ABI
+ // pointer until after we initialize either an assembler
+ // or the target machine.
+ // We can fix this by making the target streamer construct
+ // the ABI, but this is fraught with wide-ranging dependency
+ // issues as well.
+ unsigned EFlags = MCA.getELFHeaderEFlags();
// Architecture
if (Features & Mips::FeatureMips64r6)
EFlags |= ELF::EF_MIPS_ARCH_64R6;
- else if (Features & Mips::FeatureMips64r2)
+ else if (Features & Mips::FeatureMips64r2 ||
+ Features & Mips::FeatureMips64r3 ||
+ Features & Mips::FeatureMips64r5)
EFlags |= ELF::EF_MIPS_ARCH_64R2;
else if (Features & Mips::FeatureMips64)
EFlags |= ELF::EF_MIPS_ARCH_64;
@@ -359,7 +404,9 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
EFlags |= ELF::EF_MIPS_ARCH_3;
else if (Features & Mips::FeatureMips32r6)
EFlags |= ELF::EF_MIPS_ARCH_32R6;
- else if (Features & Mips::FeatureMips32r2)
+ else if (Features & Mips::FeatureMips32r2 ||
+ Features & Mips::FeatureMips32r3 ||
+ Features & Mips::FeatureMips32r5)
EFlags |= ELF::EF_MIPS_ARCH_32R2;
else if (Features & Mips::FeatureMips32)
EFlags |= ELF::EF_MIPS_ARCH_32;
@@ -368,19 +415,6 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
else
EFlags |= ELF::EF_MIPS_ARCH_1;
- // ABI
- // N64 does not require any ABI bits.
- if (Features & Mips::FeatureO32)
- EFlags |= ELF::EF_MIPS_ABI_O32;
- else if (Features & Mips::FeatureN32)
- EFlags |= ELF::EF_MIPS_ABI2;
-
- if (Features & Mips::FeatureGP64Bit) {
- if (Features & Mips::FeatureO32)
- EFlags |= ELF::EF_MIPS_32BITMODE; /* Compatibility Mode */
- } else if (Features & Mips::FeatureMips64r2 || Features & Mips::FeatureMips64)
- EFlags |= ELF::EF_MIPS_32BITMODE;
-
// Other options.
if (Features & Mips::FeatureNaN2008)
EFlags |= ELF::EF_MIPS_NAN2008;
@@ -388,8 +422,6 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
// -mabicalls and -mplt are not implemented but we should act as if they were
// given.
EFlags |= ELF::EF_MIPS_CPIC;
- if (Features & Mips::FeatureN64)
- EFlags |= ELF::EF_MIPS_PIC;
MCA.setELFHeaderEFlags(EFlags);
}
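Both phases now share a read-modify-write discipline on the header flags; a minimal sketch of the pattern, using the MCAssembler accessors already shown above:

    // Constructor and finish() both accumulate rather than overwrite:
    unsigned EFlags = MCA.getELFHeaderEFlags(); // keep bits set elsewhere
    EFlags |= ELF::EF_MIPS_CPIC;                // OR in what this phase knows
    MCA.setELFHeaderEFlags(EFlags);
    // Starting from 'EFlags = 0' (the old code) would drop any flags set
    // by directives between construction and finish().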
@@ -424,6 +456,32 @@ void MipsTargetELFStreamer::finish() {
DataSectionData.setAlignment(std::max(16u, DataSectionData.getAlignment()));
BSSSectionData.setAlignment(std::max(16u, BSSSectionData.getAlignment()));
+ uint64_t Features = STI.getFeatureBits();
+
+ // Update e_header flags. See the FIXME and comment above in
+ // the constructor for a full rundown on this.
+ unsigned EFlags = MCA.getELFHeaderEFlags();
+
+ // ABI
+ // N64 does not require any ABI bits.
+ if (getABI().IsO32())
+ EFlags |= ELF::EF_MIPS_ABI_O32;
+ else if (getABI().IsN32())
+ EFlags |= ELF::EF_MIPS_ABI2;
+
+ if (Features & Mips::FeatureGP64Bit) {
+ if (getABI().IsO32())
+ EFlags |= ELF::EF_MIPS_32BITMODE; /* Compatibility Mode */
+ } else if (Features & Mips::FeatureMips64r2 || Features & Mips::FeatureMips64)
+ EFlags |= ELF::EF_MIPS_32BITMODE;
+
+ // If the CPIC eflag is set and the ABI is N64, set the PIC eflag
+ // as well.
+ if (EFlags & ELF::EF_MIPS_CPIC && getABI().IsN64())
+ EFlags |= ELF::EF_MIPS_PIC;
+
+ MCA.setELFHeaderEFlags(EFlags);
+
// Emit all the option records.
// At the moment we are only emitting .Mips.options (ODK_REGINFO) and
// .reginfo.
@@ -493,9 +551,8 @@ void MipsTargetELFStreamer::emitDirectiveEnd(StringRef Name) {
MCContext &Context = MCA.getContext();
MCStreamer &OS = getStreamer();
- const MCSectionELF *Sec = Context.getELFSection(".pdr", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHT_REL,
- SectionKind::getMetadata());
+ const MCSectionELF *Sec = Context.getELFSection(
+ ".pdr", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHT_REL);
const MCSymbolRefExpr *ExprRef =
MCSymbolRefExpr::Create(Name, MCSymbolRefExpr::VK_None, Context);
@@ -604,7 +661,7 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) {
// addiu $gp, $gp, %lo(_gp_disp)
// addu $gp, $gp, $reg
// when support for position independent code is enabled.
- if (!Pic || (isN32() || isN64()))
+ if (!Pic || (getABI().IsN32() || getABI().IsN64()))
return;
// There's a GNU extension controlled by -mno-shared that allows
@@ -653,7 +710,7 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo,
const MCSymbol &Sym,
bool IsReg) {
// Only N32 and N64 emit anything for .cpsetup, and only if PIC is set.
- if (!Pic || !(isN32() || isN64()))
+ if (!Pic || !(getABI().IsN32() || getABI().IsN64()))
return;
MCAssembler &MCA = getStreamer().getAssembler();
@@ -677,9 +734,10 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo,
Inst.clear();
const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::Create(
- Sym.getName(), MCSymbolRefExpr::VK_Mips_GPOFF_HI, MCA.getContext());
+ &Sym, MCSymbolRefExpr::VK_Mips_GPOFF_HI, MCA.getContext());
const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::Create(
- Sym.getName(), MCSymbolRefExpr::VK_Mips_GPOFF_LO, MCA.getContext());
+ &Sym, MCSymbolRefExpr::VK_Mips_GPOFF_LO, MCA.getContext());
+
// lui $gp, %hi(%neg(%gp_rel(funcSym)))
Inst.setOpcode(Mips::LUi);
Inst.addOperand(MCOperand::CreateReg(Mips::GP));
@@ -709,9 +767,8 @@ void MipsTargetELFStreamer::emitMipsAbiFlags() {
MCAssembler &MCA = getStreamer().getAssembler();
MCContext &Context = MCA.getContext();
MCStreamer &OS = getStreamer();
- const MCSectionELF *Sec =
- Context.getELFSection(".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS,
- ELF::SHF_ALLOC, SectionKind::getMetadata(), 24, "");
+ const MCSectionELF *Sec = Context.getELFSection(
+ ".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS, ELF::SHF_ALLOC, 24, "");
MCSectionData &ABIShndxSD = MCA.getOrCreateSectionData(*Sec);
ABIShndxSD.setAlignment(8);
OS.SwitchSection(Sec);
diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td
index 59bf949..560afa4 100644
--- a/lib/Target/Mips/MicroMipsInstrFormats.td
+++ b/lib/Target/Mips/MicroMipsInstrFormats.td
@@ -108,6 +108,40 @@ class ADDIUR2_FM_MM16 {
let Inst{0} = 0;
}
+class LOAD_STORE_FM_MM16<bits<6> op> {
+ bits<3> rt;
+ bits<7> addr;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = op;
+ let Inst{9-7} = rt;
+ let Inst{6-4} = addr{6-4};
+ let Inst{3-0} = addr{3-0};
+}
+
+class LOAD_STORE_SP_FM_MM16<bits<6> op> {
+ bits<5> rt;
+ bits<5> offset;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = op;
+ let Inst{9-5} = rt;
+ let Inst{4-0} = offset;
+}
+
+class LOAD_GP_FM_MM16<bits<6> op> {
+ bits<3> rt;
+ bits<7> offset;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = op;
+ let Inst{9-7} = rt;
+ let Inst{6-0} = offset;
+}
+
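These formats are plain bit concatenations, which the generated encoder assembles mechanically; a hedged C++ sketch of the 16-bit word that LOAD_STORE_FM_MM16 describes (helper name hypothetical):

    #include <cstdint>
    // op in bits 15-10, rt in bits 9-7, the 7-bit packed address in bits 6-0.
    static uint16_t encodeLoadStoreMM16(unsigned Op, unsigned Rt, unsigned Addr) {
      return static_cast<uint16_t>(((Op & 0x3F) << 10) | ((Rt & 0x7) << 7) |
                                   (Addr & 0x7F));
    }
    // lw16 (op 0x1a), rt code 2, packed addr 0x53 -> 0x6953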
class ADDIUS5_FM_MM16 {
bits<5> rd;
bits<4> imm;
@@ -195,6 +229,49 @@ class ADDIUR1SP_FM_MM16 {
let Inst{0} = 1;
}
+class BRKSDBBP16_FM_MM<bits<6> op> {
+ bits<4> code_;
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x11;
+ let Inst{9-4} = op;
+ let Inst{3-0} = code_;
+}
+
+class BEQNEZ_FM_MM16<bits<6> op> {
+ bits<3> rs;
+ bits<7> offset;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = op;
+ let Inst{9-7} = rs;
+ let Inst{6-0} = offset;
+}
+
+class B16_FM {
+ bits<10> offset;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x33;
+ let Inst{9-0} = offset;
+}
+
+class MOVEP_FM_MM16 {
+ bits<3> dst_regs;
+ bits<3> rt;
+ bits<3> rs;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x21;
+ let Inst{9-7} = dst_regs;
+ let Inst{6-4} = rt;
+ let Inst{3-1} = rs;
+ let Inst{0} = 0;
+}
+
//===----------------------------------------------------------------------===//
// MicroMIPS 32-bit Instruction Formats
//===----------------------------------------------------------------------===//
@@ -817,3 +894,52 @@ class LWM_FM_MM<bits<4> funct> : MMArch {
let Inst{15-12} = funct;
let Inst{11-0} = addr{11-0};
}
+
+class LWM_FM_MM16<bits<4> funct> : MMArch {
+ bits<2> rt;
+ bits<4> addr;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x11;
+ let Inst{9-6} = funct;
+ let Inst{5-4} = rt;
+ let Inst{3-0} = addr;
+}
+
+class CACHE_PREF_FM_MM<bits<6> op, bits<4> funct> : MMArch {
+ bits<21> addr;
+ bits<5> hint;
+ bits<5> base = addr{20-16};
+ bits<12> offset = addr{11-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = hint;
+ let Inst{20-16} = base;
+ let Inst{15-12} = funct;
+ let Inst{11-0} = offset;
+}
+
+class BARRIER_FM_MM<bits<5> op> : MMArch {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x0;
+ let Inst{25-21} = 0x0;
+ let Inst{20-16} = 0x0;
+ let Inst{15-11} = op;
+ let Inst{10-6} = 0x0;
+ let Inst{5-0} = 0x0;
+}
+
+class ADDIUPC_FM_MM {
+ bits<3> rs;
+ bits<23> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1e;
+ let Inst{25-23} = rs;
+ let Inst{22-0} = imm;
+}
diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td
index e854620..e20df2f 100644
--- a/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1,7 +1,13 @@
def addrimm12 : ComplexPattern<iPTR, 2, "selectIntAddrMM", [frameindex]>;
+def addrimm4lsl2 : ComplexPattern<iPTR, 2, "selectIntAddrLSL2MM", [frameindex]>;
-def simm4 : Operand<i32>;
+def simm4 : Operand<i32> {
+ let DecoderMethod = "DecodeSimm4";
+}
def simm7 : Operand<i32>;
+def li_simm7 : Operand<i32> {
+ let DecoderMethod = "DecodeLiSimm7";
+}
def simm12 : Operand<i32> {
let DecoderMethod = "DecodeSimm12";
@@ -9,14 +15,17 @@ def simm12 : Operand<i32> {
def uimm5_lsl2 : Operand<OtherVT> {
let EncoderMethod = "getUImm5Lsl2Encoding";
+ let DecoderMethod = "DecodeUImm5lsl2";
}
def uimm6_lsl2 : Operand<i32> {
let EncoderMethod = "getUImm6Lsl2Encoding";
+ let DecoderMethod = "DecodeUImm6Lsl2";
}
def simm9_addiusp : Operand<i32> {
let EncoderMethod = "getSImm9AddiuspValue";
+ let DecoderMethod = "DecodeSimm9SP";
}
def uimm3_shift : Operand<i32> {
@@ -25,10 +34,12 @@ def uimm3_shift : Operand<i32> {
def simm3_lsa2 : Operand<i32> {
let EncoderMethod = "getSImm3Lsa2Value";
+ let DecoderMethod = "DecodeAddiur2Simm7";
}
def uimm4_andi : Operand<i32> {
let EncoderMethod = "getUImm4AndValue";
+ let DecoderMethod = "DecodeANDI16Imm";
}
def immSExtAddiur2 : ImmLeaf<i32, [{return Imm == 1 || Imm == -1 ||
@@ -46,6 +57,54 @@ def immZExt2Shift : ImmLeaf<i32, [{return Imm >= 1 && Imm <= 8;}]>;
def immLi16 : ImmLeaf<i32, [{return Imm >= -1 && Imm <= 126;}]>;
+def MicroMipsMemGPRMM16AsmOperand : AsmOperandClass {
+ let Name = "MicroMipsMem";
+ let RenderMethod = "addMicroMipsMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithGRPMM16Base";
+}
+
+class mem_mm_4_generic : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPRMM16, simm4);
+ let OperandType = "OPERAND_MEMORY";
+ let ParserMatchClass = MicroMipsMemGPRMM16AsmOperand;
+}
+
+def mem_mm_4 : mem_mm_4_generic {
+ let EncoderMethod = "getMemEncodingMMImm4";
+}
+
+def mem_mm_4_lsl1 : mem_mm_4_generic {
+ let EncoderMethod = "getMemEncodingMMImm4Lsl1";
+}
+
+def mem_mm_4_lsl2 : mem_mm_4_generic {
+ let EncoderMethod = "getMemEncodingMMImm4Lsl2";
+}
+
+def MicroMipsMemSPAsmOperand : AsmOperandClass {
+ let Name = "MicroMipsMemSP";
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithUimmWordAlignedOffsetSP<7>";
+}
+
+def mem_mm_sp_imm5_lsl2 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPR32:$base, simm5:$offset);
+ let OperandType = "OPERAND_MEMORY";
+ let ParserMatchClass = MicroMipsMemSPAsmOperand;
+ let EncoderMethod = "getMemEncodingMMSPImm5Lsl2";
+}
+
+def mem_mm_gp_imm7_lsl2 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPRMM16:$base, simm7:$offset);
+ let OperandType = "OPERAND_MEMORY";
+ let EncoderMethod = "getMemEncodingMMGPImm7Lsl2";
+}
+
def mem_mm_12 : Operand<i32> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops GPR32, simm12);
@@ -54,6 +113,22 @@ def mem_mm_12 : Operand<i32> {
let OperandType = "OPERAND_MEMORY";
}
+def MipsMemUimm4AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetUimm4";
+ let SuperClasses = [MipsMemAsmOperand];
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithUimmOffsetSP<6>";
+}
+
+def mem_mm_4sp : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPR32, uimm8);
+ let EncoderMethod = "getMemEncodingMMImm4sp";
+ let ParserMatchClass = MipsMemUimm4AsmOperand;
+ let OperandType = "OPERAND_MEMORY";
+}
+
def jmptarget_mm : Operand<OtherVT> {
let EncoderMethod = "getJumpTargetOpValueMM";
}
@@ -62,10 +137,30 @@ def calltarget_mm : Operand<iPTR> {
let EncoderMethod = "getJumpTargetOpValueMM";
}
+def brtarget7_mm : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTarget7OpValueMM";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget7MM";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def brtarget10_mm : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValueMMPC10";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget10MM";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
def brtarget_mm : Operand<OtherVT> {
let EncoderMethod = "getBranchTargetOpValueMM";
let OperandType = "OPERAND_PCREL";
let DecoderMethod = "DecodeBranchTargetMM";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def simm23_lsl2 : Operand<i32> {
+ let EncoderMethod = "getSimm23Lsl2Encoding";
+ let DecoderMethod = "DecodeSimm23Lsl2";
}
class CompactBranchMM<string opstr, DAGOperand opnd, PatFrag cond_op,
@@ -97,6 +192,58 @@ class StoreLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
let DecoderMethod = "DecodeMemMMImm12";
}
+/// A register pair used by the movep instruction.
+def MovePRegPairAsmOperand : AsmOperandClass {
+ let Name = "MovePRegPair";
+ let ParserMethod = "parseMovePRegPair";
+ let PredicateMethod = "isMovePRegPair";
+}
+
+def movep_regpair : Operand<i32> {
+ let EncoderMethod = "getMovePRegPairOpValue";
+ let ParserMatchClass = MovePRegPairAsmOperand;
+ let PrintMethod = "printRegisterList";
+ let DecoderMethod = "DecodeMovePRegPair";
+ let MIOperandInfo = (ops GPR32Opnd, GPR32Opnd);
+}
+
+class MovePMM16<string opstr, RegisterOperand RO> :
+MicroMipsInst16<(outs movep_regpair:$dst_regs), (ins RO:$rs, RO:$rt),
+ !strconcat(opstr, "\t$dst_regs, $rs, $rt"), [],
+ NoItinerary, FrmR> {
+ let isReMaterializable = 1;
+}
+
+/// A register pair used by load/store pair instructions.
+def RegPairAsmOperand : AsmOperandClass {
+ let Name = "RegPair";
+ let ParserMethod = "parseRegisterPair";
+}
+
+def regpair : Operand<i32> {
+ let EncoderMethod = "getRegisterPairOpValue";
+ let ParserMatchClass = RegPairAsmOperand;
+ let PrintMethod = "printRegisterPair";
+ let DecoderMethod = "DecodeRegPairOperand";
+ let MIOperandInfo = (ops GPR32Opnd, GPR32Opnd);
+}
+
+class StorePairMM<string opstr, InstrItinClass Itin = NoItinerary,
+ ComplexPattern Addr = addr> :
+ InstSE<(outs), (ins regpair:$rt, mem_mm_12:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI, opstr> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ let mayStore = 1;
+}
+
+class LoadPairMM<string opstr, InstrItinClass Itin = NoItinerary,
+ ComplexPattern Addr = addr> :
+ InstSE<(outs regpair:$rt), (ins mem_mm_12:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI, opstr> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ let mayLoad = 1;
+}
+
class LLBaseMM<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem_mm_12:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
@@ -156,6 +303,50 @@ class ShiftIMM16<string opstr, Operand ImmOpnd, RegisterOperand RO,
MicroMipsInst16<(outs RO:$rd), (ins RO:$rt, ImmOpnd:$shamt),
!strconcat(opstr, "\t$rd, $rt, $shamt"), [], Itin, FrmR>;
+class LoadMM16<string opstr, DAGOperand RO, SDPatternOperator OpNode,
+ InstrItinClass Itin, Operand MemOpnd> :
+ MicroMipsInst16<(outs RO:$rt), (ins MemOpnd:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm4";
+ let canFoldAsLoad = 1;
+ let mayLoad = 1;
+}
+
+class StoreMM16<string opstr, DAGOperand RTOpnd, DAGOperand RO,
+ SDPatternOperator OpNode, InstrItinClass Itin,
+ Operand MemOpnd> :
+ MicroMipsInst16<(outs), (ins RTOpnd:$rt, MemOpnd:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm4";
+ let mayStore = 1;
+}
+
+class LoadSPMM16<string opstr, DAGOperand RO, InstrItinClass Itin,
+ Operand MemOpnd> :
+ MicroMipsInst16<(outs RO:$rt), (ins MemOpnd:$offset),
+ !strconcat(opstr, "\t$rt, $offset"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMSPImm5Lsl2";
+ let canFoldAsLoad = 1;
+ let mayLoad = 1;
+}
+
+class StoreSPMM16<string opstr, DAGOperand RO, InstrItinClass Itin,
+ Operand MemOpnd> :
+ MicroMipsInst16<(outs), (ins RO:$rt, MemOpnd:$offset),
+ !strconcat(opstr, "\t$rt, $offset"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMSPImm5Lsl2";
+ let mayStore = 1;
+}
+
+class LoadGPMM16<string opstr, DAGOperand RO, InstrItinClass Itin,
+ Operand MemOpnd> :
+ MicroMipsInst16<(outs RO:$rt), (ins MemOpnd:$offset),
+ !strconcat(opstr, "\t$rt, $offset"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMGPImm7Lsl2";
+ let canFoldAsLoad = 1;
+ let mayLoad = 1;
+}
+
class AddImmUR2<string opstr, RegisterOperand RO> :
MicroMipsInst16<(outs RO:$rd), (ins RO:$rs, simm3_lsa2:$imm),
!strconcat(opstr, "\t$rd, $rs, $imm"),
@@ -192,8 +383,7 @@ class MoveMM16<string opstr, RegisterOperand RO, bit isComm = 0,
let isReMaterializable = 1;
}
-class LoadImmMM16<string opstr, Operand Od, RegisterOperand RO,
- SDPatternOperator imm_type = null_frag> :
+class LoadImmMM16<string opstr, Operand Od, RegisterOperand RO> :
MicroMipsInst16<(outs RO:$rd), (ins Od:$imm),
!strconcat(opstr, "\t$rd, $imm"), [], NoItinerary, FrmI> {
let isReMaterializable = 1;
@@ -223,7 +413,6 @@ class JumpRAddiuStackMM16 :
[], IIBranch, FrmR> {
let isTerminator = 1;
let isBarrier = 1;
- let hasDelaySlot = 1;
let isBranch = 1;
let isIndirectBranch = 1;
}
@@ -247,6 +436,21 @@ class JumpRegCMM16<string opstr, RegisterOperand RO> :
let isIndirectBranch = 1;
}
+// Break16 and Sdbbp16
+class BrkSdbbp16MM<string opstr> :
+ MicroMipsInst16<(outs), (ins uimm4:$code_),
+ !strconcat(opstr, "\t$code_"),
+ [], NoItinerary, FrmOther>;
+
+class CBranchZeroMM<string opstr, DAGOperand opnd, RegisterOperand RO> :
+ MicroMipsInst16<(outs), (ins RO:$rs, opnd:$offset),
+ !strconcat(opstr, "\t$rs, $offset"), [], IIBranch, FrmI> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let hasDelaySlot = 1;
+ let Defs = [AT];
+}
+
// MicroMIPS Jump and Link (Call) - Short Delay Slot
let isCall = 1, hasDelaySlot = 1, Defs = [RA] in {
class JumpLinkMM<string opstr, DAGOperand opnd> :
@@ -271,6 +475,10 @@ class LoadWordIndexedScaledMM<string opstr, RegisterOperand RO,
InstSE<(outs RO:$rd), (ins PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$rd, ${index}(${base})"), [], Itin, FrmFI>;
+class AddImmUPC<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rs), (ins simm23_lsl2:$imm),
+ !strconcat(opstr, "\t$rs, $imm"), [], NoItinerary, FrmR>;
+
/// A list of registers used by load/store multiple instructions.
def RegListAsmOperand : AsmOperandClass {
let Name = "RegList";
@@ -284,6 +492,20 @@ def reglist : Operand<i32> {
let DecoderMethod = "DecodeRegListOperand";
}
+def RegList16AsmOperand : AsmOperandClass {
+ let Name = "RegList16";
+ let ParserMethod = "parseRegisterList";
+ let PredicateMethod = "isRegList16";
+ let RenderMethod = "addRegListOperands";
+}
+
+def reglist16 : Operand<i32> {
+ let EncoderMethod = "getRegisterListOpValue16";
+ let DecoderMethod = "DecodeRegListOperand16";
+ let PrintMethod = "printRegisterList";
+ let ParserMatchClass = RegList16AsmOperand;
+}
+
class StoreMultMM<string opstr,
InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
InstSE<(outs), (ins reglist:$rt, mem_mm_12:$addr),
@@ -300,6 +522,36 @@ class LoadMultMM<string opstr,
let mayLoad = 1;
}
+class StoreMultMM16<string opstr,
+ InstrItinClass Itin = NoItinerary,
+ ComplexPattern Addr = addr> :
+ MicroMipsInst16<(outs), (ins reglist16:$rt, mem_mm_4sp:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMReglistImm4Lsl2";
+ let mayStore = 1;
+}
+
+class LoadMultMM16<string opstr,
+ InstrItinClass Itin = NoItinerary,
+ ComplexPattern Addr = addr> :
+ MicroMipsInst16<(outs reglist16:$rt), (ins mem_mm_4sp:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMReglistImm4Lsl2";
+ let mayLoad = 1;
+}
+
+class UncondBranchMM16<string opstr> :
+ MicroMipsInst16<(outs), (ins brtarget10_mm:$offset),
+ !strconcat(opstr, "\t$offset"),
+ [], IIBranch, FrmI> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let hasDelaySlot = 1;
+ let Predicates = [RelocPIC, InMicroMips];
+ let Defs = [AT];
+}
+
def ADDU16_MM : ArithRMM16<"addu16", GPRMM16Opnd, 1, II_ADDU, add>,
ARITH_FM_MM16<0>;
def SUBU16_MM : ArithRMM16<"subu16", GPRMM16Opnd, 0, II_SUBU, sub>,
@@ -316,6 +568,25 @@ def SLL16_MM : ShiftIMM16<"sll16", uimm3_shift, GPRMM16Opnd, II_SLL>,
SHIFT_FM_MM16<0>;
def SRL16_MM : ShiftIMM16<"srl16", uimm3_shift, GPRMM16Opnd, II_SRL>,
SHIFT_FM_MM16<1>;
+def LBU16_MM : LoadMM16<"lbu16", GPRMM16Opnd, zextloadi8, II_LBU,
+ mem_mm_4>, LOAD_STORE_FM_MM16<0x02>;
+def LHU16_MM : LoadMM16<"lhu16", GPRMM16Opnd, zextloadi16, II_LHU,
+ mem_mm_4_lsl1>, LOAD_STORE_FM_MM16<0x0a>;
+def LW16_MM : LoadMM16<"lw16", GPRMM16Opnd, load, II_LW, mem_mm_4_lsl2>,
+ LOAD_STORE_FM_MM16<0x1a>;
+def SB16_MM : StoreMM16<"sb16", GPRMM16OpndZero, GPRMM16Opnd, truncstorei8,
+ II_SB, mem_mm_4>, LOAD_STORE_FM_MM16<0x22>;
+def SH16_MM : StoreMM16<"sh16", GPRMM16OpndZero, GPRMM16Opnd, truncstorei16,
+ II_SH, mem_mm_4_lsl1>,
+ LOAD_STORE_FM_MM16<0x2a>;
+def SW16_MM : StoreMM16<"sw16", GPRMM16OpndZero, GPRMM16Opnd, store, II_SW,
+ mem_mm_4_lsl2>, LOAD_STORE_FM_MM16<0x3a>;
+def LWGP_MM : LoadGPMM16<"lw", GPRMM16Opnd, II_LW, mem_mm_gp_imm7_lsl2>,
+ LOAD_GP_FM_MM16<0x19>;
+def LWSP_MM : LoadSPMM16<"lw", GPR32Opnd, II_LW, mem_mm_sp_imm5_lsl2>,
+ LOAD_STORE_SP_FM_MM16<0x12>;
+def SWSP_MM : StoreSPMM16<"sw", GPR32Opnd, II_SW, mem_mm_sp_imm5_lsl2>,
+ LOAD_STORE_SP_FM_MM16<0x32>;
def ADDIUR1SP_MM : AddImmUR1SP<"addiur1sp", GPRMM16Opnd>, ADDIUR1SP_FM_MM16;
def ADDIUR2_MM : AddImmUR2<"addiur2", GPRMM16Opnd>, ADDIUR2_FM_MM16;
def ADDIUS5_MM : AddImmUS5<"addius5", GPR32Opnd>, ADDIUS5_FM_MM16;
@@ -323,13 +594,21 @@ def ADDIUSP_MM : AddImmUSP<"addiusp">, ADDIUSP_FM_MM16;
def MFHI16_MM : MoveFromHILOMM<"mfhi", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x10>;
def MFLO16_MM : MoveFromHILOMM<"mflo", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x12>;
def MOVE16_MM : MoveMM16<"move", GPR32Opnd>, MOVE_FM_MM16<0x03>;
-def LI16_MM : LoadImmMM16<"li16", simm7, GPRMM16Opnd, immLi16>,
- LI_FM_MM16, IsAsCheapAsAMove;
+def MOVEP_MM : MovePMM16<"movep", GPRMM16OpndMoveP>, MOVEP_FM_MM16;
+def LI16_MM : LoadImmMM16<"li16", li_simm7, GPRMM16Opnd>, LI_FM_MM16,
+ IsAsCheapAsAMove;
def JALR16_MM : JumpLinkRegMM16<"jalr", GPR32Opnd>, JALR_FM_MM16<0x0e>;
def JALRS16_MM : JumpLinkRegSMM16<"jalrs16", GPR32Opnd>, JALR_FM_MM16<0x0f>;
def JRC16_MM : JumpRegCMM16<"jrc", GPR32Opnd>, JALR_FM_MM16<0x0d>;
def JRADDIUSP : JumpRAddiuStackMM16, JRADDIUSP_FM_MM16<0x18>;
def JR16_MM : JumpRegMM16<"jr16", GPR32Opnd>, JALR_FM_MM16<0x0c>;
+def BEQZ16_MM : CBranchZeroMM<"beqz16", brtarget7_mm, GPRMM16Opnd>,
+ BEQNEZ_FM_MM16<0x23>;
+def BNEZ16_MM : CBranchZeroMM<"bnez16", brtarget7_mm, GPRMM16Opnd>,
+ BEQNEZ_FM_MM16<0x2b>;
+def B16_MM : UncondBranchMM16<"b16">, B16_FM;
+def BREAK16_MM : BrkSdbbp16MM<"break16">, BRKSDBBP16_FM_MM<0x28>;
+def SDBBP16_MM : BrkSdbbp16MM<"sdbbp16">, BRKSDBBP16_FM_MM<0x2C>;
class WaitMM<string opstr> :
InstSE<(outs), (ins uimm10:$code_), !strconcat(opstr, "\t$code_"), [],
@@ -387,6 +666,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def UDIV_MM : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x2ec>;
+ /// Arithmetic Instructions with PC and Immediate
+ def ADDIUPC_MM : AddImmUPC<"addiupc", GPRMM16Opnd>, ADDIUPC_FM_MM;
+
/// Shift Instructions
def SLL_MM : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, II_SLL>,
SRA_FM_MM<0, 0>;
@@ -434,6 +716,25 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
/// Load and Store Instructions - multiple
def SWM32_MM : StoreMultMM<"swm32">, LWM_FM_MM<0xd>;
def LWM32_MM : LoadMultMM<"lwm32">, LWM_FM_MM<0x5>;
+ def SWM16_MM : StoreMultMM16<"swm16">, LWM_FM_MM16<0x5>;
+ def LWM16_MM : LoadMultMM16<"lwm16">, LWM_FM_MM16<0x4>;
+
+ /// Load and Store Pair Instructions
+ def SWP_MM : StorePairMM<"swp">, LWM_FM_MM<0x9>;
+ def LWP_MM : LoadPairMM<"lwp">, LWM_FM_MM<0x1>;
+
+ /// Load and Store multiple pseudo Instructions
+ class LoadWordMultMM<string instr_asm> :
+ MipsAsmPseudoInst<(outs reglist:$rt), (ins mem_mm_12:$addr),
+ !strconcat(instr_asm, "\t$rt, $addr")>;
+
+ class StoreWordMultMM<string instr_asm> :
+ MipsAsmPseudoInst<(outs), (ins reglist:$rt, mem_mm_12:$addr),
+ !strconcat(instr_asm, "\t$rt, $addr")>;
+
+ def SWM_MM : StoreWordMultMM<"swm">;
+ def LWM_MM : LoadWordMultMM<"lwm">;
/// Move Conditional
def MOVZ_I_MM : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd,
@@ -487,6 +788,7 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def J_MM : MMRel, JumpFJ<jmptarget_mm, "j", br, bb, "j">,
J_FM_MM<0x35>;
def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>;
+ def JALX_MM : MMRel, JumpLink<"jalx", calltarget>, J_FM_MM<0x3c>;
}
def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>;
def JALR_MM : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
@@ -550,6 +852,16 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def LL_MM : LLBaseMM<"ll", GPR32Opnd>, LL_FM_MM<0x3>;
def SC_MM : SCBaseMM<"sc", GPR32Opnd>, LL_FM_MM<0xb>;
+ let DecoderMethod = "DecodeCacheOpMM" in {
+ def CACHE_MM : MMRel, CacheOp<"cache", mem_mm_12>,
+ CACHE_PREF_FM_MM<0x08, 0x6>;
+ def PREF_MM : MMRel, CacheOp<"pref", mem_mm_12>,
+ CACHE_PREF_FM_MM<0x18, 0x2>;
+ }
+ def SSNOP_MM : MMRel, Barrier<"ssnop">, BARRIER_FM_MM<0x1>;
+ def EHB_MM : MMRel, Barrier<"ehb">, BARRIER_FM_MM<0x3>;
+ def PAUSE_MM : MMRel, Barrier<"pause">, BARRIER_FM_MM<0x5>;
+
def TLBP_MM : MMRel, TLB<"tlbp">, COP0_TLB_FM_MM<0x0d>;
def TLBR_MM : MMRel, TLB<"tlbr">, COP0_TLB_FM_MM<0x4d>;
def TLBWI_MM : MMRel, TLB<"tlbwi">, COP0_TLB_FM_MM<0x8d>;
@@ -565,6 +877,13 @@ let Predicates = [InMicroMips] in {
// MicroMips arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
+def : MipsPat<(i32 immLi16:$imm),
+ (LI16_MM immLi16:$imm)>;
+def : MipsPat<(i32 immSExt16:$imm),
+ (ADDiu_MM ZERO, immSExt16:$imm)>;
+def : MipsPat<(i32 immZExt16:$imm),
+ (ORi_MM ZERO, immZExt16:$imm)>;
+
def : MipsPat<(add GPRMM16:$src, immSExtAddiur2:$imm),
(ADDIUR2_MM GPRMM16:$src, immSExtAddiur2:$imm)>;
def : MipsPat<(add GPR32:$src, immSExtAddius5:$imm),
@@ -587,9 +906,27 @@ def : MipsPat<(srl GPRMM16:$src, immZExt2Shift:$imm),
def : MipsPat<(srl GPR32:$src, immZExt5:$imm),
(SRL_MM GPR32:$src, immZExt5:$imm)>;
+def : MipsPat<(store GPRMM16:$src, addrimm4lsl2:$addr),
+ (SW16_MM GPRMM16:$src, addrimm4lsl2:$addr)>;
+def : MipsPat<(store GPR32:$src, addr:$addr),
+ (SW_MM GPR32:$src, addr:$addr)>;
+
+def : MipsPat<(load addrimm4lsl2:$addr),
+ (LW16_MM addrimm4lsl2:$addr)>;
+def : MipsPat<(load addr:$addr),
+ (LW_MM addr:$addr)>;
+
//===----------------------------------------------------------------------===//
// MicroMips instruction aliases
//===----------------------------------------------------------------------===//
+class UncondBranchMMPseudo<string opstr> :
+ MipsAsmPseudoInst<(outs), (ins brtarget_mm:$offset),
+ !strconcat(opstr, "\t$offset")>;
+
+ def B_MM_Pseudo : UncondBranchMMPseudo<"b">;
+
def : MipsInstAlias<"wait", (WAIT_MM 0x0), 1>;
+ def : MipsInstAlias<"nop", (SLL_MM ZERO, ZERO, 0), 1>;
+ def : MipsInstAlias<"nop", (MOVE16_MM ZERO, ZERO), 1>;
}
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index 87f1b04..cb09c1a 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -22,7 +22,6 @@ namespace llvm {
class MipsTargetMachine;
class FunctionPass;
- FunctionPass *createMipsISelDag(MipsTargetMachine &TM);
FunctionPass *createMipsOptimizePICCallPass(MipsTargetMachine &TM);
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
FunctionPass *createMipsLongBranchPass(MipsTargetMachine &TM);
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index 3e1d047..01c548e 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -69,14 +69,6 @@ def FeatureNaN2008 : SubtargetFeature<"nan2008", "IsNaN2008bit", "true",
"IEEE 754-2008 NaN encoding.">;
def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat",
"true", "Only supports single precision float">;
-def FeatureO32 : SubtargetFeature<"o32", "ABI", "MipsABIInfo::O32()",
- "Enable o32 ABI">;
-def FeatureN32 : SubtargetFeature<"n32", "ABI", "MipsABIInfo::N32()",
- "Enable n32 ABI">;
-def FeatureN64 : SubtargetFeature<"n64", "ABI", "MipsABIInfo::N64()",
- "Enable n64 ABI">;
-def FeatureEABI : SubtargetFeature<"eabi", "ABI", "MipsABIInfo::EABI()",
- "Enable eabi ABI">;
def FeatureNoOddSPReg : SubtargetFeature<"nooddspreg", "UseOddSPReg", "false",
"Disable odd numbered single-precision "
"registers">;
@@ -122,10 +114,16 @@ def FeatureMips32r2 : SubtargetFeature<"mips32r2", "MipsArchVersion",
"Mips32r2", "Mips32r2 ISA Support",
[FeatureMips3_32r2, FeatureMips4_32r2,
FeatureMips5_32r2, FeatureMips32]>;
+def FeatureMips32r3 : SubtargetFeature<"mips32r3", "MipsArchVersion",
+ "Mips32r3", "Mips32r3 ISA Support",
+ [FeatureMips32r2]>;
+def FeatureMips32r5 : SubtargetFeature<"mips32r5", "MipsArchVersion",
+ "Mips32r5", "Mips32r5 ISA Support",
+ [FeatureMips32r3]>;
def FeatureMips32r6 : SubtargetFeature<"mips32r6", "MipsArchVersion",
"Mips32r6",
"Mips32r6 ISA Support [experimental]",
- [FeatureMips32r2, FeatureFP64Bit,
+ [FeatureMips32r5, FeatureFP64Bit,
FeatureNaN2008]>;
def FeatureMips64 : SubtargetFeature<"mips64", "MipsArchVersion",
"Mips64", "Mips64 ISA Support",
@@ -133,10 +131,16 @@ def FeatureMips64 : SubtargetFeature<"mips64", "MipsArchVersion",
def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion",
"Mips64r2", "Mips64r2 ISA Support",
[FeatureMips64, FeatureMips32r2]>;
+def FeatureMips64r3 : SubtargetFeature<"mips64r3", "MipsArchVersion",
+ "Mips64r3", "Mips64r3 ISA Support",
+ [FeatureMips64r2, FeatureMips32r3]>;
+def FeatureMips64r5 : SubtargetFeature<"mips64r5", "MipsArchVersion",
+ "Mips64r5", "Mips64r5 ISA Support",
+ [FeatureMips64r3, FeatureMips32r5]>;
def FeatureMips64r6 : SubtargetFeature<"mips64r6", "MipsArchVersion",
"Mips64r6",
"Mips64r6 ISA Support [experimental]",
- [FeatureMips32r6, FeatureMips64r2,
+ [FeatureMips32r6, FeatureMips64r5,
FeatureNaN2008]>;
def FeatureMips16 : SubtargetFeature<"mips16", "InMips16Mode", "true",
@@ -162,20 +166,24 @@ def FeatureCnMips : SubtargetFeature<"cnmips", "HasCnMips",
class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, MipsGenericItineraries, Features>;
-def : Proc<"mips1", [FeatureMips1, FeatureO32]>;
-def : Proc<"mips2", [FeatureMips2, FeatureO32]>;
-def : Proc<"mips32", [FeatureMips32, FeatureO32]>;
-def : Proc<"mips32r2", [FeatureMips32r2, FeatureO32]>;
-def : Proc<"mips32r6", [FeatureMips32r6, FeatureO32]>;
-
-def : Proc<"mips3", [FeatureMips3, FeatureN64]>;
-def : Proc<"mips4", [FeatureMips4, FeatureN64]>;
-def : Proc<"mips5", [FeatureMips5, FeatureN64]>;
-def : Proc<"mips64", [FeatureMips64, FeatureN64]>;
-def : Proc<"mips64r2", [FeatureMips64r2, FeatureN64]>;
-def : Proc<"mips64r6", [FeatureMips64r6, FeatureN64]>;
-def : Proc<"mips16", [FeatureMips16, FeatureO32]>;
-def : Proc<"octeon", [FeatureMips64r2, FeatureN64, FeatureCnMips]>;
+def : Proc<"mips1", [FeatureMips1]>;
+def : Proc<"mips2", [FeatureMips2]>;
+def : Proc<"mips32", [FeatureMips32]>;
+def : Proc<"mips32r2", [FeatureMips32r2]>;
+def : Proc<"mips32r3", [FeatureMips32r3]>;
+def : Proc<"mips32r5", [FeatureMips32r5]>;
+def : Proc<"mips32r6", [FeatureMips32r6]>;
+
+def : Proc<"mips3", [FeatureMips3]>;
+def : Proc<"mips4", [FeatureMips4]>;
+def : Proc<"mips5", [FeatureMips5]>;
+def : Proc<"mips64", [FeatureMips64]>;
+def : Proc<"mips64r2", [FeatureMips64r2]>;
+def : Proc<"mips64r3", [FeatureMips64r3]>;
+def : Proc<"mips64r5", [FeatureMips64r5]>;
+def : Proc<"mips64r6", [FeatureMips64r6]>;
+def : Proc<"mips16", [FeatureMips16]>;
+def : Proc<"octeon", [FeatureMips64r2, FeatureCnMips]>;
def MipsAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp
index 6070276..abecfa0 100644
--- a/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -36,7 +36,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
const Mips16InstrInfo &TII =
- *static_cast<const Mips16InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Mips16InstrInfo *>(STI.getInstrInfo());
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
uint64_t StackSize = MFI->getStackSize();
@@ -84,7 +84,7 @@ void Mips16FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
const Mips16InstrInfo &TII =
- *static_cast<const Mips16InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Mips16InstrInfo *>(STI.getInstrInfo());
DebugLoc dl = MBBI->getDebugLoc();
uint64_t StackSize = MFI->getStackSize();
@@ -154,7 +154,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
Amount = -Amount;
const Mips16InstrInfo &TII =
- *static_cast<const Mips16InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Mips16InstrInfo *>(STI.getInstrInfo());
TII.adjustStackPtr(Mips::SP, Amount, MBB, I);
}
@@ -174,7 +174,7 @@ void Mips16FrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
const Mips16InstrInfo &TII =
- *static_cast<const Mips16InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const Mips16InstrInfo *>(STI.getInstrInfo());
const MipsRegisterInfo &RI = TII.getRegisterInfo();
const BitVector Reserved = RI.getReservedRegs(MF);
bool SaveS2 = Reserved[Mips::S2];
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index 9488e63..32dc90a 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -247,12 +247,12 @@ static void swapFPIntParams
// Having called needsFPHelperFromSig
//
static void assureFPCallStub(Function &F, Module *M,
- const MipsSubtarget &Subtarget) {
+ const MipsTargetMachine &TM) {
// for now we only need them for static relocation
- if (Subtarget.getRelocationModel() == Reloc::PIC_)
+ if (TM.getRelocationModel() == Reloc::PIC_)
return;
LLVMContext &Context = M->getContext();
- bool LE = Subtarget.isLittle();
+ bool LE = TM.isLittleEndian();
std::string Name = F.getName();
std::string SectionName = ".mips16.call.fp." + Name;
std::string StubName = "__call_stub_fp_" + Name;
@@ -362,8 +362,8 @@ static bool isIntrinsicInline(Function *F) {
// Returns of float, double and complex need to be handled with a helper
// function.
//
-static bool fixupFPReturnAndCall
- (Function &F, Module *M, const MipsSubtarget &Subtarget) {
+static bool fixupFPReturnAndCall(Function &F, Module *M,
+ const MipsTargetMachine &TM) {
bool Modified = false;
LLVMContext &C = M->getContext();
Type *MyVoid = Type::getVoidTy(C);
@@ -426,9 +426,9 @@ static bool fixupFPReturnAndCall
Modified=true;
F.addFnAttr("saveS2");
}
- if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
+ if (TM.getRelocationModel() != Reloc::PIC_ ) {
if (needsFPHelperFromSig(*F_)) {
- assureFPCallStub(*F_, M, Subtarget);
+ assureFPCallStub(*F_, M, TM);
Modified=true;
}
}
@@ -439,9 +439,9 @@ static bool fixupFPReturnAndCall
}
static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
- const MipsSubtarget &Subtarget ) {
- bool PicMode = Subtarget.getRelocationModel() == Reloc::PIC_;
- bool LE = Subtarget.isLittle();
+ const MipsTargetMachine &TM) {
+ bool PicMode = TM.getRelocationModel() == Reloc::PIC_;
+ bool LE = TM.isLittleEndian();
LLVMContext &Context = M->getContext();
std::string Name = F->getName();
std::string SectionName = ".mips16.fn." + Name;
@@ -458,7 +458,6 @@ static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
FStub->setSection(SectionName);
BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
InlineAsmHelper IAH(Context, BB);
- IAH.Out(" .set macro");
if (PicMode) {
IAH.Out(".set noreorder");
IAH.Out(".cpload $$25");
@@ -467,7 +466,6 @@ static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
IAH.Out("la $$25," + LocalName);
}
else {
- IAH.Out(".set reorder");
IAH.Out("la $$25," + Name);
}
swapFPIntParams(PV, M, IAH, LE, false);
@@ -522,11 +520,11 @@ bool Mips16HardFloat::runOnModule(Module &M) {
}
if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
F->hasFnAttribute("nomips16")) continue;
- Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+ Modified |= fixupFPReturnAndCall(*F, &M, TM);
FPParamVariant V = whichFPParamVariantNeeded(*F);
if (V != NoSig) {
Modified = true;
- createFPFnStub(F, &M, V, Subtarget);
+ createFPFnStub(F, &M, V, TM);
}
}
return Modified;
diff --git a/lib/Target/Mips/Mips16HardFloat.h b/lib/Target/Mips/Mips16HardFloat.h
index 19b7bf2..586cc25 100644
--- a/lib/Target/Mips/Mips16HardFloat.h
+++ b/lib/Target/Mips/Mips16HardFloat.h
@@ -25,26 +25,16 @@ using namespace llvm;
namespace llvm {
class Mips16HardFloat : public ModulePass {
-
public:
static char ID;
- Mips16HardFloat(MipsTargetMachine &TM_) : ModulePass(ID),
- TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {
- }
-
- const char *getPassName() const override {
- return "MIPS16 Hard Float Pass";
- }
+ Mips16HardFloat(MipsTargetMachine &TM_) : ModulePass(ID), TM(TM_) {}
+ const char *getPassName() const override { return "MIPS16 Hard Float Pass"; }
bool runOnModule(Module &M) override;
protected:
- /// Keep a pointer to the MipsSubtarget around so that we can make the right
- /// decision when generating code for different targets.
- const TargetMachine &TM;
- const MipsSubtarget &Subtarget;
-
+ const MipsTargetMachine &TM;
};
ModulePass *createMips16HardFloat(MipsTargetMachine &TM);
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index 7732be4..3221ccb 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -37,7 +37,7 @@ using namespace llvm;
#define DEBUG_TYPE "mips-isel"
bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
if (!Subtarget->inMips16Mode())
return false;
return MipsDAGToDAGISel::runOnMachineFunction(MF);
@@ -72,11 +72,10 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator I = MBB.begin();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
unsigned V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg();
- const TargetRegisterClass *RC =
- (const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
+ const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
@@ -103,7 +102,7 @@ void Mips16DAGToDAGISel::initMips16SPAliasReg(MachineFunction &MF) {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator I = MBB.begin();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
unsigned Mips16SPAliasReg = MipsFI->getMips16SPAliasReg();
@@ -135,7 +134,7 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
switch (SD->getMemoryVT().getSizeInBits()) {
case 8:
case 16:
- AliasReg = TM.getSubtargetImpl()->getFrameLowering()->hasFP(*MF)
+ AliasReg = Subtarget->getFrameLowering()->hasFP(*MF)
? AliasFPReg
: getMips16SPAliasReg();
return;
@@ -147,7 +146,7 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
switch (SD->getMemoryVT().getSizeInBits()) {
case 8:
case 16:
- AliasReg = TM.getSubtargetImpl()->getFrameLowering()->hasFP(*MF)
+ AliasReg = Subtarget->getFrameLowering()->hasFP(*MF)
? AliasFPReg
: getMips16SPAliasReg();
return;
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index d4852c4..ede4f37 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -149,7 +149,7 @@ Mips16TargetLowering::Mips16TargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
}
const MipsTargetLowering *
@@ -497,14 +497,14 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
SDValue JumpTarget = Callee;
// T9 should contain the address of the callee function if
- // -reloction-model=pic or it is an indirect call.
+ // -relocation-model=pic or it is an indirect call.
if (IsPICCall || !GlobalOrExternal) {
unsigned V0Reg = Mips::V0;
if (NeedMips16Helper) {
RegsToPass.push_front(std::make_pair(V0Reg, Callee));
JumpTarget = DAG.getExternalSymbol(Mips16HelperFunction, getPointerTy());
ExternalSymbolSDNode *S = cast<ExternalSymbolSDNode>(JumpTarget);
- JumpTarget = getAddrGlobal(S, JumpTarget.getValueType(), DAG,
+ JumpTarget = getAddrGlobal(S, CLI.DL, JumpTarget.getValueType(), DAG,
MipsII::MO_GOT, Chain,
FuncInfo->callPtrInfo(S->getSymbol()));
} else
@@ -522,8 +522,7 @@ MachineBasicBlock *Mips16TargetLowering::
emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -580,13 +579,12 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
return BB;
}
-MachineBasicBlock *Mips16TargetLowering::emitSelT16
- (unsigned Opc1, unsigned Opc2,
- MachineInstr *MI, MachineBasicBlock *BB) const {
+MachineBasicBlock *
+Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr *MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -645,13 +643,13 @@ MachineBasicBlock *Mips16TargetLowering::emitSelT16
}
-MachineBasicBlock *Mips16TargetLowering::emitSeliT16
- (unsigned Opc1, unsigned Opc2,
- MachineInstr *MI, MachineBasicBlock *BB) const {
+MachineBasicBlock *
+Mips16TargetLowering::emitSeliT16(unsigned Opc1, unsigned Opc2,
+ MachineInstr *MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -710,14 +708,13 @@ MachineBasicBlock *Mips16TargetLowering::emitSeliT16
}
-MachineBasicBlock
- *Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
- MachineInstr *MI,
- MachineBasicBlock *BB) const {
+MachineBasicBlock *
+Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
+ MachineInstr *MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned regX = MI->getOperand(0).getReg();
unsigned regY = MI->getOperand(1).getReg();
MachineBasicBlock *target = MI->getOperand(2).getMBB();
@@ -729,12 +726,11 @@ MachineBasicBlock
}
MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
- MachineInstr *MI, MachineBasicBlock *BB) const {
+ unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
+ MachineInstr *MI, MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned regX = MI->getOperand(0).getReg();
int64_t imm = MI->getOperand(1).getImm();
MachineBasicBlock *target = MI->getOperand(2).getMBB();
@@ -763,13 +759,12 @@ static unsigned Mips16WhichOp8uOr16simm
llvm_unreachable("immediate field not usable");
}
-MachineBasicBlock *Mips16TargetLowering::emitFEXT_CCRX16_ins(
- unsigned SltOpc,
- MachineInstr *MI, MachineBasicBlock *BB) const {
+MachineBasicBlock *
+Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr *MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned CC = MI->getOperand(0).getReg();
unsigned regX = MI->getOperand(1).getReg();
unsigned regY = MI->getOperand(2).getReg();
@@ -781,13 +776,13 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_CCRX16_ins(
return BB;
}
-MachineBasicBlock *Mips16TargetLowering::emitFEXT_CCRXI16_ins(
- unsigned SltiOpc, unsigned SltiXOpc,
- MachineInstr *MI, MachineBasicBlock *BB )const {
+MachineBasicBlock *
+Mips16TargetLowering::emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc,
+ MachineInstr *MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned CC = MI->getOperand(0).getReg();
unsigned regX = MI->getOperand(1).getReg();
int64_t Imm = MI->getOperand(2).getImm();
diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp
index 4dd9af2..976becc 100644
--- a/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -144,7 +144,6 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
/// opcode, e.g. turning BEQ to BNE.
unsigned Mips16InstrInfo::getOppositeBranchOpc(unsigned Opc) const {
switch (Opc) {
- default: llvm_unreachable("Illegal opcode!");
case Mips::BeqzRxImmX16: return Mips::BnezRxImmX16;
case Mips::BnezRxImmX16: return Mips::BeqzRxImmX16;
case Mips::BeqzRxImm16: return Mips::BnezRxImm16;
@@ -166,8 +165,7 @@ unsigned Mips16InstrInfo::getOppositeBranchOpc(unsigned Opc) const {
case Mips::BtnezT8SltX16: return Mips::BteqzT8SltX16;
case Mips::BtnezT8SltiX16: return Mips::BteqzT8SltiX16;
}
- assert(false && "Implement this function.");
- return 0;
+ llvm_unreachable("Illegal opcode!");
}
static void addSaveRestoreRegs(MachineInstrBuilder &MIB,
@@ -288,7 +286,7 @@ void Mips16InstrInfo::adjustStackPtrBig(unsigned SP, int64_t Amount,
void Mips16InstrInfo::adjustStackPtrBigUnrestricted(
unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- assert(false && "adjust stack pointer amount exceeded");
+ llvm_unreachable("adjust stack pointer amount exceeded");
}
/// Adjust SP by Amount bytes.
diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td
index 2364f4d..10fff03 100644
--- a/lib/Target/Mips/Mips16InstrInfo.td
+++ b/lib/Target/Mips/Mips16InstrInfo.td
@@ -502,7 +502,7 @@ class ArithLogic16Defs<bit isCom=0> {
bits<5> shamt = 0;
bit isCommutable = isCom;
bit isReMaterializable = 1;
- bit neverHasSideEffects = 1;
+ bit hasSideEffects = 0;
}
class branch16 {
@@ -879,7 +879,7 @@ def MoveR3216: FI8_MOVR3216_ins<"move", IIAlu>;
//
def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
let Uses = [HI0];
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
//
@@ -889,7 +889,7 @@ def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
//
def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
let Uses = [LO0];
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
//
@@ -897,13 +897,13 @@ def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
//
def MultRxRy16: FMULT16_ins<"mult", IIAlu> {
let isCommutable = 1;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let Defs = [HI0, LO0];
}
def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
let isCommutable = 1;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let Defs = [HI0, LO0];
}
@@ -914,7 +914,7 @@ def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
//
def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
let isCommutable = 1;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let Defs = [HI0, LO0];
}
@@ -925,7 +925,7 @@ def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
//
def MultuRxRyRz16: FMULT16_LO_ins<"multu", IIAlu> {
let isCommutable = 1;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let Defs = [HI0, LO0];
}
@@ -1910,7 +1910,7 @@ def cpinst_operand : Operand<i32> {
// is the index into the MachineConstantPool that this is, the third is the
// size in bytes of this constant pool entry.
//
-let neverHasSideEffects = 1, isNotDuplicable = 1 in
+let hasSideEffects = 0, isNotDuplicable = 1 in
def CONSTPOOL_ENTRY :
MipsPseudo16<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
i32imm:$size), "foo", []>;
diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp
index 0bb452a..c45acc4 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -65,7 +65,7 @@ bool Mips16RegisterInfo::saveScavengerRegister
const TargetRegisterClass *RC,
unsigned Reg) const {
DebugLoc DL;
- const TargetInstrInfo &TII = *MBB.getParent()->getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
TII.copyPhysReg(MBB, I, DL, Mips::T0, Reg, true);
TII.copyPhysReg(MBB, UseMI, DL, Reg, Mips::T0, true);
return true;
@@ -106,7 +106,7 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
if (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)
FrameReg = Mips::SP;
else {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
if (TFI->hasFP(MF)) {
FrameReg = Mips::S0;
}
@@ -140,8 +140,7 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
DebugLoc DL = II->getDebugLoc();
unsigned NewImm;
const Mips16InstrInfo &TII =
- *static_cast<const Mips16InstrInfo *>(
- MBB.getParent()->getSubtarget().getInstrInfo());
+ *static_cast<const Mips16InstrInfo *>(Subtarget.getInstrInfo());
FrameReg = TII.loadImmediate(FrameReg, Offset, MBB, II, DL, NewImm);
Offset = SignExtend64<16>(NewImm);
IsKill = true;
diff --git a/lib/Target/Mips/Mips32r6InstrInfo.td b/lib/Target/Mips/Mips32r6InstrInfo.td
index 6d6735b..49c6322 100644
--- a/lib/Target/Mips/Mips32r6InstrInfo.td
+++ b/lib/Target/Mips/Mips32r6InstrInfo.td
@@ -379,7 +379,6 @@ class JMP_IDX_COMPACT_DESC_BASE<string opstr, DAGOperand opnd,
list<dag> Pattern = [];
bit isTerminator = 1;
bit hasDelaySlot = 0;
- string DecoderMethod = "DecodeSimm16";
}
class JIALC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jialc", calloffset16,
@@ -550,6 +549,7 @@ class CACHE_HINT_DESC<string instr_asm, Operand MemOpnd,
dag InOperandList = (ins MemOpnd:$addr, uimm5:$hint);
string AsmString = !strconcat(instr_asm, "\t$hint, $addr");
list<dag> Pattern = [];
+ string DecoderMethod = "DecodeCacheOpR6";
}
class CACHE_DESC : CACHE_HINT_DESC<"cache", mem_simm9, GPR32Opnd>;
@@ -561,6 +561,7 @@ class COP2LD_DESC_BASE<string instr_asm, RegisterOperand COPOpnd> {
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
bit mayLoad = 1;
+ string DecoderMethod = "DecodeFMemCop2R6";
}
class LDC2_R6_DESC : COP2LD_DESC_BASE<"ldc2", COP2Opnd>;
@@ -572,6 +573,7 @@ class COP2ST_DESC_BASE<string instr_asm, RegisterOperand COPOpnd> {
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
bit mayStore = 1;
+ string DecoderMethod = "DecodeFMemCop2R6";
}
class SDC2_R6_DESC : COP2ST_DESC_BASE<"sdc2", COP2Opnd>;
@@ -756,7 +758,7 @@ def : MipsPat<(setge f32:$lhs, f32:$rhs), (CMP_LT_S f32:$rhs, f32:$lhs)>,
ISA_MIPS32R6;
def : MipsPat<(setlt f32:$lhs, f32:$rhs), (CMP_LT_S f32:$lhs, f32:$rhs)>,
ISA_MIPS32R6;
-def : MipsPat<(setlt f32:$lhs, f32:$rhs), (CMP_LE_S f32:$lhs, f32:$rhs)>,
+def : MipsPat<(setle f32:$lhs, f32:$rhs), (CMP_LE_S f32:$lhs, f32:$rhs)>,
ISA_MIPS32R6;
def : MipsPat<(setne f32:$lhs, f32:$rhs),
(NOR (CMP_EQ_S f32:$lhs, f32:$rhs), ZERO)>, ISA_MIPS32R6;
@@ -776,7 +778,7 @@ def : MipsPat<(setge f64:$lhs, f64:$rhs), (CMP_LT_D f64:$rhs, f64:$lhs)>,
ISA_MIPS32R6;
def : MipsPat<(setlt f64:$lhs, f64:$rhs), (CMP_LT_D f64:$lhs, f64:$rhs)>,
ISA_MIPS32R6;
-def : MipsPat<(setlt f64:$lhs, f64:$rhs), (CMP_LE_D f64:$lhs, f64:$rhs)>,
+def : MipsPat<(setle f64:$lhs, f64:$rhs), (CMP_LE_D f64:$lhs, f64:$rhs)>,
ISA_MIPS32R6;
def : MipsPat<(setne f64:$lhs, f64:$rhs),
(NOR (CMP_EQ_D f64:$lhs, f64:$rhs), ZERO)>, ISA_MIPS32R6;
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index 4e2dcd8..776e473 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -16,6 +16,10 @@
//===----------------------------------------------------------------------===//
// Unsigned Operand
+def uimm5_64 : Operand<i64> {
+ let PrintMethod = "printUnsignedImm";
+}
+
def uimm16_64 : Operand<i64> {
let PrintMethod = "printUnsignedImm";
}
@@ -41,6 +45,38 @@ def immSExt10_64 : PatLeaf<(i64 imm),
def immZExt16_64 : PatLeaf<(i64 imm),
[{ return isInt<16>(N->getZExtValue()); }]>;
+def immZExt5_64 : ImmLeaf<i64, [{ return Imm == (Imm & 0x1f); }]>;
+
+// Transformation function: get log2 of low 32 bits of immediate
+def Log2LO : SDNodeXForm<imm, [{
+ return getImm(N, Log2_64((unsigned) N->getZExtValue()));
+}]>;
+
+// Transformation function: get log2 of high 32 bits of immediate
+def Log2HI : SDNodeXForm<imm, [{
+ return getImm(N, Log2_64((unsigned) (N->getZExtValue() >> 32)));
+}]>;
+
+// Predicate: True if immediate is a power of 2 and fits 32 bits
+def PowerOf2LO : PatLeaf<(imm), [{
+ if (N->getValueType(0) == MVT::i64) {
+ uint64_t Imm = N->getZExtValue();
+ return isPowerOf2_64(Imm) && (Imm & 0xffffffff) == Imm;
+ }
+ else
+ return false;
+}]>;
+
+// Predicate: True if immediate is a power of 2 and exceeds 32 bits
+def PowerOf2HI : PatLeaf<(imm), [{
+ if (N->getValueType(0) == MVT::i64) {
+ uint64_t Imm = N->getZExtValue();
+ return isPowerOf2_64(Imm) && (Imm & 0xffffffff00000000) == Imm;
+ }
+ else
+ return false;
+}]>;
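+// For example, Imm = (1 << 35) is rejected by PowerOf2LO but matched by
+// PowerOf2HI; Log2HI then returns 3 (bit 35 = 32 + 3).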
+
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
@@ -290,7 +326,8 @@ class ExtsCins<string opstr, SDPatternOperator Op = null_frag>:
class SetCC64_R<string opstr, PatFrag cond_op> :
InstSE<(outs GPR64Opnd:$rd), (ins GPR64Opnd:$rs, GPR64Opnd:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set GPR64Opnd:$rd, (cond_op GPR64Opnd:$rs, GPR64Opnd:$rt))],
+ [(set GPR64Opnd:$rd, (zext (cond_op GPR64Opnd:$rs,
+ GPR64Opnd:$rt)))],
II_SEQ_SNE, FrmR, opstr> {
let TwoOperandAliasConstraint = "$rd = $rs";
}
@@ -298,17 +335,40 @@ class SetCC64_R<string opstr, PatFrag cond_op> :
class SetCC64_I<string opstr, PatFrag cond_op>:
InstSE<(outs GPR64Opnd:$rt), (ins GPR64Opnd:$rs, simm10_64:$imm10),
!strconcat(opstr, "\t$rt, $rs, $imm10"),
- [(set GPR64Opnd:$rt, (cond_op GPR64Opnd:$rs, immSExt10_64:$imm10))],
+ [(set GPR64Opnd:$rt, (zext (cond_op GPR64Opnd:$rs,
+ immSExt10_64:$imm10)))],
II_SEQI_SNEI, FrmI, opstr> {
let TwoOperandAliasConstraint = "$rt = $rs";
}
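+// bbit0/bbit1 (Octeon) branch on a single bit of $rs, selected by the
+// unsigned immediate $p: the pattern below matches (and $rs, shift << $p)
+// compared against zero, and the "32" variants pass shift = 1 << 32 so
+// that $p selects bit $p + 32.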
+class CBranchBitNum<string opstr, DAGOperand opnd, PatFrag cond_op,
+ RegisterOperand RO, bits<64> shift = 1> :
+ InstSE<(outs), (ins RO:$rs, uimm5_64:$p, opnd:$offset),
+ !strconcat(opstr, "\t$rs, $p, $offset"),
+ [(brcond (i32 (cond_op (and RO:$rs, (shl shift, immZExt5_64:$p)), 0)),
+ bb:$offset)], IIBranch, FrmI, opstr> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let hasDelaySlot = 1;
+ let Defs = [AT];
+}
+
// Unsigned Byte Add
let Pattern = [(set GPR64Opnd:$rd,
(and (add GPR64Opnd:$rs, GPR64Opnd:$rt), 255))] in
def BADDu : ArithLogicR<"baddu", GPR64Opnd, 1, II_BADDU>,
ADD_FM<0x1c, 0x28>;
+// Branch on Bit Clear /+32
+def BBIT0 : CBranchBitNum<"bbit0", brtarget, seteq, GPR64Opnd>, BBIT_FM<0x32>;
+def BBIT032: CBranchBitNum<"bbit032", brtarget, seteq, GPR64Opnd, 0x100000000>,
+ BBIT_FM<0x36>;
+
+// Branch on Bit Set /+32
+def BBIT1 : CBranchBitNum<"bbit1", brtarget, setne, GPR64Opnd>, BBIT_FM<0x3a>;
+def BBIT132: CBranchBitNum<"bbit132", brtarget, setne, GPR64Opnd, 0x100000000>,
+ BBIT_FM<0x3e>;
+
// Multiply Doubleword to GPR
let Defs = [HI0, LO0, P0, P1, P2] in
def DMUL : ArithLogicR<"dmul", GPR64Opnd, 1, II_DMUL, mul>,
@@ -359,6 +419,14 @@ def VMULU : ArithLogicR<"vmulu", GPR64Opnd, 0, II_DMUL>,
}
+/// Move between CPU and coprocessor registers
+let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
+def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd>, MFC3OP_FM<0x10, 1>;
+def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>, ISA_MIPS3;
+def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>, ISA_MIPS3;
+def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>, ISA_MIPS3;
+}
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -426,6 +494,14 @@ def : MipsPat<(trunc (assertzext GPR64:$src)),
def : MipsPat<(i32 (trunc GPR64:$src)),
(SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>;
+// Bypass trunc nodes for bitwise ops.
+def : MipsPat<(i32 (trunc (and GPR64:$lhs, GPR64:$rhs))),
+ (EXTRACT_SUBREG (AND64 GPR64:$lhs, GPR64:$rhs), sub_32)>;
+def : MipsPat<(i32 (trunc (or GPR64:$lhs, GPR64:$rhs))),
+ (EXTRACT_SUBREG (OR64 GPR64:$lhs, GPR64:$rhs), sub_32)>;
+def : MipsPat<(i32 (trunc (xor GPR64:$lhs, GPR64:$rhs))),
+ (EXTRACT_SUBREG (XOR64 GPR64:$lhs, GPR64:$rhs), sub_32)>;
+
// 32-to-64-bit extension
def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>;
def : MipsPat<(i64 (zext GPR32:$src)), (DSRL (DSLL64_32 GPR32:$src), 32)>;
@@ -438,6 +514,28 @@ def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)),
// bswap MipsPattern
def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;
+// Carry pattern
+def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
+ (DSUBu GPR64:$lhs, GPR64:$rhs)>;
+let AdditionalPredicates = [NotDSP] in {
+ def : MipsPat<(addc GPR64:$lhs, GPR64:$rhs),
+ (DADDu GPR64:$lhs, GPR64:$rhs)>;
+ def : MipsPat<(addc GPR64:$lhs, immSExt16:$imm),
+ (DADDiu GPR64:$lhs, imm:$imm)>;
+}
+
+// Octeon bbit0/bbit1 MipsPattern
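+// For example, (brcond (i32 (seteq (and i64:$x, 0x10), 0)), bb) selects
+// "bbit0 $x, 4, bb" since Log2LO(0x10) == 4.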
+let Predicates = [HasMips64, HasCnMips] in {
+def : MipsPat<(brcond (i32 (seteq (and i64:$lhs, PowerOf2LO:$mask), 0)), bb:$dst),
+ (BBIT0 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>;
+def : MipsPat<(brcond (i32 (seteq (and i64:$lhs, PowerOf2HI:$mask), 0)), bb:$dst),
+ (BBIT032 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
+def : MipsPat<(brcond (i32 (setne (and i64:$lhs, PowerOf2LO:$mask), 0)), bb:$dst),
+ (BBIT1 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>;
+def : MipsPat<(brcond (i32 (setne (and i64:$lhs, PowerOf2HI:$mask), 0)), bb:$dst),
+ (BBIT132 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
+}
+
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
@@ -489,19 +587,6 @@ def : MipsInstAlias<"dsrl $rd, $rt, $rs",
(DSRLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
ISA_MIPS3;
-class LoadImm64< string instr_asm, Operand Od, RegisterOperand RO> :
- MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm64),
- !strconcat(instr_asm, "\t$rt, $imm64")> ;
-def LoadImm64Reg : LoadImm64<"dli", imm64, GPR64Opnd>;
-
-/// Move between CPU and coprocessor registers
-let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
-def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd>, MFC3OP_FM<0x10, 1>;
-def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>, ISA_MIPS3;
-def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>, ISA_MIPS3;
-def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>, ISA_MIPS3;
-}
-
// Two operand (implicit 0 selector) versions:
def : MipsInstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
def : MipsInstAlias<"dmtc0 $rt, $rd", (DMTC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
@@ -514,3 +599,12 @@ def : MipsInstAlias<"syncs", (SYNC 0x6), 0>;
def : MipsInstAlias<"syncw", (SYNC 0x4), 0>;
def : MipsInstAlias<"syncws", (SYNC 0x5), 0>;
}
+
+//===----------------------------------------------------------------------===//
+// Assembler Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
+class LoadImm64<string instr_asm, Operand Od, RegisterOperand RO> :
+ MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm64),
+ !strconcat(instr_asm, "\t$rt, $imm64")> ;
+def LoadImm64Reg : LoadImm64<"dli", imm64, GPR64Opnd>;
diff --git a/lib/Target/Mips/MipsABIInfo.cpp b/lib/Target/Mips/MipsABIInfo.cpp
deleted file mode 100644
index f885369..0000000
--- a/lib/Target/Mips/MipsABIInfo.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//===---- MipsABIInfo.cpp - Information about MIPS ABI's ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MipsABIInfo.h"
-#include "MipsRegisterInfo.h"
-
-using namespace llvm;
-
-namespace {
-static const MCPhysReg O32IntRegs[4] = {Mips::A0, Mips::A1, Mips::A2, Mips::A3};
-
-static const MCPhysReg Mips64IntRegs[8] = {
- Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
- Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
-}
-
-const ArrayRef<MCPhysReg> MipsABIInfo::GetByValArgRegs() const {
- if (IsO32())
- return makeArrayRef(O32IntRegs);
- if (IsN32() || IsN64())
- return makeArrayRef(Mips64IntRegs);
- llvm_unreachable("Unhandled ABI");
-}
-
-const ArrayRef<MCPhysReg> MipsABIInfo::GetVarArgRegs() const {
- if (IsO32())
- return makeArrayRef(O32IntRegs);
- if (IsN32() || IsN64())
- return makeArrayRef(Mips64IntRegs);
- llvm_unreachable("Unhandled ABI");
-}
-
-unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
- if (IsO32())
- return CC != CallingConv::Fast ? 16 : 0;
- if (IsN32() || IsN64() || IsEABI())
- return 0;
- llvm_unreachable("Unhandled ABI");
-}
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 832fa05..c662e13 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -19,6 +19,7 @@
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
#include "MipsMCInstLower.h"
+#include "MipsTargetMachine.h"
#include "MipsTargetStreamer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -53,12 +54,12 @@ using namespace llvm;
#define DEBUG_TYPE "mips-asm-printer"
-MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() {
+MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() const {
return static_cast<MipsTargetStreamer &>(*OutStreamer.getTargetStreamer());
}
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ Subtarget = &MF.getSubtarget<MipsSubtarget>();
// Initialize TargetLoweringObjectFile.
const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
@@ -319,7 +320,7 @@ void MipsAsmPrinter::emitFrameDirective() {
/// Emit Set directives.
const char *MipsAsmPrinter::getCurrentABIString() const {
- switch (Subtarget->getABI().GetEnumValue()) {
+ switch (static_cast<MipsTargetMachine &>(TM).getABI().GetEnumValue()) {
case MipsABIInfo::ABI::O32: return "abi32";
case MipsABIInfo::ABI::N32: return "abiN32";
case MipsABIInfo::ABI::N64: return "abi64";
@@ -357,10 +358,7 @@ void MipsAsmPrinter::EmitFunctionBodyStart() {
MCInstLowering.Initialize(&MF->getContext());
- bool IsNakedFunction =
- MF->getFunction()->
- getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::Naked);
+ bool IsNakedFunction = MF->getFunction()->hasFnAttribute(Attribute::Naked);
if (!IsNakedFunction)
emitFrameDirective();
@@ -560,7 +558,7 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(opNum);
bool closeP = false;
@@ -689,7 +687,21 @@ printRegisterList(const MachineInstr *MI, int opNum, raw_ostream &O) {
}
void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
- bool IsABICalls = Subtarget->isABICalls();
+
+ // Compute MIPS architecture attributes based on the default subtarget
+ // that we'd have constructed. Module-level directives aren't LTO-clean
+ // anyhow.
+ // FIXME: For ifunc-related functions we could iterate over them and look
+ // for a feature string that doesn't match the default one.
+ StringRef TT = TM.getTargetTriple();
+ StringRef CPU =
+ MIPS_MC::selectMipsCPU(TM.getTargetTriple(), TM.getTargetCPU());
+ StringRef FS = TM.getTargetFeatureString();
+ const MipsTargetMachine &MTM = static_cast<const MipsTargetMachine &>(TM);
+ const MipsSubtarget STI(TT, CPU, FS, MTM.isLittleEndian(), MTM);
+
+ bool IsABICalls = STI.isABICalls();
+ const MipsABIInfo &ABI = MTM.getABI();
if (IsABICalls) {
getTargetStreamer().emitDirectiveAbiCalls();
Reloc::Model RM = TM.getRelocationModel();
@@ -697,68 +709,88 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// Ideally it should test for properties of the ABI and not the ABI
// itself.
// For the moment, I'm only correcting enough to make MIPS-IV work.
- if (RM == Reloc::Static && !Subtarget->isABI_N64())
+ if (RM == Reloc::Static && !ABI.IsN64())
getTargetStreamer().emitDirectiveOptionPic0();
}
// Tell the assembler which ABI we are using
std::string SectionName = std::string(".mdebug.") + getCurrentABIString();
- OutStreamer.SwitchSection(OutContext.getELFSection(
- SectionName, ELF::SHT_PROGBITS, 0, SectionKind::getDataRel()));
+ OutStreamer.SwitchSection(
+ OutContext.getELFSection(SectionName, ELF::SHT_PROGBITS, 0));
// NaN: At the moment we only support:
// 1. .nan legacy (default)
// 2. .nan 2008
- Subtarget->isNaN2008() ? getTargetStreamer().emitDirectiveNaN2008()
- : getTargetStreamer().emitDirectiveNaNLegacy();
+ STI.isNaN2008() ? getTargetStreamer().emitDirectiveNaN2008()
+ : getTargetStreamer().emitDirectiveNaNLegacy();
// TODO: handle O64 ABI
- if (Subtarget->isABI_EABI()) {
- if (Subtarget->isGP32bit())
- OutStreamer.SwitchSection(
- OutContext.getELFSection(".gcc_compiled_long32", ELF::SHT_PROGBITS, 0,
- SectionKind::getDataRel()));
+ if (ABI.IsEABI()) {
+ if (STI.isGP32bit())
+ OutStreamer.SwitchSection(OutContext.getELFSection(".gcc_compiled_long32",
+ ELF::SHT_PROGBITS, 0));
else
- OutStreamer.SwitchSection(
- OutContext.getELFSection(".gcc_compiled_long64", ELF::SHT_PROGBITS, 0,
- SectionKind::getDataRel()));
+ OutStreamer.SwitchSection(OutContext.getELFSection(".gcc_compiled_long64",
+ ELF::SHT_PROGBITS, 0));
}
- getTargetStreamer().updateABIInfo(*Subtarget);
+ getTargetStreamer().updateABIInfo(STI);
// We should always emit a '.module fp=...' but binutils 2.24 does not accept
// it. We therefore emit it when it contradicts the ABI defaults (-mfpxx or
// -mfp64) and omit it otherwise.
- if (Subtarget->isABI_O32() && (Subtarget->isABI_FPXX() ||
- Subtarget->isFP64bit()))
+ if (ABI.IsO32() && (STI.isABI_FPXX() || STI.isFP64bit()))
getTargetStreamer().emitDirectiveModuleFP();
// We should always emit a '.module [no]oddspreg' but binutils 2.24 does not
// accept it. We therefore emit it when it contradicts the default or an
// option has changed the default (i.e. FPXX) and omit it otherwise.
- if (Subtarget->isABI_O32() && (!Subtarget->useOddSPReg() ||
- Subtarget->isABI_FPXX()))
- getTargetStreamer().emitDirectiveModuleOddSPReg(Subtarget->useOddSPReg(),
- Subtarget->isABI_O32());
+ if (ABI.IsO32() && (!STI.useOddSPReg() || STI.isABI_FPXX()))
+ getTargetStreamer().emitDirectiveModuleOddSPReg(STI.useOddSPReg(),
+ ABI.IsO32());
+}
+
+void MipsAsmPrinter::emitInlineAsmStart() const {
+ MipsTargetStreamer &TS = getTargetStreamer();
+
+ // GCC's choice of assembler options for inline assembly code ('at', 'macro'
+ // and 'reorder') is different from LLVM's choice for generated code ('noat',
+ // 'nomacro' and 'noreorder').
+ // In order to maintain compatibility with inline assembly code which depends
+ // on GCC's assembler options being used, we have to switch to those options
+ // for the duration of the inline assembly block and then switch back.
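+ // The inline assembly thus ends up bracketed roughly as:
+ //   .set push
+ //   .set at
+ //   .set macro
+ //   .set reorder
+ //   <inline asm>
+ //   .set pop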
+ TS.emitDirectiveSetPush();
+ TS.emitDirectiveSetAt();
+ TS.emitDirectiveSetMacro();
+ TS.emitDirectiveSetReorder();
+ OutStreamer.AddBlankLine();
}
-void MipsAsmPrinter::EmitJal(MCSymbol *Symbol) {
+void MipsAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const {
+ OutStreamer.AddBlankLine();
+ getTargetStreamer().emitDirectiveSetPop();
+}
+
+void MipsAsmPrinter::EmitJal(const MCSubtargetInfo &STI, MCSymbol *Symbol) {
MCInst I;
I.setOpcode(Mips::JAL);
I.addOperand(
MCOperand::CreateExpr(MCSymbolRefExpr::Create(Symbol, OutContext)));
- OutStreamer.EmitInstruction(I, getSubtargetInfo());
+ OutStreamer.EmitInstruction(I, STI);
}
-void MipsAsmPrinter::EmitInstrReg(unsigned Opcode, unsigned Reg) {
+void MipsAsmPrinter::EmitInstrReg(const MCSubtargetInfo &STI, unsigned Opcode,
+ unsigned Reg) {
MCInst I;
I.setOpcode(Opcode);
I.addOperand(MCOperand::CreateReg(Reg));
- OutStreamer.EmitInstruction(I, getSubtargetInfo());
+ OutStreamer.EmitInstruction(I, STI);
}
-void MipsAsmPrinter::EmitInstrRegReg(unsigned Opcode, unsigned Reg1,
+void MipsAsmPrinter::EmitInstrRegReg(const MCSubtargetInfo &STI,
+ unsigned Opcode, unsigned Reg1,
unsigned Reg2) {
MCInst I;
//
@@ -774,20 +806,22 @@ void MipsAsmPrinter::EmitInstrRegReg(unsigned Opcode, unsigned Reg1,
I.setOpcode(Opcode);
I.addOperand(MCOperand::CreateReg(Reg1));
I.addOperand(MCOperand::CreateReg(Reg2));
- OutStreamer.EmitInstruction(I, getSubtargetInfo());
+ OutStreamer.EmitInstruction(I, STI);
}
-void MipsAsmPrinter::EmitInstrRegRegReg(unsigned Opcode, unsigned Reg1,
+void MipsAsmPrinter::EmitInstrRegRegReg(const MCSubtargetInfo &STI,
+ unsigned Opcode, unsigned Reg1,
unsigned Reg2, unsigned Reg3) {
MCInst I;
I.setOpcode(Opcode);
I.addOperand(MCOperand::CreateReg(Reg1));
I.addOperand(MCOperand::CreateReg(Reg2));
I.addOperand(MCOperand::CreateReg(Reg3));
- OutStreamer.EmitInstruction(I, getSubtargetInfo());
+ OutStreamer.EmitInstruction(I, STI);
}
-void MipsAsmPrinter::EmitMovFPIntPair(unsigned MovOpc, unsigned Reg1,
+void MipsAsmPrinter::EmitMovFPIntPair(const MCSubtargetInfo &STI,
+ unsigned MovOpc, unsigned Reg1,
unsigned Reg2, unsigned FPReg1,
unsigned FPReg2, bool LE) {
if (!LE) {
@@ -795,59 +829,60 @@ void MipsAsmPrinter::EmitMovFPIntPair(unsigned MovOpc, unsigned Reg1,
Reg1 = Reg2;
Reg2 = temp;
}
- EmitInstrRegReg(MovOpc, Reg1, FPReg1);
- EmitInstrRegReg(MovOpc, Reg2, FPReg2);
+ EmitInstrRegReg(STI, MovOpc, Reg1, FPReg1);
+ EmitInstrRegReg(STI, MovOpc, Reg2, FPReg2);
}
-void MipsAsmPrinter::EmitSwapFPIntParams(Mips16HardFloatInfo::FPParamVariant PV,
+void MipsAsmPrinter::EmitSwapFPIntParams(const MCSubtargetInfo &STI,
+ Mips16HardFloatInfo::FPParamVariant PV,
bool LE, bool ToFP) {
using namespace Mips16HardFloatInfo;
unsigned MovOpc = ToFP ? Mips::MTC1 : Mips::MFC1;
switch (PV) {
case FSig:
- EmitInstrRegReg(MovOpc, Mips::A0, Mips::F12);
+ EmitInstrRegReg(STI, MovOpc, Mips::A0, Mips::F12);
break;
case FFSig:
- EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F14, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F14, LE);
break;
case FDSig:
- EmitInstrRegReg(MovOpc, Mips::A0, Mips::F12);
- EmitMovFPIntPair(MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
+ EmitInstrRegReg(STI, MovOpc, Mips::A0, Mips::F12);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
break;
case DSig:
- EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
break;
case DDSig:
- EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
- EmitMovFPIntPair(MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
break;
case DFSig:
- EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
- EmitInstrRegReg(MovOpc, Mips::A2, Mips::F14);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ EmitInstrRegReg(STI, MovOpc, Mips::A2, Mips::F14);
break;
case NoSig:
return;
}
}
-void
-MipsAsmPrinter::EmitSwapFPIntRetval(Mips16HardFloatInfo::FPReturnVariant RV,
- bool LE) {
+void MipsAsmPrinter::EmitSwapFPIntRetval(
+ const MCSubtargetInfo &STI, Mips16HardFloatInfo::FPReturnVariant RV,
+ bool LE) {
using namespace Mips16HardFloatInfo;
unsigned MovOpc = Mips::MFC1;
switch (RV) {
case FRet:
- EmitInstrRegReg(MovOpc, Mips::V0, Mips::F0);
+ EmitInstrRegReg(STI, MovOpc, Mips::V0, Mips::F0);
break;
case DRet:
- EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
break;
case CFRet:
- EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
break;
case CDRet:
- EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
- EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F2, Mips::F3, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ EmitMovFPIntPair(STI, MovOpc, Mips::A0, Mips::A1, Mips::F2, Mips::F3, LE);
break;
case NoFPRet:
break;
@@ -858,7 +893,14 @@ void MipsAsmPrinter::EmitFPCallStub(
const char *Symbol, const Mips16HardFloatInfo::FuncSignature *Signature) {
MCSymbol *MSymbol = OutContext.GetOrCreateSymbol(StringRef(Symbol));
using namespace Mips16HardFloatInfo;
- bool LE = Subtarget->isLittle();
+ bool LE = getDataLayout().isLittleEndian();
+ // Construct a local MCSubtargetInfo here.
+ // This is needed because no MachineFunction exists at this point; the
+ // stubs are emitted at the module level, so we can use the
+ // default-constructed subtarget.
+ std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
+ TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+
//
// .global xxxx
//
@@ -921,7 +963,7 @@ void MipsAsmPrinter::EmitFPCallStub(
//
const MCSectionELF *M = OutContext.getELFSection(
".mips16.call.fp." + std::string(Symbol), ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_EXECINSTR, SectionKind::getText());
+ ELF::SHF_ALLOC | ELF::SHF_EXECINSTR);
OutStreamer.SwitchSection(M, nullptr);
//
// .align 2
@@ -946,13 +988,10 @@ void MipsAsmPrinter::EmitFPCallStub(
OutContext.GetOrCreateSymbol("__call_stub_fp_" + Twine(Symbol));
OutStreamer.EmitSymbolAttribute(MType, MCSA_ELF_TypeFunction);
OutStreamer.EmitLabel(Stub);
- //
- // we just handle non pic for now. these function will not be
- // called otherwise. when the full stub generation is moved here
- // we need to deal with pic.
- //
- if (Subtarget->getRelocationModel() == Reloc::PIC_)
- llvm_unreachable("should not be here if we are compiling pic");
+
+ // Only handle non-pic for now.
+ assert(TM.getRelocationModel() != Reloc::PIC_ &&
+ "should not be here if we are compiling pic");
TS.emitDirectiveSetReorder();
//
// We need to add a MipsMCExpr class to MCTargetDesc to fully implement
@@ -969,22 +1008,22 @@ void MipsAsmPrinter::EmitFPCallStub(
//
// Mov $18, $31
- EmitInstrRegRegReg(Mips::ADDu, Mips::S2, Mips::RA, Mips::ZERO);
+ EmitInstrRegRegReg(*STI, Mips::ADDu, Mips::S2, Mips::RA, Mips::ZERO);
- EmitSwapFPIntParams(Signature->ParamSig, LE, true);
+ EmitSwapFPIntParams(*STI, Signature->ParamSig, LE, true);
// Jal xxxx
//
- EmitJal(MSymbol);
+ EmitJal(*STI, MSymbol);
// fix return values
- EmitSwapFPIntRetval(Signature->RetSig, LE);
+ EmitSwapFPIntRetval(*STI, Signature->RetSig, LE);
//
// do the return
// if (Signature->RetSig == NoFPRet)
// llvm_unreachable("should not be any stubs here with no return value");
// else
- EmitInstrReg(Mips::JR, Mips::S2);
+ EmitInstrReg(*STI, Mips::JR, Mips::S2);
MCSymbol *Tmp = OutContext.CreateTempSymbol();
OutStreamer.EmitLabel(Tmp);
diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h
index 0582e21..d4c5b80 100644
--- a/lib/Target/Mips/MipsAsmPrinter.h
+++ b/lib/Target/Mips/MipsAsmPrinter.h
@@ -31,7 +31,7 @@ class Module;
class raw_ostream;
class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
- MipsTargetStreamer &getTargetStreamer();
+ MipsTargetStreamer &getTargetStreamer() const;
void EmitInstrWithMacroNoAT(const MachineInstr *MI);
@@ -60,22 +60,31 @@ private:
std::map<const char *, const llvm::Mips16HardFloatInfo::FuncSignature *>
StubsNeeded;
- void EmitJal(MCSymbol *Symbol);
+ void emitInlineAsmStart() const override;
- void EmitInstrReg(unsigned Opcode, unsigned Reg);
+ void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const override;
- void EmitInstrRegReg(unsigned Opcode, unsigned Reg1, unsigned Reg2);
+ void EmitJal(const MCSubtargetInfo &STI, MCSymbol *Symbol);
- void EmitInstrRegRegReg(unsigned Opcode, unsigned Reg1, unsigned Reg2,
- unsigned Reg3);
+ void EmitInstrReg(const MCSubtargetInfo &STI, unsigned Opcode, unsigned Reg);
- void EmitMovFPIntPair(unsigned MovOpc, unsigned Reg1, unsigned Reg2,
- unsigned FPReg1, unsigned FPReg2, bool LE);
+ void EmitInstrRegReg(const MCSubtargetInfo &STI, unsigned Opcode,
+ unsigned Reg1, unsigned Reg2);
- void EmitSwapFPIntParams(Mips16HardFloatInfo::FPParamVariant, bool LE,
+ void EmitInstrRegRegReg(const MCSubtargetInfo &STI, unsigned Opcode,
+ unsigned Reg1, unsigned Reg2, unsigned Reg3);
+
+ void EmitMovFPIntPair(const MCSubtargetInfo &STI, unsigned MovOpc,
+ unsigned Reg1, unsigned Reg2, unsigned FPReg1,
+ unsigned FPReg2, bool LE);
+
+ void EmitSwapFPIntParams(const MCSubtargetInfo &STI,
+ Mips16HardFloatInfo::FPParamVariant, bool LE,
bool ToFP);
- void EmitSwapFPIntRetval(Mips16HardFloatInfo::FPReturnVariant, bool LE);
+ void EmitSwapFPIntRetval(const MCSubtargetInfo &STI,
+ Mips16HardFloatInfo::FPReturnVariant, bool LE);
void EmitFPCallStub(const char *, const Mips16HardFloatInfo::FuncSignature *);
@@ -89,14 +98,10 @@ public:
const MipsFunctionInfo *MipsFI;
MipsMCInstLower MCInstLowering;
- // We initialize the subtarget here and in runOnMachineFunction
- // since there are certain target specific flags (ABI) that could
- // reside on the TargetMachine, but are on the subtarget currently
- // and we need them for the beginning of file output before we've
- // seen a single function.
- explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), MCP(nullptr), InConstantPool(false),
- Subtarget(&TM.getSubtarget<MipsSubtarget>()), MCInstLowering(*this) {}
+ explicit MipsAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), MCP(nullptr),
+ InConstantPool(false), MCInstLowering(*this) {}
const char *getPassName() const override {
return "Mips Assembly Printer";
diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp
index e18cc8b..b808129 100644
--- a/lib/Target/Mips/MipsCCState.cpp
+++ b/lib/Target/Mips/MipsCCState.cpp
@@ -132,8 +132,8 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
continue;
}
- assert(Ins[i].OrigArgIndex < MF.getFunction()->arg_size());
- std::advance(FuncArg, Ins[i].OrigArgIndex);
+ assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+ std::advance(FuncArg, Ins[i].getOrigArgIndex());
OriginalArgWasF128.push_back(
originalTypeIsF128(FuncArg->getType(), nullptr));
diff --git a/lib/Target/Mips/MipsCCState.h b/lib/Target/Mips/MipsCCState.h
index cc4531d..081c393 100644
--- a/lib/Target/Mips/MipsCCState.h
+++ b/lib/Target/Mips/MipsCCState.h
@@ -10,9 +10,9 @@
#ifndef MIPSCCSTATE_H
#define MIPSCCSTATE_H
+#include "MipsISelLowering.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "MipsISelLowering.h"
namespace llvm {
class SDNode;
@@ -85,10 +85,10 @@ public:
// provide a means of accessing ArgListEntry::IsFixed. Delete them from this
// class. This doesn't stop them being used via the base class though.
void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
- CCAssignFn Fn) LLVM_DELETED_FUNCTION;
+ CCAssignFn Fn) = delete;
void AnalyzeCallOperands(const SmallVectorImpl<MVT> &Outs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
- CCAssignFn Fn) LLVM_DELETED_FUNCTION;
+ CCAssignFn Fn) = delete;
void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 7318de2..abee185 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -20,6 +20,29 @@ class CCIfSubtarget<string F, CCAction A, string Invert = "">
// The inverse of CCIfSubtarget
class CCIfSubtargetNot<string F, CCAction A> : CCIfSubtarget<F, A, "!">;
+/// Match if the original argument (before lowering) was not a float.
+/// For example, this is false for i32's that were lowered from soft-float.
+class CCIfOrigArgWasNotFloat<CCAction A>
+ : CCIf<"!static_cast<MipsCCState *>(&State)->WasOriginalArgFloat(ValNo)",
+ A>;
+
+/// Match if the original argument (before lowering) was a 128-bit float (i.e.
+/// long double).
+class CCIfOrigArgWasF128<CCAction A>
+ : CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)", A>;
+
+/// Match if this specific argument is a vararg.
+/// This is slightly different from CCIfIsVarArg, which matches if any argument is
+/// a vararg.
+class CCIfArgIsVarArg<CCAction A>
+ : CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)", A>;
+
+/// Match if the special calling conv is the specified value.
+class CCIfSpecialCallingConv<string CC, CCAction A>
+ : CCIf<"static_cast<MipsCCState *>(&State)->getSpecialCallingConv() == "
+ "MipsCCState::" # CC, A>;
+
// For soft-float, f128 values are returned in A0_64 rather than V1_64.
def RetCC_F128SoftFloat : CallingConv<[
CCAssignToReg<[V0_64, A0_64]>
@@ -105,9 +128,7 @@ def CC_MipsN : CallingConv<[
CCIfInReg<CCPromoteToUpperBitsInType<i64>>>>,
// All integers (except soft-float integers) are promoted to 64-bit.
- CCIfType<[i8, i16, i32],
- CCIf<"!static_cast<MipsCCState *>(&State)->WasOriginalArgFloat(ValNo)",
- CCPromoteToType<i64>>>,
+ CCIfType<[i8, i16, i32], CCIfOrigArgWasNotFloat<CCPromoteToType<i64>>>,
// The only i32's we have left are soft-float arguments.
CCIfSubtarget<"abiUsesSoftFloat()", CCIfType<[i32], CCDelegateTo<CC_MipsN_SoftFloat>>>,
@@ -138,6 +159,10 @@ def CC_MipsN : CallingConv<[
// N32/64 variable arguments.
// All arguments are passed in integer registers.
def CC_MipsN_VarArg : CallingConv<[
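+ // On big-endian targets, pass small integers in the upper bits of the
+ // 64-bit slot, mirroring the fixed-argument handling in CC_MipsN above.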
+ CCIfType<[i8, i16, i32, i64],
+ CCIfSubtargetNot<"isLittle()",
+ CCIfInReg<CCPromoteToUpperBitsInType<i64>>>>,
+
// All integers are promoted to 64-bit.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
@@ -162,9 +187,7 @@ def RetCC_MipsN : CallingConv<[
//
// f128 should only occur for the N64 ABI where long double is 128-bit. On
// N32, long double is equivalent to double.
- CCIfType<[i64],
- CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)",
- CCDelegateTo<RetCC_F128>>>,
+ CCIfType<[i64], CCIfOrigArgWasF128<CCDelegateTo<RetCC_F128>>>,
// Aggregate returns are positioned at the lowest address in the slot for
// both little and big-endian targets. When passing in registers, this
@@ -330,8 +353,7 @@ def CC_Mips16RetHelper : CallingConv<[
def CC_Mips_FixedArg : CallingConv<[
// Mips16 needs special handling on some functions.
CCIf<"State.getCallingConv() != CallingConv::Fast",
- CCIf<"static_cast<MipsCCState *>(&State)->getSpecialCallingConv() == "
- "MipsCCState::Mips16RetHelperConv",
+ CCIfSpecialCallingConv<"Mips16RetHelperConv",
CCDelegateTo<CC_Mips16RetHelper>>>,
CCIfByVal<CCDelegateTo<CC_Mips_ByVal>>,
@@ -348,8 +370,7 @@ def CC_Mips_FixedArg : CallingConv<[
// N32, long double is equivalent to double.
CCIfType<[i64],
CCIfSubtargetNot<"abiUsesSoftFloat()",
- CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)",
- CCBitConvertToType<f64>>>>,
+ CCIfOrigArgWasF128<CCBitConvertToType<f64>>>>,
CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_Mips_FastCC>>,
@@ -369,9 +390,7 @@ def CC_Mips_VarArg : CallingConv<[
]>;
def CC_Mips : CallingConv<[
- CCIfVarArg<
- CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)",
- CCDelegateTo<CC_Mips_VarArg>>>,
+ CCIfVarArg<CCIfArgIsVarArg<CCDelegateTo<CC_Mips_VarArg>>>,
CCDelegateTo<CC_Mips_FixedArg>
]>;
diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td
index 690f626..af10cd4 100644
--- a/lib/Target/Mips/MipsCondMov.td
+++ b/lib/Target/Mips/MipsCondMov.td
@@ -263,3 +263,40 @@ defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
FGR_64;
defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
FGR_64;
+
+// For targets that don't have conditional-move instructions
+// we have to match SELECT nodes with pseudo instructions.
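+// These pseudos are marked usesCustomInserter, so after instruction
+// selection EmitInstrWithCustomInserter expands each of them into the usual
+// compare-branch-PHI diamond.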
+let usesCustomInserter = 1 in {
+ class Select_Pseudo<RegisterOperand RC> :
+ PseudoSE<(outs RC:$dst), (ins GPR32Opnd:$cond, RC:$T, RC:$F),
+ [(set RC:$dst, (select GPR32Opnd:$cond, RC:$T, RC:$F))]>,
+ ISA_MIPS1_NOT_4_32;
+
+ class SelectFP_Pseudo_T<RegisterOperand RC> :
+ PseudoSE<(outs RC:$dst), (ins GPR32Opnd:$cond, RC:$T, RC:$F),
+ [(set RC:$dst, (MipsCMovFP_T RC:$T, GPR32Opnd:$cond, RC:$F))]>,
+ ISA_MIPS1_NOT_4_32;
+
+ class SelectFP_Pseudo_F<RegisterOperand RC> :
+ PseudoSE<(outs RC:$dst), (ins GPR32Opnd:$cond, RC:$T, RC:$F),
+ [(set RC:$dst, (MipsCMovFP_F RC:$T, GPR32Opnd:$cond, RC:$F))]>,
+ ISA_MIPS1_NOT_4_32;
+}
+
+def PseudoSELECT_I : Select_Pseudo<GPR32Opnd>;
+def PseudoSELECT_I64 : Select_Pseudo<GPR64Opnd>;
+def PseudoSELECT_S : Select_Pseudo<FGR32Opnd>;
+def PseudoSELECT_D32 : Select_Pseudo<AFGR64Opnd>, FGR_32;
+def PseudoSELECT_D64 : Select_Pseudo<FGR64Opnd>, FGR_64;
+
+def PseudoSELECTFP_T_I : SelectFP_Pseudo_T<GPR32Opnd>;
+def PseudoSELECTFP_T_I64 : SelectFP_Pseudo_T<GPR64Opnd>;
+def PseudoSELECTFP_T_S : SelectFP_Pseudo_T<FGR32Opnd>;
+def PseudoSELECTFP_T_D32 : SelectFP_Pseudo_T<AFGR64Opnd>, FGR_32;
+def PseudoSELECTFP_T_D64 : SelectFP_Pseudo_T<FGR64Opnd>, FGR_64;
+
+def PseudoSELECTFP_F_I : SelectFP_Pseudo_F<GPR32Opnd>;
+def PseudoSELECTFP_F_I64 : SelectFP_Pseudo_F<GPR64Opnd>;
+def PseudoSELECTFP_F_S : SelectFP_Pseudo_F<FGR32Opnd>;
+def PseudoSELECTFP_F_D32 : SelectFP_Pseudo_F<AFGR64Opnd>, FGR_32;
+def PseudoSELECTFP_F_D64 : SelectFP_Pseudo_F<FGR64Opnd>, FGR_64;
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index c4e5ac0..96553d2 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -448,14 +448,12 @@ bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// FIXME:
MF = &mf;
MCP = mf.getConstantPool();
- STI = &mf.getTarget().getSubtarget<MipsSubtarget>();
+ STI = &static_cast<const MipsSubtarget &>(mf.getSubtarget());
DEBUG(dbgs() << "constant island machine function " << "\n");
if (!STI->inMips16Mode() || !MipsSubtarget::useConstantIslands()) {
return false;
}
- TII = (const Mips16InstrInfo *)MF->getTarget()
- .getSubtargetImpl()
- ->getInstrInfo();
+ TII = (const Mips16InstrInfo *)STI->getInstrInfo();
MFI = MF->getInfo<MipsFunctionInfo>();
DEBUG(dbgs() << "constant island processing " << "\n");
//
@@ -562,7 +560,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// identity mapping of CPI's to CPE's.
const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
- const DataLayout &TD = *MF->getSubtarget().getDataLayout();
+ const DataLayout &TD = *MF->getTarget().getDataLayout();
for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
assert(Size >= 4 && "Too small constant pool entry");
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index d7ba6d4..ac03c0b 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -69,7 +69,7 @@ namespace {
class RegDefsUses {
public:
- RegDefsUses(TargetMachine &TM);
+ RegDefsUses(const TargetRegisterInfo &TRI);
void init(const MachineInstr &MI);
/// This function sets all caller-saved registers in Defs.
@@ -196,6 +196,12 @@ namespace {
private:
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
+ Iter replaceWithCompactBranch(MachineBasicBlock &MBB,
+ Iter Branch, DebugLoc DL);
+
+ Iter replaceWithCompactJump(MachineBasicBlock &MBB,
+ Iter Jump, DebugLoc DL);
+
/// This function checks if it is valid to move Candidate to the delay slot
/// and returns true if it isn't. It also updates memory and register
/// dependence information.
@@ -207,7 +213,7 @@ namespace {
template<typename IterTy>
bool searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
RegDefsUses &RegDU, InspectMemInstr &IM,
- IterTy &Filler) const;
+ IterTy &Filler, Iter Slot) const;
/// This function searches in the backward direction for an instruction that
/// can be moved to the delay slot. Returns true on success.
@@ -275,11 +281,7 @@ static void addLiveInRegs(Iter Filler, MachineBasicBlock &MBB) {
#ifndef NDEBUG
const MachineFunction &MF = *MBB.getParent();
- assert(MF.getTarget()
- .getSubtargetImpl()
- ->getRegisterInfo()
- ->getAllocatableSet(MF)
- .test(R) &&
+ assert(MF.getSubtarget().getRegisterInfo()->getAllocatableSet(MF).test(R) &&
"Shouldn't move an instruction with unallocatable registers across "
"basic block boundaries.");
#endif
@@ -289,9 +291,8 @@ static void addLiveInRegs(Iter Filler, MachineBasicBlock &MBB) {
}
}
-RegDefsUses::RegDefsUses(TargetMachine &TM)
- : TRI(*TM.getSubtargetImpl()->getRegisterInfo()),
- Defs(TRI.getNumRegs(), false), Uses(TRI.getNumRegs(), false) {}
+RegDefsUses::RegDefsUses(const TargetRegisterInfo &TRI)
+ : TRI(TRI), Defs(TRI.getNumRegs(), false), Uses(TRI.getNumRegs(), false) {}
void RegDefsUses::init(const MachineInstr &MI) {
// Add all register operands which are explicit and non-variadic.
@@ -494,42 +495,135 @@ getUnderlyingObjects(const MachineInstr &MI,
return true;
}
+// Replace Branch with the compact branch instruction.
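+// For example, "beq $2, $zero, BB" (which needs a delay slot) becomes the
+// compact "beqzc $2, BB", which does not.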
+Iter Filler::replaceWithCompactBranch(MachineBasicBlock &MBB,
+ Iter Branch, DebugLoc DL) {
+ const MipsInstrInfo *TII =
+ MBB.getParent()->getSubtarget<MipsSubtarget>().getInstrInfo();
+
+ unsigned NewOpcode =
+ (((unsigned) Branch->getOpcode()) == Mips::BEQ) ? Mips::BEQZC_MM
+ : Mips::BNEZC_MM;
+
+ const MCInstrDesc &NewDesc = TII->get(NewOpcode);
+ MachineInstrBuilder MIB = BuildMI(MBB, Branch, DL, NewDesc);
+
+ MIB.addReg(Branch->getOperand(0).getReg());
+ MIB.addMBB(Branch->getOperand(2).getMBB());
+
+ Iter tmpIter = Branch;
+ Branch = std::prev(Branch);
+ MBB.erase(tmpIter);
+
+ return Branch;
+}
+
+// Replace Jumps with the compact jump instruction.
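+// For example, "jr $ra" becomes the 16-bit compact "jrc $ra".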
+Iter Filler::replaceWithCompactJump(MachineBasicBlock &MBB,
+ Iter Jump, DebugLoc DL) {
+ const MipsInstrInfo *TII =
+ MBB.getParent()->getSubtarget<MipsSubtarget>().getInstrInfo();
+
+ const MCInstrDesc &NewDesc = TII->get(Mips::JRC16_MM);
+ MachineInstrBuilder MIB = BuildMI(MBB, Jump, DL, NewDesc);
+
+ MIB.addReg(Jump->getOperand(0).getReg());
+
+ Iter tmpIter = Jump;
+ Jump = std::prev(Jump);
+ MBB.erase(tmpIter);
+
+ return Jump;
+}
+
+// For a given opcode, returns the opcode of the corresponding call
+// instruction with a short delay slot.
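+// The "S" forms have a short (16-bit) delay slot, which is what lets the
+// caller keep a 2-byte filler in the slot.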
+static int getEquivalentCallShort(int Opcode) {
+ switch (Opcode) {
+ case Mips::BGEZAL:
+ return Mips::BGEZALS_MM;
+ case Mips::BLTZAL:
+ return Mips::BLTZALS_MM;
+ case Mips::JAL:
+ return Mips::JALS_MM;
+ case Mips::JALR:
+ return Mips::JALRS_MM;
+ case Mips::JALR16_MM:
+ return Mips::JALRS16_MM;
+ default:
+ llvm_unreachable("Unexpected call instruction for microMIPS.");
+ }
+}
+
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
- bool InMicroMipsMode = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode();
+ const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
+ bool InMicroMipsMode = STI.inMicroMipsMode();
+ const MipsInstrInfo *TII = STI.getInstrInfo();
for (Iter I = MBB.begin(); I != MBB.end(); ++I) {
if (!hasUnoccupiedSlot(&*I))
continue;
- // For microMIPS, at the moment, do not fill delay slots of call
- // instructions.
- //
- // TODO: Support for replacing regular call instructions with corresponding
- // short delay slot instructions should be implemented.
- if (!InMicroMipsMode || !I->isCall()) {
- ++FilledSlots;
- Changed = true;
-
- // Delay slot filling is disabled at -O0.
- if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
- if (searchBackward(MBB, I))
- continue;
+ ++FilledSlots;
+ Changed = true;
- if (I->isTerminator()) {
- if (searchSuccBBs(MBB, I))
- continue;
- } else if (searchForward(MBB, I)) {
- continue;
+ // Delay slot filling is disabled at -O0.
+ if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
+ bool Filled = false;
+
+ if (searchBackward(MBB, I)) {
+ Filled = true;
+ } else if (I->isTerminator()) {
+ if (searchSuccBBs(MBB, I)) {
+ Filled = true;
+ }
+ } else if (searchForward(MBB, I)) {
+ Filled = true;
+ }
+
+ if (Filled) {
+ // Get instruction with delay slot.
+ MachineBasicBlock::instr_iterator DSI(I);
+
+ if (InMicroMipsMode && TII->GetInstSizeInBytes(std::next(DSI)) == 2 &&
+ DSI->isCall()) {
+ // If instruction in delay slot is 16b change opcode to
+ // corresponding instruction with short delay slot.
+ DSI->setDesc(TII->get(getEquivalentCallShort(DSI->getOpcode())));
}
+
+ continue;
}
}
+ // If the instruction is a BEQ or BNE with one ZERO register operand,
+ // then instead of adding a NOP, replace the instruction with the
+ // corresponding compact branch, i.e. BEQZC or BNEZC.
+ unsigned Opcode = I->getOpcode();
+ if (InMicroMipsMode) {
+ switch (Opcode) {
+ case Mips::BEQ:
+ case Mips::BNE:
+ if (((unsigned) I->getOperand(1).getReg()) == Mips::ZERO) {
+ I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
+ continue;
+ }
+ break;
+ case Mips::JR:
+ case Mips::PseudoReturn:
+ case Mips::PseudoIndirectBranch:
+ // For microMIPS the PseudoReturn and PseudoIndirectBranch are always
+ // expanded to JR_MM, so they can be replaced with JRC16_MM.
+ I = replaceWithCompactJump(MBB, I, I->getDebugLoc());
+ continue;
+ default:
+ break;
+ }
+ }
// Bundle the NOP to the instruction with the delay slot.
- const MipsInstrInfo *TII = static_cast<const MipsInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, std::next(I, 2));
}
@@ -546,7 +640,7 @@ FunctionPass *llvm::createMipsDelaySlotFillerPass(MipsTargetMachine &tm) {
template<typename IterTy>
bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
RegDefsUses &RegDU, InspectMemInstr& IM,
- IterTy &Filler) const {
+ IterTy &Filler, Iter Slot) const {
for (IterTy I = Begin; I != End; ++I) {
// skip debug value
if (I->isDebugValue())
@@ -561,7 +655,8 @@ bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
if (delayHasHazard(*I, RegDU, IM))
continue;
- if (TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
+ if (STI.isTargetNaCl()) {
// In NaCl, instructions that must be masked are forbidden in delay slots.
// We only check for loads, stores and SP changes. Calls, returns and
// branches are not checked because non-NaCl targets never put them in
@@ -569,11 +664,18 @@ bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
unsigned AddrIdx;
if ((isBasePlusOffsetMemoryAccess(I->getOpcode(), &AddrIdx) &&
baseRegNeedsLoadStoreMask(I->getOperand(AddrIdx).getReg())) ||
- I->modifiesRegister(Mips::SP,
- TM.getSubtargetImpl()->getRegisterInfo()))
+ I->modifiesRegister(Mips::SP, STI.getRegisterInfo()))
continue;
}
+ bool InMicroMipsMode = STI.inMicroMipsMode();
+ const MipsInstrInfo *TII = STI.getInstrInfo();
+ unsigned Opcode = (*Slot).getOpcode();
+ if (InMicroMipsMode && TII->GetInstSizeInBytes(&(*I)) == 2 &&
+ (Opcode == Mips::JR || Opcode == Mips::PseudoIndirectBranch ||
+ Opcode == Mips::PseudoReturn))
+ continue;
+
Filler = I;
return true;
}
@@ -585,13 +687,14 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
if (DisableBackwardSearch)
return false;
- RegDefsUses RegDU(TM);
+ RegDefsUses RegDU(*MBB.getParent()->getSubtarget().getRegisterInfo());
MemDefsUses MemDU(MBB.getParent()->getFrameInfo());
ReverseIter Filler;
RegDU.init(*Slot);
- if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler))
+ if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler,
+ Slot))
return false;
MBB.splice(std::next(Slot), &MBB, std::next(Filler).base());
@@ -605,13 +708,13 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
if (DisableForwardSearch || !Slot->isCall())
return false;
- RegDefsUses RegDU(TM);
+ RegDefsUses RegDU(*MBB.getParent()->getSubtarget().getRegisterInfo());
NoMemInstr NM;
Iter Filler;
RegDU.setCallerSaved(*Slot);
- if (!searchRange(MBB, std::next(Slot), MBB.end(), RegDU, NM, Filler))
+ if (!searchRange(MBB, std::next(Slot), MBB.end(), RegDU, NM, Filler, Slot))
return false;
MBB.splice(std::next(Slot), &MBB, Filler);
@@ -629,7 +732,7 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
if (!SuccBB)
return false;
- RegDefsUses RegDU(TM);
+ RegDefsUses RegDU(*MBB.getParent()->getSubtarget().getRegisterInfo());
bool HasMultipleSuccs = false;
BB2BrMap BrMap;
std::unique_ptr<InspectMemInstr> IM;
@@ -654,7 +757,8 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
IM.reset(new MemDefsUses(MFI));
}
- if (!searchRange(MBB, SuccBB->begin(), SuccBB->end(), RegDU, *IM, Filler))
+ if (!searchRange(MBB, SuccBB->begin(), SuccBB->end(), RegDU, *IM, Filler,
+ Slot))
return false;
insertDelayFiller(Filler, BrMap);
@@ -681,7 +785,7 @@ MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const {
std::pair<MipsInstrInfo::BranchType, MachineInstr *>
Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const {
const MipsInstrInfo *TII =
- static_cast<const MipsInstrInfo *>(TM.getSubtargetImpl()->getInstrInfo());
+ MBB.getParent()->getSubtarget<MipsSubtarget>().getInstrInfo();
MachineBasicBlock *TrueBB = nullptr, *FalseBB = nullptr;
SmallVector<MachineInstr*, 2> BranchInstrs;
SmallVector<MachineOperand, 2> Cond;
diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp
index 2bb16e3..7d69659 100644
--- a/lib/Target/Mips/MipsFastISel.cpp
+++ b/lib/Target/Mips/MipsFastISel.cpp
@@ -1,19 +1,21 @@
//===-- MipsFastISel.cpp - Mips FastISel implementation ------------------===//
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/CodeGen/FastISel.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/IR/GlobalAlias.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "MipsCCState.h"
-#include "MipsRegisterInfo.h"
+#include "MipsInstrInfo.h"
#include "MipsISelLowering.h"
#include "MipsMachineFunction.h"
+#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
@@ -43,6 +45,7 @@ class MipsFastISel final : public FastISel {
void setKind(BaseKind K) { Kind = K; }
BaseKind getKind() const { return Kind; }
bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
void setReg(unsigned Reg) {
assert(isRegBase() && "Invalid base register access!");
Base.Reg = Reg;
@@ -51,6 +54,15 @@ class MipsFastISel final : public FastISel {
assert(isRegBase() && "Invalid base register access!");
return Base.Reg;
}
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ Base.FI = FI;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+
void setOffset(int64_t Offset_) { Offset = Offset_; }
int64_t getOffset() const { return Offset; }
void setGlobalValue(const GlobalValue *G) { GV = G; }
@@ -59,11 +71,10 @@ class MipsFastISel final : public FastISel {
/// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
/// make the right decision when generating code for different targets.
- Module &M;
const TargetMachine &TM;
+ const MipsSubtarget *Subtarget;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const MipsSubtarget *Subtarget;
MipsFunctionInfo *MFI;
// Convenience variables to avoid some queries.
@@ -94,6 +105,7 @@ private:
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool computeAddress(const Value *Obj, Address &Addr);
bool computeCallAddress(const Value *V, Address &Addr);
+ void simplifyAddress(Address &Addr);
// Emit helper routines.
bool emitCmp(unsigned DestReg, const CmpInst *CI);
@@ -157,17 +169,15 @@ public:
// Backend specific FastISel code.
explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
- : FastISel(funcInfo, libInfo),
- M(const_cast<Module &>(*funcInfo.Fn->getParent())),
- TM(funcInfo.MF->getTarget()),
- TII(*TM.getSubtargetImpl()->getInstrInfo()),
- TLI(*TM.getSubtargetImpl()->getTargetLowering()),
- Subtarget(&TM.getSubtarget<MipsSubtarget>()) {
+ : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
+ Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
+ TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
Context = &funcInfo.Fn->getContext();
- TargetSupported = ((Subtarget->getRelocationModel() == Reloc::PIC_) &&
- ((Subtarget->hasMips32r2() || Subtarget->hasMips32()) &&
- (Subtarget->isABI_O32())));
+ TargetSupported =
+ ((TM.getRelocationModel() == Reloc::PIC_) &&
+ ((Subtarget->hasMips32r2() || Subtarget->hasMips32()) &&
+ (static_cast<const MipsTargetMachine &>(TM).getABI().IsO32())));
UnsupportedFPMode = Subtarget->isFP64bit();
}
@@ -188,9 +198,9 @@ static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
llvm_unreachable("should not be called");
}
-bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State) {
+static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
llvm_unreachable("should not be called");
}
@@ -306,14 +316,82 @@ unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
}
bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
- // This construct looks a big awkward but it is how other ports handle this
- // and as this function is more fully completed, these cases which
- // return false will have additional code in them.
- //
- if (isa<Instruction>(Obj))
- return false;
- else if (isa<ConstantExpr>(Obj))
+
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (isa<ConstantExpr>(Obj))
return false;
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return computeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ gep_type_iterator GTI = gep_type_begin(U);
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
+ ++i, ++GTI) {
+ const Value *Op = *i;
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ }
Addr.setReg(getRegForValue(Obj));
return Addr.getReg() != 0;
}
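
The GetElementPtr case added above folds every constant index into a single byte offset before falling back to a plain register base. A simplified, self-contained model of that folding, where the Index struct and hard-coded sizes stand in for DataLayout queries:

    // Model of the constant-offset folding in computeAddress(); the Index
    // struct and sizes are assumptions for illustration, not LLVM types.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Index {
      bool IsStruct;         // struct field vs. array element
      uint64_t FieldOffset;  // byte offset of the field (struct case)
      int64_t ArrayIdx;      // constant index (array case)
      uint64_t ElemSize;     // allocation size of the element (array case)
    };

    int64_t foldGEPOffset(int64_t Base, const std::vector<Index> &Indices) {
      int64_t Off = Base;
      for (const Index &I : Indices)
        Off += I.IsStruct ? (int64_t)I.FieldOffset
                          : I.ArrayIdx * (int64_t)I.ElemSize;
      return Off;
    }

    int main() {
      // gep {i32,i32}* %s, i64 2, i32 1  ->  2 * 8 + 4 = 20 bytes
      assert(foldGEPOffset(0, {{false, 0, 2, 8}, {true, 4, 0, 0}}) == 20);
    }
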
@@ -519,8 +597,26 @@ bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
default:
return false;
}
- emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
- return true;
+ if (Addr.isRegBase()) {
+ simplifyAddress(Addr);
+ emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
+ return true;
+ }
+ if (Addr.isFIBase()) {
+ unsigned FI = Addr.getFI();
+ unsigned Align = 4;
+ unsigned Offset = Addr.getOffset();
+ MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FI), Align);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addFrameIndex(FI)
+ .addImm(Offset)
+ .addMemOperand(MMO);
+ return true;
+ }
+ return false;
}
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
@@ -552,8 +648,27 @@ bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
default:
return false;
}
- emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
- return true;
+ if (Addr.isRegBase()) {
+ simplifyAddress(Addr);
+ emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
+ return true;
+ }
+ if (Addr.isFIBase()) {
+ unsigned FI = Addr.getFI();
+ unsigned Align = 4;
+ unsigned Offset = Addr.getOffset();
+ MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOStore,
+ MFI.getObjectSize(FI), Align);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addReg(SrcReg)
+ .addFrameIndex(FI)
+ .addImm(Offset)
+ .addMemOperand(MMO);
+ return true;
+ }
+ return false;
}
bool MipsFastISel::selectLoad(const Instruction *I) {
@@ -972,28 +1087,93 @@ bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
CLI.Call = MIB;
- // Add implicit physical register uses to the call.
- for (auto Reg : CLI.OutRegs)
- MIB.addReg(Reg, RegState::Implicit);
-
- // Add a register mask with the call-preserved registers. Proper
- // defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CC));
-
- CLI.Call = MIB;
// Finish off the call including any return values.
return finishCall(CLI, RetVT, NumBytes);
}
bool MipsFastISel::selectRet(const Instruction *I) {
+ const Function &F = *I->getParent()->getParent();
const ReturnInst *Ret = cast<ReturnInst>(I);
if (!FuncInfo.CanLowerReturn)
return false;
+
+ // Build a list of return value registers.
+ SmallVector<unsigned, 4> RetRegs;
+
if (Ret->getNumOperands() > 0) {
- return false;
+ CallingConv::ID CC = F.getCallingConv();
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
+ I->getContext());
+ CCAssignFn *RetCC = RetCC_Mips;
+ CCInfo.AnalyzeReturn(Outs, RetCC);
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+ const Value *RV = Ret->getOperand(0);
+
+ // Don't bother handling odd stuff for now.
+ if ((VA.getLocInfo() != CCValAssign::Full) &&
+ (VA.getLocInfo() != CCValAssign::BCvt))
+ return false;
+
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DestReg = VA.getLocReg();
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!MRI.getRegClass(SrcReg)->contains(DestReg))
+ return false;
+
+ EVT RVEVT = TLI.getValueType(RV->getType());
+ if (!RVEVT.isSimple())
+ return false;
+
+ if (RVEVT.isVector())
+ return false;
+
+ MVT RVVT = RVEVT.getSimpleVT();
+ if (RVVT == MVT::f128)
+ return false;
+
+ MVT DestVT = VA.getValVT();
+ // Special handling for extended integers.
+ if (RVVT != DestVT) {
+ if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
+ return false;
+
+ if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
+ return false;
+
+ bool IsZExt = Outs[0].Flags.isZExt();
+ SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
+ if (SrcReg == 0)
+ return false;
+ }
+
+ // Make the copy.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
+
+ // Add register to return instruction.
+ RetRegs.push_back(VA.getLocReg());
}
- emitInst(Mips::RetRA);
+ MachineInstrBuilder MIB = emitInst(Mips::RetRA);
+ for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
+ MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
}
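
The extension handling in selectRet follows the usual widening rule: an i1/i8/i16 return value is zero- or sign-extended to i32 according to the zext/sext attribute on the function. The rule itself as a small portable sketch; the bit-twiddling is a stand-in for the emitIntExt machinery, not the backend's actual instruction selection:

    // Model of the return-value widening decision in selectRet().
    #include <cassert>
    #include <cstdint>

    int32_t widenReturn(int32_t Raw, unsigned SrcBits, bool IsZExt) {
      uint32_t Mask = (SrcBits >= 32) ? 0xffffffffu : ((1u << SrcBits) - 1);
      uint32_t Z = (uint32_t)Raw & Mask;
      if (IsZExt)
        return (int32_t)Z;                // zero-extend
      uint32_t Sign = 1u << (SrcBits - 1);
      return (int32_t)((Z ^ Sign) - Sign); // sign-extend from SrcBits
    }

    int main() {
      assert(widenReturn(0xFF, 8, true) == 255);    // zext i8 -1 -> 255
      assert(widenReturn(0xFF, 8, false) == -1);    // sext i8 -1 -> -1
      assert(widenReturn(0x8000, 16, false) == -32768);
    }
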
@@ -1118,7 +1298,8 @@ bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
bool isZExt) {
unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
- return emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
+ bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
+ return Success ? DestReg : 0;
}
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
@@ -1170,6 +1351,17 @@ unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
return VReg;
}
+void MipsFastISel::simplifyAddress(Address &Addr) {
+ if (!isInt<16>(Addr.getOffset())) {
+ unsigned TempReg =
+ materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
+ unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
+ emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
+ Addr.setReg(DestReg);
+ Addr.setOffset(0);
+ }
+}
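
simplifyAddress exists because MIPS load/store immediates are signed 16-bit. A standalone model of the transformation, assuming a trivial virtual-register counter in place of createResultReg:

    // Model of simplifyAddress(): offsets that do not fit in a signed
    // 16-bit immediate are folded into a fresh base register.
    #include <cassert>
    #include <cstdint>

    struct Address { uint32_t BaseReg; int64_t Offset; };

    bool fitsSImm16(int64_t V) { return V >= -32768 && V <= 32767; }

    // NewRegCounter stands in for createResultReg(); purely illustrative.
    void simplifyAddress(Address &A, uint32_t &NewRegCounter) {
      if (!fitsSImm16(A.Offset)) {
        // Pretend we emitted: materialize Offset into Tmp; addu New, Tmp, Base
        A.BaseReg = ++NewRegCounter;
        A.Offset = 0;
      }
    }

    int main() {
      uint32_t VRegs = 100;
      Address A{1, 70000};
      simplifyAddress(A, VRegs);
      assert(A.Offset == 0 && A.BaseReg == 101);
      Address B{2, 32767};
      simplifyAddress(B, VRegs);
      assert(B.Offset == 32767 && B.BaseReg == 2); // already encodable
    }
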
+
namespace llvm {
FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) {
diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp
index 3014a0d..8b8b019 100644
--- a/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/lib/Target/Mips/MipsFrameLowering.cpp
@@ -100,7 +100,7 @@ bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
int64_t Offset = 0;
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 0bdabf3..21fc8ce 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -47,7 +47,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
bool Ret = SelectionDAGISel::runOnMachineFunction(MF);
processFunctionAfterISel(MF);
@@ -95,6 +95,12 @@ bool MipsDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectIntAddrMSA(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
llvm_unreachable("Unimplemented function.");
@@ -230,12 +236,3 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
OutOps.push_back(Op);
return false;
}
-
-/// createMipsISelDag - This pass converts a legalized DAG into a
-/// MIPS-specific DAG, ready for instruction scheduling.
-FunctionPass *llvm::createMipsISelDag(MipsTargetMachine &TM) {
- if (TM.getSubtargetImpl()->inMips16Mode())
- return llvm::createMips16ISelDag(TM);
-
- return llvm::createMipsSEISelDag(TM);
-}
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index ff8760d..6b72877 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -73,6 +73,9 @@ private:
virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
/// Match addr+simm10 and addr
virtual bool selectIntAddrMSA(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
@@ -125,11 +128,6 @@ private:
char ConstraintCode,
std::vector<SDValue> &OutOps) override;
};
-
-/// createMipsISelDag - This pass converts a legalized DAG into a
-/// MIPS-specific DAG, ready for instruction scheduling.
-FunctionPass *createMipsISelDag(MipsTargetMachine &TM);
-
}
#endif
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index ff2bfb3..9253b2e 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -70,7 +70,7 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
if (!isShiftedMask_64(I))
return false;
- Size = CountPopulation_64(I);
+ Size = countPopulation(I);
Pos = countTrailingZeros(I);
return true;
}
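
For reference, isShiftedMask decomposes a value that is one contiguous run of set bits into a (position, size) pair: countPopulation gives the run length and countTrailingZeros its start. The same check in portable C++20, with <bit> standing in for LLVM's MathExtras helpers:

    // Portable model of isShiftedMask(); std::popcount/std::countr_zero
    // (C++20 <bit>) stand in for countPopulation/countTrailingZeros.
    #include <bit>
    #include <cassert>
    #include <cstdint>

    bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
      if (I == 0)
        return false;
      uint64_t Shifted = I >> std::countr_zero(I);
      if ((Shifted & (Shifted + 1)) != 0) // not of the form 0...01...1
        return false;
      Pos = std::countr_zero(I);
      Size = std::popcount(I);
      return true;
    }

    int main() {
      uint64_t P, S;
      assert(isShiftedMask(0x0FF0, P, S) && P == 4 && S == 8);
      assert(!isShiftedMask(0x0F0F, P, S));
    }
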
@@ -203,7 +203,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
const MipsSubtarget &STI)
- : TargetLowering(TM), Subtarget(STI) {
+ : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
@@ -215,12 +215,15 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
ZeroOrNegativeOneBooleanContent);
// Load extended operations for i1 types must be promoted
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ }
// MIPS doesn't have extending float->double load/store
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ for (MVT VT : MVT::fp_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Used by legalize types to correctly generate the setcc result.
@@ -258,6 +261,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::LOAD, MVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
}
if (!Subtarget.isGP64bit()) {
@@ -368,9 +374,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
if (Subtarget.isGP64bit()) {
- setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
setTruncStoreAction(MVT::i64, MVT::i32, Custom);
}
@@ -387,14 +393,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
- setMinStackArgumentAlignment(
- (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4);
+ setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? 8 : 4);
- setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? Mips::SP_64
- : Mips::SP);
+ setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
- setExceptionPointerRegister(Subtarget.isABI_N64() ? Mips::A0_64 : Mips::A0);
- setExceptionSelectorRegister(Subtarget.isABI_N64() ? Mips::A1_64 : Mips::A1);
+ setExceptionPointerRegister(ABI.IsN64() ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(ABI.IsN64() ? Mips::A1_64 : Mips::A1);
MaxStoresPerMemcpy = 16;
@@ -933,18 +937,35 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::DIVU:
case Mips::MOD:
case Mips::MODU:
- return insertDivByZeroTrap(
- MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), false);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false);
case Mips::PseudoDSDIV:
case Mips::PseudoDUDIV:
case Mips::DDIV:
case Mips::DDIVU:
case Mips::DMOD:
case Mips::DMODU:
- return insertDivByZeroTrap(
- MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), true);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true);
case Mips::SEL_D:
return emitSEL_D(MI, BB);
+
+ case Mips::PseudoSELECT_I:
+ case Mips::PseudoSELECT_I64:
+ case Mips::PseudoSELECT_S:
+ case Mips::PseudoSELECT_D32:
+ case Mips::PseudoSELECT_D64:
+ return emitPseudoSELECT(MI, BB, false, Mips::BNE);
+ case Mips::PseudoSELECTFP_F_I:
+ case Mips::PseudoSELECTFP_F_I64:
+ case Mips::PseudoSELECTFP_F_S:
+ case Mips::PseudoSELECTFP_F_D32:
+ case Mips::PseudoSELECTFP_F_D64:
+ return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
+ case Mips::PseudoSELECTFP_T_I:
+ case Mips::PseudoSELECTFP_T_I64:
+ case Mips::PseudoSELECTFP_T_S:
+ case Mips::PseudoSELECTFP_T_D32:
+ case Mips::PseudoSELECTFP_T_D64:
+ return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
}
}
@@ -959,8 +980,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, AND, NOR, ZERO, BEQ;
@@ -1043,8 +1063,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
unsigned SrcReg) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
if (Subtarget.hasMips32r2() && Size == 1) {
@@ -1080,8 +1099,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1178,7 +1196,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
+ BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1201,7 +1220,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, DL, TII->get(Mips::SC), Success)
+ unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
+ BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
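
The LL/SC loop assembled here has the shape of an ordinary compare-exchange retry loop, with extra masking for the sub-word operand that is elided below. Its semantics, sketched with std::atomic, where compare_exchange_weak plays the role of an SC that may fail:

    // Semantic sketch of the LL/SC retry loop being emitted; partword
    // masking is omitted for brevity.
    #include <atomic>
    #include <cassert>

    int atomicFetchAdd(std::atomic<int> &Word, int Incr) {
      int Old = Word.load();                               // LL
      while (!Word.compare_exchange_weak(Old, Old + Incr)) // binop + SC
        ; // Success == 0: beq back to loopMBB and retry
      return Old;
    }

    int main() {
      std::atomic<int> W{5};
      assert(atomicFetchAdd(W, 3) == 5 && W.load() == 8);
    }
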
@@ -1231,8 +1251,7 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ;
@@ -1314,8 +1333,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1412,7 +1430,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
+ BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::BNE))
@@ -1428,7 +1447,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, DL, TII->get(Mips::SC), Success)
+ unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
+ BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1450,10 +1470,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock::iterator II(MI);
@@ -1497,8 +1515,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
false, 0);
Chain = Addr.getValue(1);
- if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) ||
- Subtarget.isABI_N64()) {
+ if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) || ABI.IsN64()) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -1580,32 +1597,29 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = N->getGlobal();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64()) {
- const MipsTargetObjectFile &TLOF =
- (const MipsTargetObjectFile&)getObjFileLowering();
-
- if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine()))
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ const MipsTargetObjectFile *TLOF =
+ static_cast<const MipsTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
+ if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine()))
// %gp_rel relocation
- return getAddrGPRel(N, Ty, DAG);
+ return getAddrGPRel(N, SDLoc(N), Ty, DAG);
// %hi/%lo relocation
- return getAddrNonPIC(N, Ty, DAG);
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
}
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
if (LargeGOT)
- return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
+ return getAddrGlobalLargeGOT(N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16,
MipsII::MO_GOT_LO16, DAG.getEntryNode(),
MachinePointerInfo::getGOT());
- return getAddrGlobal(N, Ty, DAG,
- (Subtarget.isABI_N32() || Subtarget.isABI_N64())
- ? MipsII::MO_GOT_DISP
- : MipsII::MO_GOT16,
+ return getAddrGlobal(N, SDLoc(N), Ty, DAG,
+ (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP
+ : MipsII::MO_GOT16,
DAG.getEntryNode(), MachinePointerInfo::getGOT());
}
@@ -1614,12 +1628,10 @@ SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64())
- return getAddrNonPIC(N, Ty, DAG);
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::
@@ -1707,12 +1719,10 @@ lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64())
- return getAddrNonPIC(N, Ty, DAG);
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::
@@ -1721,20 +1731,19 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64()) {
- const MipsTargetObjectFile &TLOF =
- (const MipsTargetObjectFile&)getObjFileLowering();
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ const MipsTargetObjectFile *TLOF =
+ static_cast<const MipsTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
- if (TLOF.IsConstantInSmallSection(N->getConstVal(), getTargetMachine()))
+ if (TLOF->IsConstantInSmallSection(N->getConstVal(), getTargetMachine()))
// %gp_rel relocation
- return getAddrGPRel(N, Ty, DAG);
+ return getAddrGPRel(N, SDLoc(N), Ty, DAG);
- return getAddrNonPIC(N, Ty, DAG);
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
}
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
@@ -1760,8 +1769,7 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
unsigned Align = Node->getConstantOperandVal(3);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
- unsigned ArgSlotSizeInBytes =
- (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4;
+ unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
SDValue VAListLoad = DAG.getLoad(getPointerTy(), DL, Chain, VAListPtr,
MachinePointerInfo(SV), false, false, false,
@@ -1924,9 +1932,8 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
SDLoc DL(Op);
- SDValue FrameAddr =
- DAG.getCopyFromReg(DAG.getEntryNode(), DL,
- Subtarget.isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
+ SDValue FrameAddr = DAG.getCopyFromReg(
+ DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
@@ -1942,7 +1949,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT VT = Op.getSimpleValueType();
- unsigned RA = Subtarget.isABI_N64() ? Mips::RA_64 : Mips::RA;
+ unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
MFI->setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
@@ -1964,12 +1971,12 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc DL(Op);
- EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
+ EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
// EH_RETURN nodes, so that instructions are emitted back-to-back.
- unsigned OffsetReg = Subtarget.isABI_N64() ? Mips::V1_64 : Mips::V1;
- unsigned AddrReg = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
+ unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
@@ -1991,10 +1998,11 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
+
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
-
- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (shl lo, shamt)
// hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
// else:
@@ -2002,18 +2010,17 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
// hi = (shl lo, shamt[4:0])
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
- Not);
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
+ DAG.getConstant(1, VT));
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- DAG.getConstant(0, MVT::i32), ShiftLeftLo);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ DAG.getConstant(0, VT), ShiftLeftLo);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
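
The node sequence above implements a double-width left shift over a (lo, hi) register pair; the (srl (srl lo, 1), ~shamt) trick moves lo's carried-out bits into hi without ever shifting by 32. The arithmetic for the 32-bit-pair case, checked in plain C++ (the names are ours, not the backend's):

    // Model of lowerShiftLeftParts() for a 64-bit value in two i32 halves.
    #include <cassert>
    #include <cstdint>

    void shlParts(uint32_t Lo, uint32_t Hi, unsigned Shamt,
                  uint32_t &OutLo, uint32_t &OutHi) {
      Shamt &= 63;
      uint32_t Sh = Shamt & 31;
      uint32_t ShiftLeftLo = Lo << Sh;
      // (~shamt) & 31 == 31 - Sh, hence the double shift through lo >> 1.
      uint32_t Or = (Hi << Sh) | ((Lo >> 1) >> (31 - Sh));
      bool Big = (Shamt & 0x20) != 0; // shamt >= 32
      OutLo = Big ? 0 : ShiftLeftLo;
      OutHi = Big ? ShiftLeftLo : Or;
    }

    int main() {
      uint32_t Lo, Hi;
      shlParts(0x80000001u, 0, 1, Lo, Hi);
      assert(Lo == 2 && Hi == 1); // bit 31 of lo carried into hi
      shlParts(0x1, 0, 33, Lo, Hi);
      assert(Lo == 0 && Hi == 2); // shamt >= 32: hi = lo << (shamt & 31)
    }
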
@@ -2024,8 +2031,9 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
// if isSRA:
// hi = (sra hi, shamt)
@@ -2040,21 +2048,19 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
// hi = 0
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
- Hi, Shamt);
+ SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
+ DAG.getConstant(1, VT));
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
+ DL, VT, Hi, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
- DAG.getConstant(31, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
- ShiftRightHi);
+ SDValue Shift31 = DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(31, VT));
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ IsSRA ? Shift31 : DAG.getConstant(0, VT), ShiftRightHi);
SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
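
And the right-shift counterpart, covering both SRL and SRA, with (shl (shl hi, 1), ~shamt) feeding hi's low bits into lo:

    // Model of lowerShiftRightParts(); assumes arithmetic >> on int32_t,
    // which is what the SRA path relies on.
    #include <cassert>
    #include <cstdint>

    void shrParts(uint32_t Lo, uint32_t Hi, unsigned Shamt, bool IsSRA,
                  uint32_t &OutLo, uint32_t &OutHi) {
      Shamt &= 63;
      uint32_t Sh = Shamt & 31;
      uint32_t Or = ((Hi << 1) << (31 - Sh)) | (Lo >> Sh);
      uint32_t ShiftRightHi =
          IsSRA ? (uint32_t)((int32_t)Hi >> Sh) : (Hi >> Sh);
      uint32_t Fill = IsSRA ? (uint32_t)((int32_t)Hi >> 31) : 0;
      bool Big = (Shamt & 0x20) != 0; // shamt >= 32
      OutLo = Big ? ShiftRightHi : Or;
      OutHi = Big ? Fill : ShiftRightHi;
    }

    int main() {
      uint32_t Lo, Hi;
      shrParts(0, 0x80000000u, 1, false, Lo, Hi);  // logical
      assert(Lo == 0 && Hi == 0x40000000u);
      shrParts(0, 0x80000000u, 32, true, Lo, Hi);  // arithmetic, shamt >= 32
      assert(Lo == 0x80000000u && Hi == 0xFFFFFFFFu);
    }
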
@@ -2266,9 +2272,9 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, const MCPhysReg *F64Regs) {
-
- static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
+ CCState &State, ArrayRef<MCPhysReg> F64Regs) {
+ const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
+ State.getMachineFunction().getSubtarget());
static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
@@ -2278,6 +2284,19 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
return true;
// Promote i8 and i16
+ if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
+ if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
+ LocVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExtUpper;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExtUpper;
+ else
+ LocInfo = CCValAssign::AExtUpper;
+ }
+ }
+
+ // Promote i8 and i16
if (LocVT == MVT::i8 || LocVT == MVT::i16) {
LocVT = MVT::i32;
if (ArgFlags.isSExt())
@@ -2293,39 +2312,39 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
// f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
// is true: function is vararg, argument is 3rd or higher, there is previous
// argument which is not f32 or f64.
- bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
- || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
+ bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
+ State.getFirstUnallocated(F32Regs) != ValNo;
unsigned OrigAlign = ArgFlags.getOrigAlign();
bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
// If this is the first part of an i64 arg,
// the allocated register must be either A0 or A2.
if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
LocVT = MVT::i32;
} else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
// Allocate int register and shadow next int register. If first
// available register is Mips::A1 or Mips::A3, shadow it too.
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
if (Reg == Mips::A1 || Reg == Mips::A3)
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
- State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
+ State.AllocateReg(IntRegs);
LocVT = MVT::i32;
} else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
// we are guaranteed to find an available float register
if (ValVT == MVT::f32) {
- Reg = State.AllocateReg(F32Regs, FloatRegsSize);
+ Reg = State.AllocateReg(F32Regs);
// Shadow int register
- State.AllocateReg(IntRegs, IntRegsSize);
+ State.AllocateReg(IntRegs);
} else {
- Reg = State.AllocateReg(F64Regs, FloatRegsSize);
+ Reg = State.AllocateReg(F64Regs);
// Shadow int registers
- unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
+ unsigned Reg2 = State.AllocateReg(IntRegs);
if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
- State.AllocateReg(IntRegs, IntRegsSize);
- State.AllocateReg(IntRegs, IntRegsSize);
+ State.AllocateReg(IntRegs);
+ State.AllocateReg(IntRegs);
}
} else
llvm_unreachable("Cannot handle this ValVT.");
@@ -2407,8 +2426,8 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// used for the function (that is, Mips linker doesn't generate lazy binding
// stub for a function whose address is taken in the program).
if (IsPICCall && !InternalLinkage && IsCallReloc) {
- unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP;
- EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
+ unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
+ EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
}
@@ -2431,8 +2450,7 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
if (Subtarget.inMips16HardFloat()) {
@@ -2468,7 +2486,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
@@ -2480,7 +2498,6 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Allocate the reserved argument area. It seems strange to do this from the
// caller side but removing it breaks the frame size calculation.
- const MipsABIInfo &ABI = Subtarget.getABI();
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(), Callee.getNode());
@@ -2511,8 +2528,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
SDValue StackPtr = DAG.getCopyFromReg(
- Chain, DL, Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP,
- getPointerTy());
+ Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP, getPointerTy());
// With EABI is it possible to have 16 args on registers.
std::deque< std::pair<unsigned, SDValue> > RegsToPass;
@@ -2626,9 +2642,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- bool IsPICCall =
- (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to
- // jalr $25
+ bool IsPICCall = (ABI.IsN64() || IsPIC); // true if calls are translated to
+ // jalr $25
bool GlobalOrExternal = false, InternalLinkage = false, IsCallReloc = false;
SDValue CalleeLo;
EVT Ty = Callee.getValueType();
@@ -2639,15 +2654,14 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InternalLinkage = Val->hasInternalLinkage();
if (InternalLinkage)
- Callee = getAddrLocal(G, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
else if (LargeGOT) {
- Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
+ Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Val));
IsCallReloc = true;
} else {
- Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Val));
IsCallReloc = true;
}
@@ -2659,16 +2673,16 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
- if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static
- Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
- MipsII::MO_NO_FLAG);
+ if (!ABI.IsN64() && !IsPIC) // !N64 && static
+ Callee =
+ DAG.getTargetExternalSymbol(Sym, getPointerTy(), MipsII::MO_NO_FLAG);
else if (LargeGOT) {
- Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
+ Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Sym));
IsCallReloc = true;
} else { // N64 || PIC
- Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Sym));
IsCallReloc = true;
}
@@ -2844,7 +2858,6 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- const MipsABIInfo &ABI = Subtarget.getABI();
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
@@ -2858,13 +2871,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[i].OrigArgIndex;
+ if (Ins[i].isOrigArg()) {
+ std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[i].getOrigArgIndex();
+ }
EVT ValVT = VA.getValVT();
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool IsRegLoc = VA.isRegLoc();
if (Flags.isByVal()) {
+ assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
unsigned FirstByValReg, LastByValReg;
unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
@@ -2897,7 +2913,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
(RegVT == MVT::i64 && ValVT == MVT::f64) ||
(RegVT == MVT::f64 && ValVT == MVT::i64))
ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
- else if (Subtarget.isABI_O32() && RegVT == MVT::i32 &&
+ else if (ABI.IsO32() && RegVT == MVT::i32 &&
ValVT == MVT::f64) {
unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
getNextIntArgReg(ArgReg), RC);
@@ -2912,7 +2928,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
} else { // VA.isRegLoc()
MVT LocVT = VA.getLocVT();
- if (Subtarget.isABI_O32()) {
+ if (ABI.IsO32()) {
// We ought to be able to use LocVT directly but O32 sets it to i32
// when allocating floating point values to integer registers.
// This shouldn't influence how we load the value into registers unless
@@ -2949,7 +2965,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(
- getRegClassFor(Subtarget.isABI_N64() ? MVT::i64 : MVT::i32));
+ getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
MipsFI->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
@@ -3066,7 +3082,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
- unsigned V0 = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
Flag = Chain.getValue(1);
@@ -3201,7 +3217,7 @@ parsePhysicalReg(StringRef C, std::string &Prefix,
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ Subtarget.getRegisterInfo();
const TargetRegisterClass *RC;
std::string Prefix;
unsigned long long Reg;
@@ -3275,9 +3291,10 @@ parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
-std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
-{
+std::pair<unsigned, const TargetRegisterClass *>
+MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
@@ -3333,7 +3350,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
if (R.second)
return R;
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
@@ -3477,7 +3494,7 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
unsigned MipsTargetLowering::getJumpTableEncoding() const {
- if (Subtarget.isABI_N64())
+ if (ABI.IsN64())
return MachineJumpTableInfo::EK_GPRel64BlockAddress;
return TargetLowering::getJumpTableEncoding();
@@ -3495,7 +3512,6 @@ void MipsTargetLowering::copyByValRegs(
unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
int FrameObjOffset;
- const MipsABIInfo &ABI = Subtarget.getABI();
ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
if (RegAreaSize)
@@ -3547,7 +3563,7 @@ void MipsTargetLowering::passByValArg(
unsigned NumRegs = LastReg - FirstReg;
if (NumRegs) {
- const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetByValArgRegs();
+ const ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
unsigned I = 0;
@@ -3630,8 +3646,8 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
SDValue Chain, SDLoc DL,
SelectionDAG &DAG,
CCState &State) const {
- const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetVarArgRegs();
- unsigned Idx = State.getFirstUnallocated(ArgRegs.data(), ArgRegs.size());
+ const ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
+ unsigned Idx = State.getFirstUnallocated(ArgRegs);
unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
const TargetRegisterClass *RC = getRegClassFor(RegTy);
@@ -3646,7 +3662,6 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
VaArgOffset =
RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
else {
- const MipsABIInfo &ABI = Subtarget.getABI();
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
(int)(RegSizeInBytes * (ArgRegs.size() - Idx));
@@ -3677,8 +3692,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
unsigned Align) const {
- MachineFunction &MF = State->getMachineFunction();
- const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
assert(Size && "Byval argument's size shouldn't be 0.");
@@ -3689,10 +3703,10 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
if (State->getCallingConv() != CallingConv::Fast) {
unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
- const ArrayRef<MCPhysReg> IntArgRegs = Subtarget.getABI().GetByValArgRegs();
+ const ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
// FIXME: The O32 case actually describes no shadow registers.
const MCPhysReg *ShadowRegs =
- Subtarget.isABI_O32() ? IntArgRegs.data() : Mips64DPRegs;
+ ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;
// We used to check the size as well but we can't do that anymore since
// CCState::HandleByVal() rounds up the size after calling this function.
@@ -3700,7 +3714,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
"Byval argument's alignment should be a multiple of"
"RegSizeInBytes.");
- FirstReg = State->getFirstUnallocated(IntArgRegs.data(), IntArgRegs.size());
+ FirstReg = State->getFirstUnallocated(IntArgRegs);
// If Align > RegSizeInBytes, the first arg register must be even.
// FIXME: This condition happens to do the right thing but it's not the
@@ -3720,3 +3734,102 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
+
+MachineBasicBlock *
+MipsTargetLowering::emitPseudoSELECT(MachineInstr *MI, MachineBasicBlock *BB,
+ bool isFPCmp, unsigned Opc) const {
+ assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
+ "Subtarget already supports SELECT nodes with the use of"
+ "conditional-move instructions.");
+
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ // To "insert" a SELECT instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = BB;
+ ++It;
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // setcc r1, r2, r3
+ // bNE r1, r0, copy1MBB
+ // fallthrough --> copy0MBB
+ MachineBasicBlock *thisMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, copy0MBB);
+ F->insert(It, sinkMBB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Next, add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
+
+ if (isFPCmp) {
+ // bc1[tf] cc, sinkMBB
+ BuildMI(BB, DL, TII->get(Opc))
+ .addReg(MI->getOperand(1).getReg())
+ .addMBB(sinkMBB);
+ } else {
+ // bne rs, $0, sinkMBB
+ BuildMI(BB, DL, TII->get(Opc))
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(Mips::ZERO)
+ .addMBB(sinkMBB);
+ }
+
+ // copy0MBB:
+ // %FalseValue = ...
+ // # fallthrough to sinkMBB
+ BB = copy0MBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
+ // ...
+ BB = sinkMBB;
+
+ BuildMI(*BB, BB->begin(), DL,
+ TII->get(Mips::PHI), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB)
+ .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+
+ return BB;
+}
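// Illustrative sketch (hypothetical example, not from the patch): for a
// pseudo select such as "%r = select i1 %c, i32 %t, i32 %f", the integer
// (bne) form of the diamond built above amounts to:
//   thisMBB:   bne  $c, $zero, sinkMBB   # condition true, result is %t
//   copy0MBB:  ...                       # fall through, result is %f
//   sinkMBB:   %r = PHI [%t, thisMBB], [%f, copy0MBB]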
+
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned MipsTargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ // Named registers are expected to be fairly rare. For now, just support $28
+ // since the Linux kernel uses it.
+ if (Subtarget.isGP64bit()) {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("$28", Mips::GP_64)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ } else {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("$28", Mips::GP)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ }
+ report_fatal_error("Invalid register name global variable");
+}
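// Illustrative sketch (hypothetical IR, not from the patch): this hook is
// reached through the read/write_register intrinsics, e.g.
//   declare i64 @llvm.read_register.i64(metadata)
//   %gp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"$28"}
// which would resolve "$28" to Mips::GP_64 on a 64-bit subtarget.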
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 60e53da..9f86a43 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -15,6 +15,7 @@
#ifndef LLVM_LIB_TARGET_MIPS_MIPSISELLOWERING_H
#define LLVM_LIB_TARGET_MIPS_MIPSISELLOWERING_H
+#include "MCTargetDesc/MipsABIInfo.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "Mips.h"
#include "llvm/CodeGen/CallingConvLower.h"
@@ -262,6 +263,8 @@ namespace llvm {
void HandleByVal(CCState *, unsigned &, unsigned) const override;
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
+
protected:
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const;
@@ -270,9 +273,8 @@ namespace llvm {
//
// (add (load (wrapper $gp, %got(sym)), %lo(sym))
template <class NodeTy>
- SDValue getAddrLocal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ SDValue getAddrLocal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
bool IsN32OrN64) const {
- SDLoc DL(N);
unsigned GOTFlag = IsN32OrN64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
getTargetNode(N, Ty, DAG, GOTFlag));
@@ -289,11 +291,10 @@ namespace llvm {
// computing a global symbol's address:
//
// (load (wrapper $gp, %got(sym)))
- template<class NodeTy>
- SDValue getAddrGlobal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ template <class NodeTy>
+ SDValue getAddrGlobal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
unsigned Flag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
- SDLoc DL(N);
SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
getTargetNode(N, Ty, DAG, Flag));
return DAG.getLoad(Ty, DL, Chain, Tgt, PtrInfo, false, false, false, 0);
@@ -303,14 +304,13 @@ namespace llvm {
// computing a global symbol's address in large-GOT mode:
//
// (load (wrapper (add %hi(sym), $gp), %lo(sym)))
- template<class NodeTy>
- SDValue getAddrGlobalLargeGOT(NodeTy *N, EVT Ty, SelectionDAG &DAG,
- unsigned HiFlag, unsigned LoFlag,
- SDValue Chain,
+ template <class NodeTy>
+ SDValue getAddrGlobalLargeGOT(NodeTy *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned HiFlag,
+ unsigned LoFlag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
- SDLoc DL(N);
- SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty,
- getTargetNode(N, Ty, DAG, HiFlag));
+ SDValue Hi =
+ DAG.getNode(MipsISD::Hi, DL, Ty, getTargetNode(N, Ty, DAG, HiFlag));
Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
getTargetNode(N, Ty, DAG, LoFlag));
@@ -322,9 +322,9 @@ namespace llvm {
// computing a symbol's address in non-PIC mode:
//
// (add %hi(sym), %lo(sym))
- template<class NodeTy>
- SDValue getAddrNonPIC(NodeTy *N, EVT Ty, SelectionDAG &DAG) const {
- SDLoc DL(N);
+ template <class NodeTy>
+ SDValue getAddrNonPIC(NodeTy *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG) const {
SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
return DAG.getNode(ISD::ADD, DL, Ty,
@@ -336,9 +336,8 @@ namespace llvm {
// computing a symbol's address using gp-relative addressing:
//
// (add $gp, %gp_rel(sym))
- template<class NodeTy>
- SDValue getAddrGPRel(NodeTy *N, EVT Ty, SelectionDAG &DAG) const {
- SDLoc DL(N);
+ template <class NodeTy>
+ SDValue getAddrGPRel(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG) const {
assert(Ty == MVT::i32);
SDValue GPRel = getTargetNode(N, Ty, DAG, MipsII::MO_GPREL);
return DAG.getNode(ISD::ADD, DL, Ty,
@@ -363,6 +362,8 @@ namespace llvm {
// Subtarget Info
const MipsSubtarget &Subtarget;
+ // Cache the ABI from the TargetMachine, we use it everywhere.
+ const MipsABIInfo &ABI;
private:
// Create a TargetGlobalAddress node.
@@ -488,9 +489,10 @@ namespace llvm {
std::pair<unsigned, const TargetRegisterClass *>
parseRegForInlineAsmConstraint(StringRef C, MVT VT) const;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
@@ -534,6 +536,9 @@ namespace llvm {
MachineBasicBlock *emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineBasicBlock *BB, unsigned Size) const;
MachineBasicBlock *emitSEL_D(MachineInstr *MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitPseudoSELECT(MachineInstr *MI,
+ MachineBasicBlock *BB, bool isFPCmp,
+ unsigned Opc) const;
};
/// Create MipsTargetLowering objects.
diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td
index 2aa8328..ed97cb4 100644
--- a/lib/Target/Mips/MipsInstrFPU.td
+++ b/lib/Target/Mips/MipsInstrFPU.td
@@ -458,42 +458,42 @@ def FSUB_S : MMRel, ADDS_FT<"sub.s", FGR32Opnd, II_SUB_S, 0, fsub>,
defm FSUB : ADDS_M<"sub.d", II_SUB_D, 0, fsub>, ADDS_FM<0x01, 17>;
def MADD_S : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, fadd>,
- MADDS_FM<4, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+ MADDS_FM<4, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
def MSUB_S : MMRel, MADDS_FT<"msub.s", FGR32Opnd, II_MSUB_S, fsub>,
- MADDS_FM<5, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+ MADDS_FM<5, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
let AdditionalPredicates = [NoNaNsFPMath] in {
def NMADD_S : MMRel, NMADDS_FT<"nmadd.s", FGR32Opnd, II_NMADD_S, fadd>,
- MADDS_FM<6, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+ MADDS_FM<6, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
def NMSUB_S : MMRel, NMADDS_FT<"nmsub.s", FGR32Opnd, II_NMSUB_S, fsub>,
- MADDS_FM<7, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+ MADDS_FM<7, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
}
def MADD_D32 : MMRel, MADDS_FT<"madd.d", AFGR64Opnd, II_MADD_D, fadd>,
- MADDS_FM<4, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+ MADDS_FM<4, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
def MSUB_D32 : MMRel, MADDS_FT<"msub.d", AFGR64Opnd, II_MSUB_D, fsub>,
- MADDS_FM<5, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+ MADDS_FM<5, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
let AdditionalPredicates = [NoNaNsFPMath] in {
def NMADD_D32 : MMRel, NMADDS_FT<"nmadd.d", AFGR64Opnd, II_NMADD_D, fadd>,
- MADDS_FM<6, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+ MADDS_FM<6, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
def NMSUB_D32 : MMRel, NMADDS_FT<"nmsub.d", AFGR64Opnd, II_NMSUB_D, fsub>,
- MADDS_FM<7, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+ MADDS_FM<7, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
}
-let isCodeGenOnly=1 in {
+let DecoderNamespace = "Mips64" in {
def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, II_MADD_D, fadd>,
- MADDS_FM<4, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ MADDS_FM<4, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, II_MSUB_D, fsub>,
- MADDS_FM<5, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ MADDS_FM<5, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
}
let AdditionalPredicates = [NoNaNsFPMath],
- isCodeGenOnly=1 in {
+ DecoderNamespace = "Mips64" in {
def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, II_NMADD_D, fadd>,
- MADDS_FM<6, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ MADDS_FM<6, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, II_NMSUB_D, fsub>,
- MADDS_FM<7, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ MADDS_FM<7, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td
index 5c91fbc..8cc1603 100644
--- a/lib/Target/Mips/MipsInstrFormats.td
+++ b/lib/Target/Mips/MipsInstrFormats.td
@@ -297,6 +297,19 @@ class BGEZ_FM<bits<6> op, bits<5> funct> : StdArch {
let Inst{15-0} = offset;
}
+class BBIT_FM<bits<6> op> : StdArch {
+ bits<5> rs;
+ bits<5> p;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = p;
+ let Inst{15-0} = offset;
+}
+
class SLTI_FM<bits<6> op> : StdArch {
bits<5> rt;
bits<5> rs;
@@ -411,6 +424,20 @@ class SYNC_FM : StdArch {
let Inst{5-0} = 0xf;
}
+class SYNCI_FM : StdArch {
+ // Produced by the mem_simm16 address as reg << 16 | imm (see getMemEncoding).
+ bits<21> addr;
+ bits<5> rs = addr{20-16};
+ bits<16> offset = addr{15-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b000001;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0b11111;
+ let Inst{15-0} = offset;
+}
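// Worked encoding example (derived from the fields above, not part of the
// patch): "synci 4($5)" packs addr = (5 << 16) | 4, so the instruction word is
//   0x04000000 | (5 << 21) | (0x1f << 16) | 4 = 0x04bf0004.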
+
class MULT_FM<bits<6> op, bits<6> funct> : StdArch {
bits<5> rs;
bits<5> rt;
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index dcc0e24..0839147 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -15,7 +15,7 @@
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
-#include "MipsTargetMachine.h"
+#include "MipsSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index aebac34..04a16b3 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -156,6 +156,8 @@ def HasMips3 : Predicate<"Subtarget->hasMips3()">,
AssemblerPredicate<"FeatureMips3">;
def HasMips4_32 : Predicate<"Subtarget->hasMips4_32()">,
AssemblerPredicate<"FeatureMips4_32">;
+def NotMips4_32 : Predicate<"!Subtarget->hasMips4_32()">,
+ AssemblerPredicate<"FeatureMips4_32">;
def HasMips4_32r2 : Predicate<"Subtarget->hasMips4_32r2()">,
AssemblerPredicate<"FeatureMips4_32r2">;
def HasMips5_32r2 : Predicate<"Subtarget->hasMips5_32r2()">,
@@ -180,8 +182,6 @@ def HasMips64r6 : Predicate<"Subtarget->hasMips64r6()">,
AssemblerPredicate<"FeatureMips64r6">;
def NotMips64r6 : Predicate<"!Subtarget->hasMips64r6()">,
AssemblerPredicate<"!FeatureMips64r6">;
-def IsN64 : Predicate<"Subtarget->isABI_N64()">,
- AssemblerPredicate<"FeatureN64">;
def InMips16Mode : Predicate<"Subtarget->inMips16Mode()">,
AssemblerPredicate<"FeatureMips16">;
def HasCnMips : Predicate<"Subtarget->hasCnMips()">,
@@ -220,6 +220,9 @@ class GPR_64 { list<Predicate> GPRPredicates = [IsGP64bit]; }
// subtractive predicate will hopefully keep us under the 32 predicate
// limit long enough to develop an alternative way to handle P1||P2
// predicates.
+class ISA_MIPS1_NOT_4_32 {
+ list<Predicate> InsnPredicates = [NotMips4_32];
+}
class ISA_MIPS1_NOT_32R6_64R6 {
list<Predicate> InsnPredicates = [NotMips32r6, NotMips64r6];
}
@@ -316,7 +319,7 @@ class IsAsCheapAsAMove {
}
class NeverHasSideEffects {
- bit neverHasSideEffects = 1;
+ bit hasSideEffects = 0;
}
//===----------------------------------------------------------------------===//
@@ -425,7 +428,14 @@ def MipsMemSimm11AsmOperand : AsmOperandClass {
let RenderMethod = "addMemOperands";
let ParserMethod = "parseMemOperand";
let PredicateMethod = "isMemWithSimmOffset<11>";
- //let DiagnosticType = "Simm11";
+}
+
+def MipsMemSimm16AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetSimm16";
+ let SuperClasses = [MipsMemAsmOperand];
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithSimmOffset<16>";
}
def MipsInvertedImmoperand : AsmOperandClass {
@@ -470,6 +480,12 @@ def mem_simm11 : mem_generic {
let ParserMatchClass = MipsMemSimm11AsmOperand;
}
+def mem_simm16 : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm16);
+ let EncoderMethod = "getMemEncoding";
+ let ParserMatchClass = MipsMemSimm16AsmOperand;
+}
+
def mem_ea : Operand<iPTR> {
let PrintMethod = "printMemOperandEA";
let MIOperandInfo = (ops ptr_rc, simm16);
@@ -632,7 +648,7 @@ class shift_rotate_reg<string opstr, RegisterOperand RO, InstrItinClass itin,
class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
[], II_LUI, FrmI, opstr>, IsAsCheapAsAMove {
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let isReMaterializable = 1;
}
@@ -860,6 +876,13 @@ class SYNC_FT<string opstr> :
InstSE<(outs), (ins i32imm:$stype), "sync $stype", [(MipsSync imm:$stype)],
NoItinerary, FrmOther, opstr>;
+class SYNCI_FT<string opstr> :
+ InstSE<(outs), (ins mem_simm16:$addr), !strconcat(opstr, "\t$addr"), [],
+ NoItinerary, FrmOther, opstr> {
+ let hasSideEffects = 1;
+ let DecoderMethod = "DecodeSyncI";
+}
+
let hasSideEffects = 1 in
class TEQ_FT<string opstr, RegisterOperand RO> :
InstSE<(outs), (ins RO:$rs, RO:$rt, uimm16:$code_),
@@ -876,7 +899,7 @@ class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
itin, FrmR, opstr> {
let isCommutable = 1;
let Defs = DefRegs;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
// Pseudo multiply/divide instruction with explicit accumulator register
@@ -922,7 +945,7 @@ class MoveFromLOHI<string opstr, RegisterOperand RO, Register UseReg>:
InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], II_MFHI_MFLO,
FrmR, opstr> {
let Uses = [UseReg];
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
class PseudoMTLOHI<RegisterClass DstRC, RegisterClass SrcRC>
@@ -934,7 +957,7 @@ class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], II_MTHI_MTLO,
FrmR, opstr> {
let Defs = DefRegs;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
class EffectiveAddress<string opstr, RegisterOperand RO> :
@@ -964,7 +987,7 @@ class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO,
class SubwordSwap<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), [],
NoItinerary, FrmR, opstr> {
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
}
// Read Hardware
@@ -1130,12 +1153,14 @@ def ADD : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>;
def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>;
def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>;
def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>;
+let AdditionalPredicates = [NotInMicroMips] in {
def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, II_AND, and>,
ADD_FM<0, 0x24>;
def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, II_OR, or>,
ADD_FM<0, 0x25>;
def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, II_XOR, xor>,
ADD_FM<0, 0x26>;
+}
def NOR : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>;
/// Shift Instructions
@@ -1169,11 +1194,15 @@ def LBu : Load<"lbu", GPR32Opnd, zextloadi8, II_LBU, addrDefault>, MMRel,
def LH : Load<"lh", GPR32Opnd, sextloadi16, II_LH, addrDefault>, MMRel,
LW_FM<0x21>;
def LHu : Load<"lhu", GPR32Opnd, zextloadi16, II_LHU>, MMRel, LW_FM<0x25>;
+let AdditionalPredicates = [NotInMicroMips] in {
def LW : Load<"lw", GPR32Opnd, load, II_LW, addrDefault>, MMRel,
LW_FM<0x23>;
+}
def SB : Store<"sb", GPR32Opnd, truncstorei8, II_SB>, MMRel, LW_FM<0x28>;
def SH : Store<"sh", GPR32Opnd, truncstorei16, II_SH>, MMRel, LW_FM<0x29>;
+let AdditionalPredicates = [NotInMicroMips] in {
def SW : Store<"sw", GPR32Opnd, store, II_SW>, MMRel, LW_FM<0x2b>;
+}
/// load/store left/right
let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
@@ -1188,6 +1217,7 @@ def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, II_SWR>, LW_FM<0x2e>,
ISA_MIPS1_NOT_32R6_64R6;
}
+let AdditionalPredicates = [NotInMicroMips] in {
// COP2 Memory Instructions
def LWC2 : LW_FT2<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>,
ISA_MIPS1_NOT_32R6_64R6;
@@ -1207,8 +1237,10 @@ let DecoderNamespace = "COP3_" in {
def SDC3 : SW_FT3<"sdc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3f>,
ISA_MIPS2;
}
+}
def SYNC : MMRel, SYNC_FT<"sync">, SYNC_FM, ISA_MIPS32;
+def SYNCI : MMRel, SYNCI_FT<"synci">, SYNCI_FM, ISA_MIPS32R2;
def TEQ : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>, ISA_MIPS2;
def TGE : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM<0x30>, ISA_MIPS2;
@@ -1284,8 +1316,8 @@ let AdditionalPredicates = [NotInMicroMips] in {
def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>;
}
-// FIXME: JALX really requires either MIPS16 or microMIPS in addition to MIPS32.
-def JALX : JumpLink<"jalx", calltarget>, FJ<0x1D>, ISA_MIPS32_NOT_32R6_64R6;
+def JALX : MMRel, JumpLink<"jalx", calltarget>, FJ<0x1D>,
+ ISA_MIPS32_NOT_32R6_64R6;
def BGEZAL : MMRel, BGEZAL_FT<"bgezal", brtarget, GPR32Opnd>, BGEZAL_FM<0x11>,
ISA_MIPS1_NOT_32R6_64R6;
def BGEZALL : MMRel, BGEZAL_FT<"bgezall", brtarget, GPR32Opnd, 0>,
@@ -1440,10 +1472,10 @@ def MFC2 : MFC3OP<"mfc2", GPR32Opnd>, MFC3OP_FM<0x12, 0>;
def MTC2 : MFC3OP<"mtc2", GPR32Opnd>, MFC3OP_FM<0x12, 4>;
class Barrier<string asmstr> : InstSE<(outs), (ins), asmstr, [], NoItinerary,
- FrmOther>;
-def SSNOP : Barrier<"ssnop">, BARRIER_FM<1>;
-def EHB : Barrier<"ehb">, BARRIER_FM<3>;
-def PAUSE : Barrier<"pause">, BARRIER_FM<5>, ISA_MIPS32R2;
+ FrmOther, asmstr>;
+def SSNOP : MMRel, Barrier<"ssnop">, BARRIER_FM<1>;
+def EHB : MMRel, Barrier<"ehb">, BARRIER_FM<3>;
+def PAUSE : MMRel, Barrier<"pause">, BARRIER_FM<5>, ISA_MIPS32R2;
// JR_HB and JALR_HB are defined here using the new style naming
// scheme because some of this code is shared with Mips32r6InstrInfo.td
@@ -1494,13 +1526,14 @@ def TLBWR : MMRel, TLB<"tlbwr">, COP0_TLB_FM<0x06>;
class CacheOp<string instr_asm, Operand MemOpnd> :
InstSE<(outs), (ins MemOpnd:$addr, uimm5:$hint),
- !strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther> {
+ !strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther,
+ instr_asm> {
let DecoderMethod = "DecodeCacheOp";
}
-def CACHE : CacheOp<"cache", mem>, CACHEOP_FM<0b101111>,
+def CACHE : MMRel, CacheOp<"cache", mem>, CACHEOP_FM<0b101111>,
INSN_MIPS3_32_NOT_32R6_64R6;
-def PREF : CacheOp<"pref", mem>, CACHEOP_FM<0b110011>,
+def PREF : MMRel, CacheOp<"pref", mem>, CACHEOP_FM<0b110011>,
INSN_MIPS3_32_NOT_32R6_64R6;
//===----------------------------------------------------------------------===//
@@ -1531,8 +1564,6 @@ def : MipsInstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
let Predicates = [NotInMicroMips] in {
def : MipsInstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
}
-def : MipsInstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>;
-def : MipsInstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>;
def : MipsInstAlias<"jalr.hb $rs", (JALR_HB RA, GPR32Opnd:$rs), 1>, ISA_MIPS32;
def : MipsInstAlias<"not $rt, $rs",
(NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
@@ -1557,7 +1588,9 @@ def : MipsInstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
def : MipsInstAlias<"mtc0 $rt, $rd", (MTC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
def : MipsInstAlias<"mfc2 $rt, $rd", (MFC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
def : MipsInstAlias<"mtc2 $rt, $rd", (MTC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+let AdditionalPredicates = [NotInMicroMips] in {
def : MipsInstAlias<"b $offset", (BEQ ZERO, ZERO, brtarget:$offset), 0>;
+}
def : MipsInstAlias<"bnez $rs,$offset",
(BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
def : MipsInstAlias<"beqz $rs,$offset",
@@ -1606,7 +1639,7 @@ def : MipsInstAlias<"sync",
// Assembler Pseudo Instructions
//===----------------------------------------------------------------------===//
-class LoadImm32< string instr_asm, Operand Od, RegisterOperand RO> :
+class LoadImm32<string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
def LoadImm32Reg : LoadImm32<"li", uimm5, GPR32Opnd>;
@@ -1621,6 +1654,11 @@ class LoadAddressImm<string instr_asm, Operand Od, RegisterOperand RO> :
!strconcat(instr_asm, "\t$rt, $imm32")> ;
def LoadAddr32Imm : LoadAddressImm<"la", uimm5, GPR32Opnd>;
+def JalTwoReg : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs),
+ "jal\t$rd, $rs"> ;
+def JalOneReg : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs),
+ "jal\t$rs"> ;
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -1633,10 +1671,12 @@ class StoreRegImmPat<Instruction StoreInst, ValueType ValTy> :
MipsPat<(store ValTy:$v, addrRegImm:$a), (StoreInst ValTy:$v, addrRegImm:$a)>;
// Small immediates
+let AdditionalPredicates = [NotInMicroMips] in {
def : MipsPat<(i32 immSExt16:$in),
(ADDiu ZERO, imm:$in)>;
def : MipsPat<(i32 immZExt16:$in),
(ORi ZERO, imm:$in)>;
+}
def : MipsPat<(i32 immLow16Zero:$in),
(LUi (HI16 imm:$in))>;
@@ -1826,7 +1866,9 @@ def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>;
let AddedComplexity = 40 in {
def : LoadRegImmPat<LBu, i32, zextloadi8>;
def : LoadRegImmPat<LH, i32, sextloadi16>;
+ let AdditionalPredicates = [NotInMicroMips] in {
def : LoadRegImmPat<LW, i32, load>;
+ }
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp
index e44d6ee..90f8cc0 100644
--- a/lib/Target/Mips/MipsLongBranch.cpp
+++ b/lib/Target/Mips/MipsLongBranch.cpp
@@ -63,11 +63,9 @@ namespace {
public:
static char ID;
MipsLongBranch(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_),
- ABI(TM.getSubtarget<MipsSubtarget>().getABI()),
- LongBranchSeqSize(!IsPIC ? 2 : (ABI.IsN64() ? 10 :
- (!TM.getSubtarget<MipsSubtarget>().isTargetNaCl() ? 9 : 10))) {}
+ : MachineFunctionPass(ID), TM(tm),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_),
+ ABI(static_cast<const MipsTargetMachine &>(TM).getABI()) {}
const char *getPassName() const override {
return "Mips Long Branch";
@@ -110,8 +108,7 @@ static MachineBasicBlock *getTargetMBB(const MachineInstr &Br) {
return MO.getMBB();
}
- assert(false && "This instruction does not have an MBB operand.");
- return nullptr;
+ llvm_unreachable("This instruction does not have an MBB operand.");
}
// Traverse the list of instructions backwards until a non-debug instruction is
@@ -171,7 +168,7 @@ void MipsLongBranch::initMBBInfo() {
MBBInfos.resize(MF->size());
const MipsInstrInfo *TII =
- static_cast<const MipsInstrInfo *>(TM.getSubtargetImpl()->getInstrInfo());
+ static_cast<const MipsInstrInfo *>(MF->getSubtarget().getInstrInfo());
for (unsigned I = 0, E = MBBInfos.size(); I < E; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
@@ -217,8 +214,8 @@ int64_t MipsLongBranch::computeOffset(const MachineInstr *Br) {
// MachineBasicBlock operand MBBOpnd.
void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
DebugLoc DL, MachineBasicBlock *MBBOpnd) {
- const MipsInstrInfo *TII =
- static_cast<const MipsInstrInfo *>(TM.getSubtargetImpl()->getInstrInfo());
+ const MipsInstrInfo *TII = static_cast<const MipsInstrInfo *>(
+ MBB.getParent()->getSubtarget().getInstrInfo());
unsigned NewOpc = TII->getOppositeBranchOpc(Br->getOpcode());
const MCInstrDesc &NewDesc = TII->get(NewOpc);
@@ -237,15 +234,21 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
MIB.addMBB(MBBOpnd);
- // Bundle the instruction in the delay slot to the newly created branch
- // and erase the original branch.
- assert(Br->isBundledWithSucc());
- MachineBasicBlock::instr_iterator II(Br);
- MIBundleBuilder(&*MIB).append((++II)->removeFromBundle());
+ if (Br->hasDelaySlot()) {
+ // Bundle the instruction in the delay slot to the newly created branch
+ // and erase the original branch.
+ assert(Br->isBundledWithSucc());
+ MachineBasicBlock::instr_iterator II(Br);
+ MIBundleBuilder(&*MIB).append((++II)->removeFromBundle());
+ }
Br->eraseFromParent();
}
// Expand branch instructions to long branches.
+// TODO: This function has to be fixed for beqz16 and bnez16, because it
+// currently assumes that all branches have 16-bit offsets, and will produce
+// wrong code if branches whose allowed offsets are [-128, -126, ..., 126]
+// are present.
void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
MachineBasicBlock::iterator Pos;
MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br);
@@ -253,9 +256,10 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
-
+ const MipsSubtarget &Subtarget =
+ static_cast<const MipsSubtarget &>(MF->getSubtarget());
const MipsInstrInfo *TII =
- static_cast<const MipsInstrInfo *>(TM.getSubtargetImpl()->getInstrInfo());
+ static_cast<const MipsInstrInfo *>(Subtarget.getInstrInfo());
MF->insert(FallThroughMBB, LongBrMBB);
MBB->removeSuccessor(TgtMBB);
@@ -270,8 +274,6 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
// We must select between the MIPS32r6/MIPS64r6 BAL (which is a normal
// instruction) and the pre-MIPS32r6/MIPS64r6 definition (which is a
// pseudo-instruction wrapping BGEZAL).
-
- const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
unsigned BalOp = Subtarget.hasMips32r6() ? Mips::BAL : Mips::BAL_BR;
if (!ABI.IsN64()) {
@@ -328,7 +330,7 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LW), Mips::RA)
.addReg(Mips::SP).addImm(0);
- if (!TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ if (!Subtarget.isTargetNaCl()) {
MIBundleBuilder(*BalTgtMBB, Pos)
.append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
.append(BuildMI(*MF, DL, TII->get(Mips::ADDiu), Mips::SP)
@@ -447,14 +449,17 @@ static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) {
}
bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
+ const MipsSubtarget &STI =
+ static_cast<const MipsSubtarget &>(F.getSubtarget());
const MipsInstrInfo *TII =
- static_cast<const MipsInstrInfo *>(TM.getSubtargetImpl()->getInstrInfo());
+ static_cast<const MipsInstrInfo *>(STI.getInstrInfo());
+ LongBranchSeqSize =
+ !IsPIC ? 2 : (ABI.IsN64() ? 10 : (!STI.isTargetNaCl() ? 9 : 10));
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
if (STI.inMips16Mode() || !STI.enableLongBranchPass())
return false;
if ((TM.getRelocationModel() == Reloc::PIC_) &&
- TM.getSubtarget<MipsSubtarget>().isABI_O32() &&
+ static_cast<const MipsTargetMachine &>(TM).getABI().IsO32() &&
F.getInfo<MipsFunctionInfo>()->globalBaseRegSet())
emitGPDisp(F, TII);
@@ -476,10 +481,10 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
if (!I->Br || I->HasLongBranch)
continue;
- int ShVal = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode() ? 2 : 4;
+ int ShVal = STI.inMicroMipsMode() ? 2 : 4;
int64_t Offset = computeOffset(I->Br) / ShVal;
- if (TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ if (STI.isTargetNaCl()) {
// The offset calculation does not include sandboxing instructions
// that will be added later in the MC layer. Since at this point we
// don't know the exact amount of code that "sandboxing" will add, we
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index a89718a..30b93dc 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -7,10 +7,11 @@
//
//===----------------------------------------------------------------------===//
-#include "MipsMachineFunction.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsInstrInfo.h"
+#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
@@ -78,15 +79,14 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
if (GlobalBaseReg)
return GlobalBaseReg;
- const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
-
- const TargetRegisterClass *RC;
- if (ST.inMips16Mode())
- RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
- else
- RC = ST.isABI_N64() ?
- (const TargetRegisterClass*)&Mips::GPR64RegClass :
- (const TargetRegisterClass*)&Mips::GPR32RegClass;
+ const TargetRegisterClass *RC =
+ static_cast<const MipsSubtarget &>(MF.getSubtarget()).inMips16Mode()
+ ? &Mips::CPU16RegsRegClass
+ : static_cast<const MipsTargetMachine &>(MF.getTarget())
+ .getABI()
+ .IsN64()
+ ? &Mips::GPR64RegClass
+ : &Mips::GPR32RegClass;
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
@@ -98,16 +98,16 @@ unsigned MipsFunctionInfo::getMips16SPAliasReg() {
if (Mips16SPAliasReg)
return Mips16SPAliasReg;
- const TargetRegisterClass *RC;
- RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
+ const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
return Mips16SPAliasReg = MF.getRegInfo().createVirtualRegister(RC);
}
void MipsFunctionInfo::createEhDataRegsFI() {
for (int I = 0; I < 4; ++I) {
- const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
- const TargetRegisterClass *RC = ST.isABI_N64() ?
- &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ const TargetRegisterClass *RC =
+ static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64()
+ ? &Mips::GPR64RegClass
+ : &Mips::GPR32RegClass;
EhDataRegFI[I] = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
diff --git a/lib/Target/Mips/MipsOptimizePICCall.cpp b/lib/Target/Mips/MipsOptimizePICCall.cpp
index 22c524e..7c940ee 100644
--- a/lib/Target/Mips/MipsOptimizePICCall.cpp
+++ b/lib/Target/Mips/MipsOptimizePICCall.cpp
@@ -174,7 +174,7 @@ void MBBInfo::postVisit() {
// OptimizePICCall methods.
bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
- if (F.getTarget().getSubtarget<MipsSubtarget>().inMips16Mode())
+ if (static_cast<const MipsSubtarget &>(F.getSubtarget()).inMips16Mode())
return false;
// Do a pre-order traversal of the dominator tree.
diff --git a/lib/Target/Mips/MipsOptionRecord.h b/lib/Target/Mips/MipsOptionRecord.h
index f82544a..dc29cbd 100644
--- a/lib/Target/Mips/MipsOptionRecord.h
+++ b/lib/Target/Mips/MipsOptionRecord.h
@@ -36,9 +36,8 @@ public:
class MipsRegInfoRecord : public MipsOptionRecord {
public:
- MipsRegInfoRecord(MipsELFStreamer *S, MCContext &Context,
- const MCSubtargetInfo &STI)
- : Streamer(S), Context(Context), STI(STI) {
+ MipsRegInfoRecord(MipsELFStreamer *S, MCContext &Context)
+ : Streamer(S), Context(Context) {
ri_gprmask = 0;
ri_cprmask[0] = ri_cprmask[1] = ri_cprmask[2] = ri_cprmask[3] = 0;
ri_gp_value = 0;
@@ -61,7 +60,6 @@ public:
private:
MipsELFStreamer *Streamer;
MCContext &Context;
- const MCSubtargetInfo &STI;
const MCRegisterClass *GPR32RegClass;
const MCRegisterClass *GPR64RegClass;
const MCRegisterClass *FGR32RegClass;
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index 20ef3f3..2110c03 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -17,6 +17,7 @@
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -62,7 +63,7 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
case Mips::GPR32RegClassID:
case Mips::GPR64RegClassID:
case Mips::DSPRRegClassID: {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
return 28 - TFI->hasFP(MF);
}
case Mips::FGR32RegClassID:
@@ -167,7 +168,7 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(*Reg);
}
// Reserve FP if this function should have a dedicated frame pointer register.
- if (MF.getSubtarget().getFrameLowering()->hasFP(MF)) {
+ if (Subtarget.getFrameLowering()->hasFP(MF)) {
if (Subtarget.inMips16Mode())
Reserved.set(Mips::S0);
else {
@@ -256,8 +257,9 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- bool IsN64 = Subtarget.isABI_N64();
+ const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
+ bool IsN64 =
+ static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64();
if (Subtarget.inMips16Mode())
return TFI->hasFP(MF) ? Mips::S0 : Mips::SP;
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td
index 42fe76b..7497a25 100644
--- a/lib/Target/Mips/MipsRegisterInfo.td
+++ b/lib/Target/Mips/MipsRegisterInfo.td
@@ -289,10 +289,28 @@ def GPR32 : GPR32Class<[i32]>;
def DSPR : GPR32Class<[v4i8, v2i16]>;
def GPRMM16 : RegisterClass<"Mips", [i32], 32, (add
+ // Callee save
+ S0, S1,
// Return Values and Arguments
- V0, V1, A0, A1, A2, A3,
+ V0, V1, A0, A1, A2, A3)>;
+
+def GPRMM16Zero : RegisterClass<"Mips", [i32], 32, (add
+ // Reserved
+ ZERO,
// Callee save
- S0, S1)>;
+ S1,
+ // Return Values and Arguments
+ V0, V1, A0, A1, A2, A3)>;
+
+def GPRMM16MoveP : RegisterClass<"Mips", [i32], 32, (add
+ // Reserved
+ ZERO,
+ // Callee save
+ S1,
+ // Return Values and Arguments
+ V0, V1,
+ // Callee save
+ S0, S2, S3, S4)>;
def GPR64 : RegisterClass<"Mips", [i64], 64, (add
// Reserved
@@ -380,6 +398,8 @@ def MSA128W: RegisterClass<"Mips", [v4i32, v4f32], 128,
(sequence "W%u", 0, 31)>;
def MSA128D: RegisterClass<"Mips", [v2i64, v2f64], 128,
(sequence "W%u", 0, 31)>;
+def MSA128WEvens: RegisterClass<"Mips", [v4i32, v4f32], 128,
+ (decimate (sequence "W%u", 0, 31), 2)>;
def MSACtrl: RegisterClass<"Mips", [i32], 32, (add
MSAIR, MSACSR, MSAAccess, MSASave, MSAModify, MSARequest, MSAMap, MSAUnmap)>;
@@ -446,6 +466,16 @@ def GPRMM16AsmOperand : MipsAsmRegOperand {
let PredicateMethod = "isMM16AsmReg";
}
+def GPRMM16AsmOperandZero : MipsAsmRegOperand {
+ let Name = "GPRMM16AsmRegZero";
+ let PredicateMethod = "isMM16AsmRegZero";
+}
+
+def GPRMM16AsmOperandMoveP : MipsAsmRegOperand {
+ let Name = "GPRMM16AsmRegMoveP";
+ let PredicateMethod = "isMM16AsmRegMoveP";
+}
+
def ACC64DSPAsmOperand : MipsAsmRegOperand {
let Name = "ACC64DSPAsmReg";
let PredicateMethod = "isACCAsmReg";
@@ -505,6 +535,14 @@ def GPRMM16Opnd : RegisterOperand<GPRMM16> {
let ParserMatchClass = GPRMM16AsmOperand;
}
+def GPRMM16OpndZero : RegisterOperand<GPRMM16Zero> {
+ let ParserMatchClass = GPRMM16AsmOperandZero;
+}
+
+def GPRMM16OpndMoveP : RegisterOperand<GPRMM16MoveP> {
+ let ParserMatchClass = GPRMM16AsmOperandMoveP;
+}
+
def GPR64Opnd : RegisterOperand<GPR64> {
let ParserMatchClass = GPR64AsmOperand;
}
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index 97d9edf..7c79c4c 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -71,11 +71,17 @@ private:
MachineFunction &MF;
MachineRegisterInfo &MRI;
+ const MipsSubtarget &Subtarget;
+ const MipsSEInstrInfo &TII;
+ const MipsRegisterInfo &RegInfo;
};
}
ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
- : MF(MF_), MRI(MF.getRegInfo()) {}
+ : MF(MF_), MRI(MF.getRegInfo()),
+ Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
+ TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
+ RegInfo(*Subtarget.getRegisterInfo()) {}
bool ExpandPseudo::expand() {
bool Expanded = false;
@@ -146,11 +152,6 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
- const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
-
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -166,11 +167,6 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
- const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
-
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -189,11 +185,6 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
- const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
-
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@@ -219,11 +210,6 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
- const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
-
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@@ -254,11 +240,6 @@ bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
// mfhi $vr1, src
// copy dst_hi, $vr1
- const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
-
unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
@@ -298,16 +279,8 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
// register). Unfortunately, we have to make this decision before register
// allocation so for now we use a spill/reload sequence for all
// double-precision values regardless of whether they are in an odd/even
// register.
-
- const TargetMachine &TM = MF.getTarget();
- const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
(FP64 && !Subtarget.useOddSPReg())) {
- const MipsSEInstrInfo &TII = *static_cast<const MipsSEInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- const MipsRegisterInfo &TRI = *static_cast<const MipsRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
-
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg();
unsigned HiReg = I->getOperand(2).getReg();
@@ -327,11 +300,11 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
if (!Subtarget.isLittle())
std::swap(LoReg, HiReg);
- TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, &TRI,
- 0);
- TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, &TRI,
- 4);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, 0);
+ TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
+ &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
+ &RegInfo, 4);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
return true;
}
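// Rough shape of the resulting sequence (a sketch under the assumption that
// sw/ldc1 are what storeRegToStack/loadRegFromStack pick here; little-endian
// operand order shown):
//   sw   $lo, 0(spill-slot)
//   sw   $hi, 4(spill-slot)
//   ldc1 $fd, 0(spill-slot)   # reload the pair as one 64-bit FPR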
@@ -359,15 +332,8 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// allocation so for now we use a spill/reload sequence for all
// double-precision values regardless of whether they are in an odd/even
// register.
- const TargetMachine &TM = MF.getTarget();
- const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
(FP64 && !Subtarget.useOddSPReg())) {
- const MipsSEInstrInfo &TII = *static_cast<const MipsSEInstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- const MipsRegisterInfo &TRI = *static_cast<const MipsRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
-
unsigned DstReg = I->getOperand(0).getReg();
unsigned SrcReg = I->getOperand(1).getReg();
unsigned N = I->getOperand(2).getImm();
@@ -386,9 +352,9 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// We re-use the same spill slot each time so that the stack frame doesn't
// grow too much in functions with a large number of moves.
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
- TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC, &TRI,
- 0);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, Offset);
+ TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC,
+ &RegInfo, 0);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
return true;
}
@@ -415,9 +381,9 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
@@ -550,9 +516,9 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());
DebugLoc dl = MBBI->getDebugLoc();
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
@@ -605,7 +571,7 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const {
MachineFunction *MF = MBB.getParent();
MachineBasicBlock *EntryBlock = MF->begin();
- const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in. Do not add if the register is
@@ -646,7 +612,7 @@ void MipsSEFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
const MipsSEInstrInfo &TII =
- *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
if (!hasReservedCallFrame(MF)) {
int64_t Amount = I->getOperand(0).getImm();
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index f759905..0761ded 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -37,7 +37,7 @@ using namespace llvm;
#define DEBUG_TYPE "mips-isel"
bool MipsSEDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
if (Subtarget->inMips16Mode())
return false;
return MipsDAGToDAGISel::runOnMachineFunction(MF);
@@ -130,20 +130,17 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator I = MBB.begin();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
unsigned V0, V1, GlobalBaseReg = MipsFI->getGlobalBaseReg();
const TargetRegisterClass *RC;
-
- if (Subtarget->isABI_N64())
- RC = (const TargetRegisterClass*)&Mips::GPR64RegClass;
- else
- RC = (const TargetRegisterClass*)&Mips::GPR32RegClass;
+ const MipsABIInfo &ABI = static_cast<const MipsTargetMachine &>(TM).getABI();
+ RC = (ABI.IsN64()) ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
- if (Subtarget->isABI_N64()) {
+ if (ABI.IsN64()) {
MF.getRegInfo().addLiveIn(Mips::T9_64);
MBB.addLiveIn(Mips::T9_64);
@@ -175,7 +172,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
MF.getRegInfo().addLiveIn(Mips::T9);
MBB.addLiveIn(Mips::T9);
- if (Subtarget->isABI_N32()) {
+ if (ABI.IsN32()) {
// lui $v0, %hi(%neg(%gp_rel(fname)))
// addu $v1, $v0, $t9
// addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
@@ -188,7 +185,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
return;
}
- assert(Subtarget->isABI_O32());
+ assert(ABI.IsO32());
// For O32 ABI, the following instruction sequence is emitted to initialize
// the global base register:
@@ -239,13 +236,31 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
(Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
"(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
+ unsigned SLTuOp = Mips::SLTu, ADDuOp = Mips::ADDu;
+ if (Subtarget->isGP64bit()) {
+ SLTuOp = Mips::SLTu64;
+ ADDuOp = Mips::DADDu;
+ }
+
SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
SDValue LHS = Node->getOperand(0), RHS = Node->getOperand(1);
EVT VT = LHS.getValueType();
- SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, DL, VT, Ops);
- SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, DL, VT,
+ SDNode *Carry = CurDAG->getMachineNode(SLTuOp, DL, VT, Ops);
+
+ if (Subtarget->isGP64bit()) {
+ // On 64-bit targets, sltu produces an i64 but our backend currently says
+ // that SLTu64 produces an i32. We need to fix this in the long run but for
+ // now, just make the DAG type-correct by asserting the upper bits are zero.
+ Carry = CurDAG->getMachineNode(Mips::SUBREG_TO_REG, DL, VT,
+ CurDAG->getTargetConstant(0, VT),
+ SDValue(Carry, 0),
+ CurDAG->getTargetConstant(Mips::sub_32, VT));
+ }
+
+ SDNode *AddCarry = CurDAG->getMachineNode(ADDuOp, DL, VT,
SDValue(Carry, 0), RHS);
+
return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS,
SDValue(AddCarry, 0));
}
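// In instruction terms, the carry expansion above for ADDE is roughly (sketch
// only, register names hypothetical; SUBE substitutes subu for the final
// addu):
//   sltu $t0, $sum, $b        # carry-out of the earlier ADDC/SUBC
//   addu $t1, $t0, $rhs
//   addu $res, $lhs, $t1
// On 64-bit subtargets these become SLTu64/DADDu, with the i32 carry widened
// to i64 via SUBREG_TO_REG as shown.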
@@ -392,6 +407,28 @@ bool MipsSEDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
selectAddrDefault(Addr, Base, Offset);
}
+bool MipsSEDAGToDAGISel::selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ if (selectAddrFrameIndexOffset(Addr, Base, Offset, 7)) {
+ if (isa<FrameIndexSDNode>(Base))
+ return false;
+
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Offset)) {
+ unsigned CnstOff = CN->getZExtValue();
+ return (CnstOff == (CnstOff & 0x3c));
+ }
+
+ return false;
+ }
+
+ // For all other cases where "lw" would be selected, don't select "lw16"
+ // because it would result in additional instructions to prepare operands.
+ if (selectAddrRegImm(Addr, Base, Offset))
+ return false;
+
+ return selectAddrDefault(Addr, Base, Offset);
+}
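// Offset examples for the check above (sketch): masking with 0x3c keeps only
// word-aligned offsets in [0, 60], so 0, 4, ..., 60 are selectable for the
// 16-bit form, while 2 (misaligned) and 64 (out of range) are rejected and
// take the ordinary selection path.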
+
bool MipsSEDAGToDAGISel::selectIntAddrMSA(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
if (selectAddrRegImm10(Addr, Base, Offset))
@@ -644,7 +681,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
case ISD::SUBE: {
SDValue InFlag = Node->getOperand(2);
- Result = selectAddESubE(Mips::SUBu, InFlag, InFlag.getOperand(0), DL, Node);
+ unsigned Opc = Subtarget->isGP64bit() ? Mips::DSUBu : Mips::SUBu;
+ Result = selectAddESubE(Opc, InFlag, InFlag.getOperand(0), DL, Node);
return std::make_pair(true, Result);
}
@@ -652,7 +690,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
if (Subtarget->hasDSP()) // Select DSP instructions, ADDSC and ADDWC.
break;
SDValue InFlag = Node->getOperand(2);
- Result = selectAddESubE(Mips::ADDu, InFlag, InFlag.getValue(0), DL, Node);
+ unsigned Opc = Subtarget->isGP64bit() ? Mips::DADDu : Mips::ADDu;
+ Result = selectAddESubE(Opc, InFlag, InFlag.getValue(0), DL, Node);
return std::make_pair(true, Result);
}
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index 2e11fa7..2d24eb4 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -65,6 +65,9 @@ private:
bool selectIntAddrMM(SDValue Addr, SDValue &Base,
SDValue &Offset) const override;
+ bool selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
+
bool selectIntAddrMSA(SDValue Addr, SDValue &Base,
SDValue &Offset) const override;
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index 4a0ce09..09ff4f9 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -46,17 +46,13 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
// Expand all truncating stores and extending loads.
- unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
-
- for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
- for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
- setTruncStoreAction((MVT::SimpleValueType)VT0,
- (MVT::SimpleValueType)VT1, Expand);
-
- setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
- setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ for (MVT VT0 : MVT::vector_valuetypes()) {
+ for (MVT VT1 : MVT::vector_valuetypes()) {
+ setTruncStoreAction(VT0, VT1, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT0, VT1, Expand);
+ }
}
}
@@ -126,6 +122,8 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::MUL, MVT::i64, Custom);
if (Subtarget.isGP64bit()) {
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Custom);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
setOperationAction(ISD::MULHS, MVT::i64, Custom);
setOperationAction(ISD::MULHU, MVT::i64, Custom);
setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
@@ -204,6 +202,8 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
if (Subtarget.hasMips64r6()) {
// MIPS64r6 replaces the accumulator-based multiplies with a three register
// instruction
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::MUL, MVT::i64, Legal);
setOperationAction(ISD::MULHS, MVT::i64, Legal);
setOperationAction(ISD::MULHU, MVT::i64, Legal);
@@ -224,7 +224,7 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
}
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget.getRegisterInfo());
}
const MipsTargetLowering *
@@ -1836,11 +1836,9 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_fill_h:
case Intrinsic::mips_fill_w:
case Intrinsic::mips_fill_d: {
- SmallVector<SDValue, 16> Ops;
EVT ResTy = Op->getValueType(0);
-
- for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
- Ops.push_back(Op->getOperand(1));
+ SmallVector<SDValue, 16> Ops(ResTy.getVectorNumElements(),
+ Op->getOperand(1));
// If ResTy is v2i64 then the type legalizer will break this node down into
// an equivalent v4i32.
@@ -2291,9 +2289,9 @@ lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
static bool isConstantOrUndef(const SDValue Op) {
if (Op->getOpcode() == ISD::UNDEF)
return true;
- if (dyn_cast<ConstantSDNode>(Op))
+ if (isa<ConstantSDNode>(Op))
return true;
- if (dyn_cast<ConstantFPSDNode>(Op))
+ if (isa<ConstantFPSDNode>(Op))
return true;
return false;
}
@@ -2747,8 +2745,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// $vr0 = phi($vr2, $fbb, $vr1, $tbb)
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -2813,8 +2810,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
// $rd = phi($rd1, $fbb, $rd2, $tbb)
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -2875,18 +2871,28 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
// for lane 1 because it would require FR=0 mode which isn't supported by MSA.
MachineBasicBlock * MipsSETargetLowering::
emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Fd = MI->getOperand(0).getReg();
unsigned Ws = MI->getOperand(1).getReg();
unsigned Lane = MI->getOperand(2).getImm();
- if (Lane == 0)
- BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
- else {
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ if (Lane == 0) {
+ unsigned Wt = Ws;
+ if (!Subtarget.useOddSPReg()) {
+ // We must copy to an even-numbered MSA register so that the
+ // single-precision sub-register is also guaranteed to be even-numbered.
+ Wt = RegInfo.createVirtualRegister(&Mips::MSA128WEvensRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Wt).addReg(Ws);
+ }
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
+ } else {
+ unsigned Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
+ &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
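// Register example (illustrative numbering): with odd single-precision
// registers disabled, lane 0 of $w5 cannot be read through sub_lo because
// that sub-register would be the odd $f5; the extra COPY above first moves
// the vector to an even register, e.g. $w4, whose sub_lo is the legal $f4.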
@@ -2910,8 +2916,7 @@ MachineBasicBlock * MipsSETargetLowering::
emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
assert(Subtarget.isFP64bit());
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
unsigned Fd = MI->getOperand(0).getReg();
unsigned Ws = MI->getOperand(1).getReg();
@@ -2940,15 +2945,16 @@ emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
MachineBasicBlock *
MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Wd = MI->getOperand(0).getReg();
unsigned Wd_in = MI->getOperand(1).getReg();
unsigned Lane = MI->getOperand(2).getImm();
unsigned Fs = MI->getOperand(3).getReg();
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ unsigned Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
+ &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
.addImm(0)
@@ -2975,8 +2981,7 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
MachineBasicBlock *BB) const {
assert(Subtarget.isFP64bit());
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Wd = MI->getOperand(0).getReg();
@@ -3024,8 +3029,7 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned EltSizeInBytes,
bool IsFP) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Wd = MI->getOperand(0).getReg();
@@ -3135,8 +3139,7 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Wd = MI->getOperand(0).getReg();
@@ -3167,8 +3170,7 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
MachineBasicBlock *BB) const {
assert(Subtarget.isFP64bit());
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Wd = MI->getOperand(0).getReg();
@@ -3196,8 +3198,7 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
MachineBasicBlock *
MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
unsigned Ws1 = RegInfo.createVirtualRegister(RC);
@@ -3226,8 +3227,7 @@ MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
MachineBasicBlock *
MipsSETargetLowering::emitFEXP2_D_1(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
unsigned Ws1 = RegInfo.createVirtualRegister(RC);
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index 16bea8b..74f291f 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
: MipsInstrInfo(STI, STI.getRelocationModel() == Reloc::PIC_ ? Mips::B
: Mips::J),
- RI(STI), IsN64(STI.isABI_N64()) {}
+ RI(STI) {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
return RI;
@@ -38,9 +38,8 @@ const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned MipsSEInstrInfo::
-isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
@@ -61,9 +60,8 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned MipsSEInstrInfo::
-isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
@@ -352,6 +350,8 @@ unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
case Mips::BLEZ64: return Mips::BGTZ64;
case Mips::BC1T: return Mips::BC1F;
case Mips::BC1F: return Mips::BC1T;
+ case Mips::BEQZC_MM: return Mips::BNEZC_MM;
+ case Mips::BNEZC_MM: return Mips::BEQZC_MM;
}
}
@@ -422,7 +422,7 @@ unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
- Opc == Mips::J) ?
+ Opc == Mips::J || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM) ?
Opc : 0;
}
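
Making BEQZC_MM and BNEZC_MM analyzable pairs with the getOppositeBranchOpc() entries added earlier in this file, so branch folding can reverse the compact microMIPS branches like any other pair. A sanity sketch of the expected mapping (illustrative, assuming a MipsSEInstrInfo *TII):

    // The two compact branches are exact inverses, mirroring the
    // existing BEQ/BNE and BC1T/BC1F pairs.
    assert(TII->getOppositeBranchOpc(Mips::BEQZC_MM) == Mips::BNEZC_MM);
    assert(TII->getOppositeBranchOpc(Mips::BNEZC_MM) == Mips::BEQZC_MM);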
@@ -620,18 +620,13 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
// jr $ra (via RetRA)
const TargetMachine &TM = MBB.getParent()->getTarget();
if (TM.getRelocationModel() == Reloc::PIC_)
- BuildMI(MBB, I, I->getDebugLoc(),
- TM.getSubtargetImpl()->getInstrInfo()->get(ADDU), T9)
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), T9)
.addReg(TargetReg)
.addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(),
- TM.getSubtargetImpl()->getInstrInfo()->get(ADDU), RA)
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), RA)
.addReg(TargetReg)
.addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(),
- TM.getSubtargetImpl()->getInstrInfo()->get(ADDU), SP)
- .addReg(SP)
- .addReg(OffsetReg);
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), SP).addReg(SP).addReg(OffsetReg);
expandRetRA(MBB, I);
}
diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h
index b2d2301..d16fab2 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/lib/Target/Mips/MipsSEInstrInfo.h
@@ -21,7 +21,6 @@ namespace llvm {
class MipsSEInstrInfo : public MipsInstrInfo {
const MipsSERegisterInfo RI;
- bool IsN64;
public:
explicit MipsSEInstrInfo(const MipsSubtarget &STI);
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 8768b12..26f39a2 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -33,120 +33,61 @@ using namespace llvm;
// FIXME: Maybe this should be on by default when Mips16 is specified
//
-static cl::opt<bool> Mixed16_32(
- "mips-mixed-16-32",
- cl::init(false),
- cl::desc("Allow for a mixture of Mips16 "
- "and Mips32 code in a single source file"),
- cl::Hidden);
-
-static cl::opt<bool> Mips_Os16(
- "mips-os16",
- cl::init(false),
- cl::desc("Compile all functions that don' use "
- "floating point as Mips 16"),
- cl::Hidden);
-
static cl::opt<bool>
-Mips16HardFloat("mips16-hard-float", cl::NotHidden,
- cl::desc("MIPS: mips16 hard float enable."),
- cl::init(false));
+ Mixed16_32("mips-mixed-16-32", cl::init(false),
+ cl::desc("Allow for a mixture of Mips16 "
+ "and Mips32 code in a single output file"),
+ cl::Hidden);
+
+static cl::opt<bool> Mips_Os16("mips-os16", cl::init(false),
+ cl::desc("Compile all functions that don't use "
+ "floating point as Mips 16"),
+ cl::Hidden);
+
+static cl::opt<bool> Mips16HardFloat("mips16-hard-float", cl::NotHidden,
+ cl::desc("Enable mips16 hard float."),
+ cl::init(false));
static cl::opt<bool>
-Mips16ConstantIslands(
- "mips16-constant-islands", cl::NotHidden,
- cl::desc("MIPS: mips16 constant islands enable."),
- cl::init(true));
+ Mips16ConstantIslands("mips16-constant-islands", cl::NotHidden,
+ cl::desc("Enable mips16 constant islands."),
+ cl::init(true));
static cl::opt<bool>
-GPOpt("mgpopt", cl::Hidden,
- cl::desc("MIPS: Enable gp-relative addressing of small data items"));
-
-/// Select the Mips CPU for the given triple and cpu name.
-/// FIXME: Merge with the copy in MipsMCTargetDesc.cpp
-static StringRef selectMipsCPU(Triple TT, StringRef CPU) {
- if (CPU.empty() || CPU == "generic") {
- if (TT.getArch() == Triple::mips || TT.getArch() == Triple::mipsel)
- CPU = "mips32";
- else
- CPU = "mips64";
- }
- return CPU;
-}
+ GPOpt("mgpopt", cl::Hidden,
+ cl::desc("Enable gp-relative addressing of mips small data items"));
void MipsSubtarget::anchor() { }
-static std::string computeDataLayout(const MipsSubtarget &ST) {
- std::string Ret = "";
-
- // There are both little and big endian mips.
- if (ST.isLittle())
- Ret += "e";
- else
- Ret += "E";
-
- Ret += "-m:m";
-
- // Pointers are 32 bit on some ABIs.
- if (!ST.isABI_N64())
- Ret += "-p:32:32";
-
- // 8 and 16 bit integers only need no have natural alignment, but try to
- // align them to 32 bits. 64 bit integers have natural alignment.
- Ret += "-i8:8:32-i16:16:32-i64:64";
-
- // 32 bit registers are always available and the stack is at least 64 bit
- // aligned. On N64 64 bit registers are also available and the stack is
- // 128 bit aligned.
- if (ST.isABI_N64() || ST.isABI_N32())
- Ret += "-n32:64-S128";
- else
- Ret += "-n32-S64";
-
- return Ret;
-}
-
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool little,
- const MipsTargetMachine *_TM)
+ const MipsTargetMachine &TM)
: MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(MipsDefault),
- ABI(MipsABIInfo::Unknown()), IsLittle(little), IsSingleFloat(false),
- IsFPXX(false), NoABICalls(false), IsFP64bit(false), UseOddSPReg(true),
- IsNaN2008bit(false), IsGP64bit(false), HasVFPU(false), HasCnMips(false),
- IsLinux(true), HasMips3_32(false), HasMips3_32r2(false),
- HasMips4_32(false), HasMips4_32r2(false), HasMips5_32r2(false),
- InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
- InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
- AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
- HasMSA(false), TM(_TM), TargetTriple(TT),
- DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS, TM))),
- TSInfo(DL), InstrInfo(MipsInstrInfo::create(*this)),
+ IsLittle(little), IsSingleFloat(false), IsFPXX(false), NoABICalls(false),
+ IsFP64bit(false), UseOddSPReg(true), IsNaN2008bit(false),
+ IsGP64bit(false), HasVFPU(false), HasCnMips(false), HasMips3_32(false),
+ HasMips3_32r2(false), HasMips4_32(false), HasMips4_32r2(false),
+ HasMips5_32r2(false), InMips16Mode(false),
+ InMips16HardFloat(Mips16HardFloat), InMicroMipsMode(false), HasDSP(false),
+ HasDSPR2(false), AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
+ HasMSA(false), TM(TM), TargetTriple(TT), TSInfo(*TM.getDataLayout()),
+ InstrInfo(
+ MipsInstrInfo::create(initializeSubtargetDependencies(CPU, FS, TM))),
FrameLowering(MipsFrameLowering::create(*this)),
- TLInfo(MipsTargetLowering::create(*TM, *this)) {
+ TLInfo(MipsTargetLowering::create(TM, *this)) {
PreviousInMips16Mode = InMips16Mode;
if (MipsArchVersion == MipsDefault)
MipsArchVersion = Mips32;
- // Don't even attempt to generate code for MIPS-I, MIPS-III and MIPS-V.
- // They have not been tested and currently exist for the integrated
- // assembler only.
+ // Don't even attempt to generate code for MIPS-I and MIPS-V. They have not
+ // been tested and currently exist for the integrated assembler only.
if (MipsArchVersion == Mips1)
report_fatal_error("Code generation for MIPS-I is not implemented", false);
- if (MipsArchVersion == Mips3)
- report_fatal_error("Code generation for MIPS-III is not implemented",
- false);
if (MipsArchVersion == Mips5)
report_fatal_error("Code generation for MIPS-V is not implemented", false);
- // Assert exactly one ABI was chosen.
- assert(ABI.IsKnown());
- assert((((getFeatureBits() & Mips::FeatureO32) != 0) +
- ((getFeatureBits() & Mips::FeatureEABI) != 0) +
- ((getFeatureBits() & Mips::FeatureN32) != 0) +
- ((getFeatureBits() & Mips::FeatureN64) != 0)) == 1);
-
// Check if Architecture and ABI are compatible.
assert(((!isGP64bit() && (isABI_O32() || isABI_EABI())) ||
(isGP64bit() && (isABI_N32() || isABI_N64()))) &&
@@ -172,11 +113,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
report_fatal_error(ISA + " is not compatible with the DSP ASE", false);
}
- // Is the target system Linux ?
- if (TT.find("linux") == std::string::npos)
- IsLinux = false;
-
- if (NoABICalls && TM->getRelocationModel() == Reloc::PIC_)
+ if (NoABICalls && TM.getRelocationModel() == Reloc::PIC_)
report_fatal_error("position-independent code requires '-mabicalls'");
// Set UseSmallSection.
@@ -203,22 +140,22 @@ CodeGenOpt::Level MipsSubtarget::getOptLevelToEnablePostRAScheduler() const {
MipsSubtarget &
MipsSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS,
- const TargetMachine *TM) {
- std::string CPUName = selectMipsCPU(TargetTriple, CPU);
-
+ const TargetMachine &TM) {
+ std::string CPUName = MIPS_MC::selectMipsCPU(TM.getTargetTriple(), CPU);
+
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUName);
- if (InMips16Mode && !TM->Options.UseSoftFloat)
+ if (InMips16Mode && !TM.Options.UseSoftFloat)
InMips16HardFloat = true;
return *this;
}
bool MipsSubtarget::abiUsesSoftFloat() const {
- return TM->Options.UseSoftFloat && !InMips16HardFloat;
+ return TM.Options.UseSoftFloat && !InMips16HardFloat;
}
bool MipsSubtarget::useConstantIslands() {
@@ -227,5 +164,11 @@ bool MipsSubtarget::useConstantIslands() {
}
Reloc::Model MipsSubtarget::getRelocationModel() const {
- return TM->getRelocationModel();
+ return TM.getRelocationModel();
}
+
+bool MipsSubtarget::isABI_EABI() const { return getABI().IsEABI(); }
+bool MipsSubtarget::isABI_N64() const { return getABI().IsN64(); }
+bool MipsSubtarget::isABI_N32() const { return getABI().IsN32(); }
+bool MipsSubtarget::isABI_O32() const { return getABI().IsO32(); }
+const MipsABIInfo &MipsSubtarget::getABI() const { return TM.getABI(); }
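
With the MipsABIInfo now owned by the target machine, a subtarget ABI query resolves through a short delegation chain instead of a locally stored ABI member. Spelled out for one predicate (a sketch, assuming a MipsSubtarget ST built from a MipsTargetMachine TM):

    bool A = ST.isABI_N64();      // public predicate, defined just above
    bool B = ST.getABI().IsN64(); // same answer via the shared MipsABIInfo
    bool C = TM.getABI().IsN64(); // the object both of the above reach
    assert(A == B && B == C);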
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index bff9013..faded8a 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_MIPS_MIPSSUBTARGET_H
#define LLVM_LIB_TARGET_MIPS_MIPSSUBTARGET_H
+#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsFrameLowering.h"
#include "MipsISelLowering.h"
#include "MipsInstrInfo.h"
@@ -22,7 +23,6 @@
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetSubtargetInfo.h"
-#include "MipsABIInfo.h"
#include <string>
#define GET_SUBTARGETINFO_HEADER
@@ -38,16 +38,13 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
enum MipsArchEnum {
MipsDefault,
- Mips1, Mips2, Mips32, Mips32r2, Mips32r6, Mips3, Mips4, Mips5, Mips64,
- Mips64r2, Mips64r6
+ Mips1, Mips2, Mips32, Mips32r2, Mips32r3, Mips32r5, Mips32r6, Mips32Max,
+ Mips3, Mips4, Mips5, Mips64, Mips64r2, Mips64r3, Mips64r5, Mips64r6
};
// Mips architecture version
MipsArchEnum MipsArchVersion;
- // Selected ABI
- MipsABIInfo ABI;
-
// IsLittle - The target is Little Endian
bool IsLittle;
@@ -136,11 +133,10 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
// as from the command line
enum {NoOverride, Mips16Override, NoMips16Override} OverrideMode;
- const MipsTargetMachine *TM;
+ const MipsTargetMachine &TM;
Triple TargetTriple;
- const DataLayout DL; // Calculates type size & alignment
const MipsSelectionDAGInfo TSInfo;
std::unique_ptr<const MipsInstrInfo> InstrInfo;
std::unique_ptr<const MipsFrameLowering> FrameLowering;
@@ -153,18 +149,18 @@ public:
CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const override;
/// Only O32 and EABI supported right now.
- bool isABI_EABI() const { return ABI.IsEABI(); }
- bool isABI_N64() const { return ABI.IsN64(); }
- bool isABI_N32() const { return ABI.IsN32(); }
- bool isABI_O32() const { return ABI.IsO32(); }
+ bool isABI_EABI() const;
+ bool isABI_N64() const;
+ bool isABI_N32() const;
+ bool isABI_O32() const;
+ const MipsABIInfo &getABI() const;
bool isABI_FPXX() const { return isABI_O32() && IsFPXX; }
- const MipsABIInfo &getABI() const { return ABI; }
/// This constructor initializes the data members to match that
/// of the specified triple.
MipsSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool little,
- const MipsTargetMachine *TM);
+ const MipsTargetMachine &TM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -178,21 +174,30 @@ public:
bool hasMips4_32() const { return HasMips4_32; }
bool hasMips4_32r2() const { return HasMips4_32r2; }
bool hasMips32() const {
- return MipsArchVersion >= Mips32 && MipsArchVersion != Mips3 &&
- MipsArchVersion != Mips4 && MipsArchVersion != Mips5;
+ return (MipsArchVersion >= Mips32 && MipsArchVersion < Mips32Max) ||
+ hasMips64();
}
bool hasMips32r2() const {
- return MipsArchVersion == Mips32r2 || MipsArchVersion == Mips32r6 ||
- MipsArchVersion == Mips64r2 || MipsArchVersion == Mips64r6;
+ return (MipsArchVersion >= Mips32r2 && MipsArchVersion < Mips32Max) ||
+ hasMips64r2();
+ }
+ bool hasMips32r3() const {
+ return (MipsArchVersion >= Mips32r3 && MipsArchVersion < Mips32Max) ||
+ hasMips64r2();
+ }
+ bool hasMips32r5() const {
+ return (MipsArchVersion >= Mips32r5 && MipsArchVersion < Mips32Max) ||
+ hasMips64r2();
}
bool hasMips32r6() const {
- return MipsArchVersion == Mips32r6 || MipsArchVersion == Mips64r6;
+ return (MipsArchVersion >= Mips32r6 && MipsArchVersion < Mips32Max) ||
+ hasMips64r6();
}
bool hasMips64() const { return MipsArchVersion >= Mips64; }
- bool hasMips64r2() const {
- return MipsArchVersion == Mips64r2 || MipsArchVersion == Mips64r6;
- }
- bool hasMips64r6() const { return MipsArchVersion == Mips64r6; }
+ bool hasMips64r2() const { return MipsArchVersion >= Mips64r2; }
+ bool hasMips64r3() const { return MipsArchVersion >= Mips64r3; }
+ bool hasMips64r5() const { return MipsArchVersion >= Mips64r5; }
+ bool hasMips64r6() const { return MipsArchVersion >= Mips64r6; }
bool hasCnMips() const { return HasCnMips; }
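
The rewritten predicates replace enumerated equality checks with range tests: Mips32Max fences off the 32-bit revision chain, and each 32-bit predicate falls through to its 64-bit counterpart. Worked through for two concrete subtargets (derived from the comparisons above, assuming a MipsSubtarget ST):

    // Built as mips32r5: Mips32 <= Mips32r5 < Mips32Max.
    assert(ST.hasMips32() && ST.hasMips32r2() && ST.hasMips32r3() &&
           ST.hasMips32r5() && !ST.hasMips32r6());
    // Built as mips64r6: every hasMips64rN() >= test passes, which in
    // turn satisfies every hasMips32rN() through its trailing fallback.
    assert(ST64.hasMips64r6() && ST64.hasMips32r6());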
@@ -223,7 +228,6 @@ public:
bool hasDSP() const { return HasDSP; }
bool hasDSPR2() const { return HasDSPR2; }
bool hasMSA() const { return HasMSA; }
- bool isLinux() const { return IsLinux; }
bool useSmallSection() const { return UseSmallSection; }
bool hasStandardEncoding() const { return !inMips16Mode(); }
@@ -239,9 +243,9 @@ public:
bool hasMTHC1() const { return hasMips32r2(); }
bool allowMixed16_32() const { return inMips16ModeDefault() |
- AllowMixed16_32;}
+ AllowMixed16_32; }
- bool os16() const { return Os16;};
+ bool os16() const { return Os16; }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
@@ -255,7 +259,7 @@ public:
Reloc::Model getRelocationModel() const;
MipsSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS,
- const TargetMachine *TM);
+ const TargetMachine &TM);
/// Does the system support unaligned memory access.
///
@@ -271,7 +275,6 @@ public:
const MipsSelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
- const DataLayout *getDataLayout() const override { return &DL; }
const MipsInstrInfo *getInstrInfo() const override { return InstrInfo.get(); }
const TargetFrameLowering *getFrameLowering() const override {
return FrameLowering.get();
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 33280e3..86c8931 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -29,7 +29,7 @@
#include "MipsTargetObjectFile.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -46,6 +46,36 @@ extern "C" void LLVMInitializeMipsTarget() {
RegisterTargetMachine<MipselTargetMachine> B(TheMips64elTarget);
}
+static std::string computeDataLayout(bool isLittle, MipsABIInfo &ABI) {
+ std::string Ret = "";
+
+ // There are both little and big endian mips.
+ if (isLittle)
+ Ret += "e";
+ else
+ Ret += "E";
+
+ Ret += "-m:m";
+
+ // Pointers are 32 bit on some ABIs.
+ if (!ABI.IsN64())
+ Ret += "-p:32:32";
+
+  // 8 and 16 bit integers only need to have natural alignment, but try to
+ // align them to 32 bits. 64 bit integers have natural alignment.
+ Ret += "-i8:8:32-i16:16:32-i64:64";
+
+ // 32 bit registers are always available and the stack is at least 64 bit
+ // aligned. On N64 64 bit registers are also available and the stack is
+ // 128 bit aligned.
+ if (ABI.IsN64() || ABI.IsN32())
+ Ret += "-n32:64-S128";
+ else
+ Ret += "-n32-S64";
+
+ return Ret;
+}
+
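
computeDataLayout() moves here from MipsSubtarget.cpp (deleted above), now keyed off MipsABIInfo rather than the subtarget, because the ABI is decided by the target machine; the strings it produces are unchanged. Two examples obtained by following its branches (illustrative, not quoted from the patch):

    little-endian O32:  e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64
    big-endian N64:     E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128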
// On function prologue, the stack is created by decrementing
// its pointer. Once decremented, all references are done with positive
// offset from the stack/frame pointer, using StackGrowsUp enables
@@ -57,14 +87,14 @@ MipsTargetMachine::MipsTargetMachine(const Target &T, StringRef TT,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- isLittle(isLittle),
- TLOF(make_unique<MipsTargetObjectFile>()),
- Subtarget(nullptr),
- DefaultSubtarget(TT, CPU, FS, isLittle, this),
+ isLittle(isLittle), TLOF(make_unique<MipsTargetObjectFile>()),
+ ABI(MipsABIInfo::computeTargetABI(Triple(TT), CPU, Options.MCOptions)),
+ DL(computeDataLayout(isLittle, ABI)), Subtarget(nullptr),
+ DefaultSubtarget(TT, CPU, FS, isLittle, *this),
NoMips16Subtarget(TT, CPU, FS.empty() ? "-mips16" : FS.str() + ",-mips16",
- isLittle, this),
+ isLittle, *this),
Mips16Subtarget(TT, CPU, FS.empty() ? "+mips16" : FS.str() + ",+mips16",
- isLittle, this) {
+ isLittle, *this) {
Subtarget = &DefaultSubtarget;
initAsmInfo();
}
@@ -91,11 +121,8 @@ MipselTargetMachine(const Target &T, StringRef TT,
const MipsSubtarget *
MipsTargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
@@ -104,19 +131,16 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
? FSAttr.getValueAsString().str()
: TargetFS;
bool hasMips16Attr =
- !FnAttrs.getAttribute(AttributeSet::FunctionIndex, "mips16")
- .hasAttribute(Attribute::None);
+ !F.getFnAttribute("mips16").hasAttribute(Attribute::None);
bool hasNoMips16Attr =
- !FnAttrs.getAttribute(AttributeSet::FunctionIndex, "nomips16")
- .hasAttribute(Attribute::None);
+ !F.getFnAttribute("nomips16").hasAttribute(Attribute::None);
// FIXME: This is related to the code below to reset the target options,
// we need to know whether or not the soft float flag is set on the
// function before we can generate a subtarget. We also need to use
// it as a key for the subtarget since that can be the only difference
// between two functions.
- Attribute SFAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
+ Attribute SFAttr = F.getFnAttribute("use-soft-float");
bool softFloat = !SFAttr.hasAttribute(Attribute::None)
? SFAttr.getValueAsString() == "true"
: Options.UseSoftFloat;
@@ -133,7 +157,7 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
// creation will depend on the TM and the code generation flags on the
// function that reside in TargetOptions.
resetTargetOptions(F);
- I = llvm::make_unique<MipsSubtarget>(TargetTriple, CPU, FS, isLittle, this);
+ I = llvm::make_unique<MipsSubtarget>(TargetTriple, CPU, FS, isLittle, *this);
}
return I.get();
}
@@ -170,9 +194,9 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
void addMachineSSAOptimization() override;
- bool addPreEmitPass() override;
+ void addPreEmitPass() override;
- bool addPreRegAlloc() override;
+ void addPreRegAlloc() override;
};
} // namespace
@@ -203,35 +227,30 @@ void MipsPassConfig::addMachineSSAOptimization() {
TargetPassConfig::addMachineSSAOptimization();
}
-bool MipsPassConfig::addPreRegAlloc() {
- if (getOptLevel() == CodeGenOpt::None) {
+void MipsPassConfig::addPreRegAlloc() {
+ if (getOptLevel() == CodeGenOpt::None)
addPass(createMipsOptimizePICCallPass(getMipsTargetMachine()));
- return true;
- }
- else
- return false;
}
-void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- if (Subtarget->allowMixed16_32()) {
- DEBUG(errs() << "No ");
- //FIXME: The Basic Target Transform Info
- // pass needs to become a function pass instead of
- // being an immutable pass and then this method as it exists now
- // would be unnecessary.
- PM.add(createNoTargetTransformInfoPass());
- } else
- LLVMTargetMachine::addAnalysisPasses(PM);
- DEBUG(errs() << "Target Transform Info Pass Added\n");
+TargetIRAnalysis MipsTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis([this](Function &F) {
+ if (Subtarget->allowMixed16_32()) {
+ DEBUG(errs() << "No Target Transform Info Pass Added\n");
+ // FIXME: This is no longer necessary as the TTI returned is per-function.
+ return TargetTransformInfo(getDataLayout());
+ }
+
+ DEBUG(errs() << "Target Transform Info Pass Added\n");
+ return TargetTransformInfo(BasicTTIImpl(this, F));
+ });
}
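
getTargetIRAnalysis() supersedes the removed addAnalysisPasses() hook: rather than registering an immutable pass, the target returns a factory that builds a per-function TTI, which lets the mixed 16/32 decision see the right subtarget. A sketch of how a driver consumes it, using the wrapper pass that accompanies this API (assumed available in this tree):

    legacy::PassManager PM;
    // The wrapper invokes the lambda above once per function.
    PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));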
// Implemented by targets that want to run passes immediately before
// machine code is emitted.
-bool MipsPassConfig::addPreEmitPass() {
+void MipsPassConfig::addPreEmitPass() {
MipsTargetMachine &TM = getMipsTargetMachine();
addPass(createMipsDelaySlotFillerPass(TM));
addPass(createMipsLongBranchPass(TM));
addPass(createMipsConstantIslandPass(TM));
- return true;
}
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index 1349f82..afd0cea 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -14,7 +14,9 @@
#ifndef LLVM_LIB_TARGET_MIPS_MIPSTARGETMACHINE_H
#define LLVM_LIB_TARGET_MIPS_MIPSTARGETMACHINE_H
+#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsSubtarget.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetFrameLowering.h"
@@ -27,6 +29,9 @@ class MipsRegisterInfo;
class MipsTargetMachine : public LLVMTargetMachine {
bool isLittle;
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ // Selected ABI
+ MipsABIInfo ABI;
+ const DataLayout DL; // Calculates type size & alignment
MipsSubtarget *Subtarget;
MipsSubtarget DefaultSubtarget;
MipsSubtarget NoMips16Subtarget;
@@ -40,8 +45,9 @@ public:
CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
~MipsTargetMachine() override;
- void addAnalysisPasses(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const MipsSubtarget *getSubtargetImpl() const override {
if (Subtarget)
return Subtarget;
@@ -59,6 +65,9 @@ public:
TargetLoweringObjectFile *getObjFileLowering() const override {
return TLOF.get();
}
+
+ bool isLittleEndian() const { return isLittle; }
+ const MipsABIInfo &getABI() const { return ABI; }
};
/// MipsebTargetMachine - Mips32/64 big endian target machine.
diff --git a/lib/Target/Mips/MipsTargetObjectFile.cpp b/lib/Target/Mips/MipsTargetObjectFile.cpp
index b56c39b..c07693e 100644
--- a/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -39,15 +39,11 @@ void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(TM.Options.UseInitArray);
- SmallDataSection =
- getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
- SectionKind::getDataRel());
-
- SmallBSSSection =
- getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
- SectionKind::getBSS());
+ SmallDataSection = getContext().getELFSection(
+ ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+
+ SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
this->TM = &TM;
}
@@ -109,8 +105,7 @@ IsGlobalInSmallSectionImpl(const GlobalValue *GV,
return false;
Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Ty));
+ return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
}
const MCSection *MipsTargetObjectFile::
@@ -132,10 +127,9 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
/// Return true if this constant should be placed into small data section.
bool MipsTargetObjectFile::
IsConstantInSmallSection(const Constant *CN, const TargetMachine &TM) const {
- return (TM.getSubtarget<MipsSubtarget>().useSmallSection() &&
- LocalSData &&
- IsInSmallSection(TM.getSubtargetImpl()->getDataLayout()
- ->getTypeAllocSize(CN->getType())));
+ return (
+ TM.getSubtarget<MipsSubtarget>().useSmallSection() && LocalSData &&
+ IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(CN->getType())));
}
const MCSection *MipsTargetObjectFile::
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
index c1f17933..b3b8296 100644
--- a/lib/Target/Mips/MipsTargetStreamer.h
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -10,10 +10,11 @@
#ifndef LLVM_LIB_TARGET_MIPS_MIPSTARGETSTREAMER_H
#define LLVM_LIB_TARGET_MIPS_MIPSTARGETSTREAMER_H
+#include "MCTargetDesc/MipsABIFlagsSection.h"
+#include "MCTargetDesc/MipsABIInfo.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
-#include "MCTargetDesc/MipsABIFlagsSection.h"
namespace llvm {
@@ -34,6 +35,7 @@ public:
virtual void emitDirectiveSetMsa();
virtual void emitDirectiveSetNoMsa();
virtual void emitDirectiveSetAt();
+ virtual void emitDirectiveSetAtWithArg(unsigned RegNo);
virtual void emitDirectiveSetNoAt();
virtual void emitDirectiveEnd(StringRef Name);
@@ -57,9 +59,13 @@ public:
virtual void emitDirectiveSetMips5();
virtual void emitDirectiveSetMips32();
virtual void emitDirectiveSetMips32R2();
+ virtual void emitDirectiveSetMips32R3();
+ virtual void emitDirectiveSetMips32R5();
virtual void emitDirectiveSetMips32R6();
virtual void emitDirectiveSetMips64();
virtual void emitDirectiveSetMips64R2();
+ virtual void emitDirectiveSetMips64R3();
+ virtual void emitDirectiveSetMips64R5();
virtual void emitDirectiveSetMips64R6();
virtual void emitDirectiveSetDsp();
virtual void emitDirectiveSetNoDsp();
@@ -95,12 +101,18 @@ public:
// structure values.
template <class PredicateLibrary>
void updateABIInfo(const PredicateLibrary &P) {
+ ABI = &P.getABI();
ABIFlagsSection.setAllFromPredicates(P);
}
MipsABIFlagsSection &getABIFlagsSection() { return ABIFlagsSection; }
+ const MipsABIInfo &getABI() const {
+ assert(ABI && "ABI hasn't been set!");
+ return *ABI;
+ }
protected:
+ const MipsABIInfo *ABI;
MipsABIFlagsSection ABIFlagsSection;
bool GPRInfoSet;
@@ -138,6 +150,7 @@ public:
void emitDirectiveSetMsa() override;
void emitDirectiveSetNoMsa() override;
void emitDirectiveSetAt() override;
+ void emitDirectiveSetAtWithArg(unsigned RegNo) override;
void emitDirectiveSetNoAt() override;
void emitDirectiveEnd(StringRef Name) override;
@@ -161,9 +174,13 @@ public:
void emitDirectiveSetMips5() override;
void emitDirectiveSetMips32() override;
void emitDirectiveSetMips32R2() override;
+ void emitDirectiveSetMips32R3() override;
+ void emitDirectiveSetMips32R5() override;
void emitDirectiveSetMips32R6() override;
void emitDirectiveSetMips64() override;
void emitDirectiveSetMips64R2() override;
+ void emitDirectiveSetMips64R3() override;
+ void emitDirectiveSetMips64R5() override;
void emitDirectiveSetMips64R6() override;
void emitDirectiveSetDsp() override;
void emitDirectiveSetNoDsp() override;
@@ -224,11 +241,6 @@ public:
// ABI Flags
void emitDirectiveModuleOddSPReg(bool Enabled, bool IsO32ABI) override;
void emitMipsAbiFlags() override;
-
-protected:
- bool isO32() const { return STI.getFeatureBits() & Mips::FeatureO32; }
- bool isN32() const { return STI.getFeatureBits() & Mips::FeatureN32; }
- bool isN64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
};
}
#endif
diff --git a/lib/Target/NVPTX/LLVMBuild.txt b/lib/Target/NVPTX/LLVMBuild.txt
index bc8d82e..6ea244a 100644
--- a/lib/Target/NVPTX/LLVMBuild.txt
+++ b/lib/Target/NVPTX/LLVMBuild.txt
@@ -28,5 +28,5 @@ has_asmprinter = 1
type = Library
name = NVPTXCodeGen
parent = NVPTX
-required_libraries = Analysis AsmPrinter CodeGen Core MC NVPTXAsmPrinter NVPTXDesc NVPTXInfo Scalar SelectionDAG Support Target
+required_libraries = Analysis AsmPrinter CodeGen Core MC NVPTXAsmPrinter NVPTXDesc NVPTXInfo Scalar SelectionDAG Support Target TransformUtils
add_to_library_groups = NVPTX
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index 4fd5bdd..11d737e 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -50,5 +50,6 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(StringRef TT) {
AscizDirective = " .b8";
// @TODO: Can we just disable this?
+ WeakDirective = "\t// .weak\t";
GlobalDirective = "\t// .globl\t";
}
diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h
index 13ba57e..382525d 100644
--- a/lib/Target/NVPTX/NVPTX.h
+++ b/lib/Target/NVPTX/NVPTX.h
@@ -59,9 +59,8 @@ inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) {
llvm_unreachable("Unknown condition code");
}
-ImmutablePass *createNVPTXTargetTransformInfoPass(const NVPTXTargetMachine *TM);
-FunctionPass *
-createNVPTXISelDag(NVPTXTargetMachine &TM, llvm::CodeGenOpt::Level OptLevel);
+FunctionPass *createNVPTXISelDag(NVPTXTargetMachine &TM,
+ llvm::CodeGenOpt::Level OptLevel);
ModulePass *createNVPTXAssignValidGlobalNamesPass();
ModulePass *createGenericToNVVMPass();
FunctionPass *createNVPTXFavorNonGenericAddrSpacesPass();
diff --git a/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/lib/Target/NVPTX/NVPTXAllocaHoisting.h
index 69fc86e..c343980 100644
--- a/lib/Target/NVPTX/NVPTXAllocaHoisting.h
+++ b/lib/Target/NVPTX/NVPTXAllocaHoisting.h
@@ -15,6 +15,7 @@
#define LLVM_LIB_TARGET_NVPTX_NVPTXALLOCAHOISTING_H
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Pass.h"
@@ -32,8 +33,8 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DataLayoutPass>();
- AU.addPreserved("stack-protector");
AU.addPreserved<MachineFunctionAnalysis>();
+ AU.addPreserved<StackProtector>();
}
const char *getPassName() const override {
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 35ba4f1..833db04 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -17,8 +17,8 @@
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
-#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXMCExpr.h"
+#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
@@ -27,6 +27,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfo.h"
@@ -45,6 +46,7 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeValue.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <sstream>
using namespace llvm;
@@ -108,160 +110,6 @@ void VisitGlobalVariableForEmission(
}
}
-// @TODO: This is a copy from AsmPrinter.cpp. The function is static, so we
-// cannot just link to the existing version.
-/// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
-///
-using namespace nvptx;
-const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
- MCContext &Ctx = AP.OutContext;
-
- if (CV->isNullValue() || isa<UndefValue>(CV))
- return MCConstantExpr::Create(0, Ctx);
-
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
- return MCConstantExpr::Create(CI->getZExtValue(), Ctx);
-
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
- return MCSymbolRefExpr::Create(AP.getSymbol(GV), Ctx);
-
- if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
- return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
-
- const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
- if (!CE)
- llvm_unreachable("Unknown constant value to lower!");
-
- switch (CE->getOpcode()) {
- default:
- // If the code isn't optimized, there may be outstanding folding
- // opportunities. Attempt to fold the expression using DataLayout as a
- // last resort before giving up.
- if (Constant *C = ConstantFoldConstantExpression(
- CE, AP.TM.getSubtargetImpl()->getDataLayout()))
- if (C != CE)
- return LowerConstant(C, AP);
-
- // Otherwise report the problem to the user.
- {
- std::string S;
- raw_string_ostream OS(S);
- OS << "Unsupported expression in static initializer: ";
- CE->printAsOperand(OS, /*PrintType=*/ false,
- !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
- report_fatal_error(OS.str());
- }
- case Instruction::AddrSpaceCast: {
- // Strip any addrspace(1)->addrspace(0) addrspace casts. These will be
- // handled by the generic() logic in the MCExpr printer
- PointerType *DstTy = cast<PointerType>(CE->getType());
- PointerType *SrcTy = cast<PointerType>(CE->getOperand(0)->getType());
- if (SrcTy->getAddressSpace() == 1 && DstTy->getAddressSpace() == 0) {
- return LowerConstant(cast<const Constant>(CE->getOperand(0)), AP);
- }
- std::string S;
- raw_string_ostream OS(S);
- OS << "Unsupported expression in static initializer: ";
- CE->printAsOperand(OS, /*PrintType=*/ false,
- !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
- report_fatal_error(OS.str());
- }
- case Instruction::GetElementPtr: {
- const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
- // Generate a symbolic expression for the byte address
- APInt OffsetAI(TD.getPointerSizeInBits(), 0);
- cast<GEPOperator>(CE)->accumulateConstantOffset(TD, OffsetAI);
-
- const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
- if (!OffsetAI)
- return Base;
-
- int64_t Offset = OffsetAI.getSExtValue();
- return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
- Ctx);
- }
-
- case Instruction::Trunc:
- // We emit the value and depend on the assembler to truncate the generated
- // expression properly. This is important for differences between
- // blockaddress labels. Since the two labels are in the same function, it
- // is reasonable to treat their delta as a 32-bit value.
- // FALL THROUGH.
- case Instruction::BitCast:
- return LowerConstant(CE->getOperand(0), AP);
-
- case Instruction::IntToPtr: {
- const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
- // Handle casts to pointers by changing them into casts to the appropriate
- // integer type. This promotes constant folding and simplifies this code.
- Constant *Op = CE->getOperand(0);
- Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
- false /*ZExt*/);
- return LowerConstant(Op, AP);
- }
-
- case Instruction::PtrToInt: {
- const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
- // Support only foldable casts to/from pointers that can be eliminated by
- // changing the pointer to the appropriately sized integer type.
- Constant *Op = CE->getOperand(0);
- Type *Ty = CE->getType();
-
- const MCExpr *OpExpr = LowerConstant(Op, AP);
-
- // We can emit the pointer value into this slot if the slot is an
- // integer slot equal to the size of the pointer.
- if (TD.getTypeAllocSize(Ty) == TD.getTypeAllocSize(Op->getType()))
- return OpExpr;
-
- // Otherwise the pointer is smaller than the resultant integer, mask off
- // the high bits so we are sure to get a proper truncation if the input is
- // a constant expr.
- unsigned InBits = TD.getTypeAllocSizeInBits(Op->getType());
- const MCExpr *MaskExpr =
- MCConstantExpr::Create(~0ULL >> (64 - InBits), Ctx);
- return MCBinaryExpr::CreateAnd(OpExpr, MaskExpr, Ctx);
- }
-
- // The MC library also has a right-shift operator, but it isn't consistently
- // signed or unsigned between different targets.
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- case Instruction::SDiv:
- case Instruction::SRem:
- case Instruction::Shl:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor: {
- const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
- const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
- switch (CE->getOpcode()) {
- default:
- llvm_unreachable("Unknown binary operator constant cast expr");
- case Instruction::Add:
- return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
- case Instruction::Sub:
- return MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
- case Instruction::Mul:
- return MCBinaryExpr::CreateMul(LHS, RHS, Ctx);
- case Instruction::SDiv:
- return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx);
- case Instruction::SRem:
- return MCBinaryExpr::CreateMod(LHS, RHS, Ctx);
- case Instruction::Shl:
- return MCBinaryExpr::CreateShl(LHS, RHS, Ctx);
- case Instruction::And:
- return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx);
- case Instruction::Or:
- return MCBinaryExpr::CreateOr(LHS, RHS, Ctx);
- case Instruction::Xor:
- return MCBinaryExpr::CreateXor(LHS, RHS, Ctx);
- }
- }
- }
-}
-
void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
if (!EmitLineNumbers)
return;
@@ -316,7 +164,7 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
- if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
+ if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA)
emitLineNumberAsDotLoc(*MI);
MCInst Inst;
@@ -389,8 +237,6 @@ void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp) {
void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
OutMI.setOpcode(MI->getOpcode());
- const NVPTXSubtarget &ST = TM.getSubtarget<NVPTXSubtarget>();
-
// Special: Do not mangle symbol operand of CALL_PROTOTYPE
if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
const MachineOperand &MO = MI->getOperand(0);
@@ -403,7 +249,7 @@ void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp;
- if (!ST.hasImageHandles()) {
+ if (!nvptxSubtarget->hasImageHandles()) {
if (lowerImageHandleOperand(MI, i, MCOp)) {
OutMI.addOperand(MCOp);
continue;
@@ -500,12 +346,12 @@ MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
}
void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
- const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
+ const DataLayout *TD = TM.getDataLayout();
+ const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
Type *Ty = F->getReturnType();
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
if (Ty->getTypeID() == Type::VoidTyID)
return;
@@ -528,17 +374,15 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
} else if (isa<PointerType>(Ty)) {
O << ".param .b" << TLI->getPointerTy().getSizeInBits()
<< " func_retval0";
- } else {
- if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
- unsigned totalsz = TD->getTypeAllocSize(Ty);
- unsigned retAlignment = 0;
- if (!llvm::getAlign(*F, 0, retAlignment))
- retAlignment = TD->getABITypeAlignment(Ty);
- O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
- << "]";
- } else
- assert(false && "Unknown return type");
- }
+ } else if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
+ unsigned totalsz = TD->getTypeAllocSize(Ty);
+ unsigned retAlignment = 0;
+ if (!llvm::getAlign(*F, 0, retAlignment))
+ retAlignment = TD->getABITypeAlignment(Ty);
+ O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
+ << "]";
+ } else
+ llvm_unreachable("Unknown return type");
} else {
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*TLI, Ty, vtparts);
@@ -574,6 +418,42 @@ void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
printReturnValStr(F, O);
}
+// Return true if MBB is the header of a loop marked with
+// llvm.loop.unroll.disable.
+// TODO: consider "#pragma unroll 1" which is equivalent to "#pragma nounroll".
+bool NVPTXAsmPrinter::isLoopHeaderOfNoUnroll(
+ const MachineBasicBlock &MBB) const {
+ MachineLoopInfo &LI = getAnalysis<MachineLoopInfo>();
+ // TODO: isLoopHeader() should take "const MachineBasicBlock *".
+ // We insert .pragma "nounroll" only to the loop header.
+ if (!LI.isLoopHeader(const_cast<MachineBasicBlock *>(&MBB)))
+ return false;
+
+ // llvm.loop.unroll.disable is marked on the back edges of a loop. Therefore,
+ // we iterate through each back edge of the loop with header MBB, and check
+ // whether its metadata contains llvm.loop.unroll.disable.
+ for (auto I = MBB.pred_begin(); I != MBB.pred_end(); ++I) {
+ const MachineBasicBlock *PMBB = *I;
+ if (LI.getLoopFor(PMBB) != LI.getLoopFor(&MBB)) {
+ // Edges from other loops to MBB are not back edges.
+ continue;
+ }
+ if (const BasicBlock *PBB = PMBB->getBasicBlock()) {
+ if (MDNode *LoopID = PBB->getTerminator()->getMetadata("llvm.loop")) {
+ if (GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable"))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void NVPTXAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
+ AsmPrinter::EmitBasicBlockStart(MBB);
+ if (isLoopHeaderOfNoUnroll(MBB))
+ OutStreamer.EmitRawText(StringRef("\t.pragma \"nounroll\";\n"));
+}
+
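
The walk above checks back edges because that is where the frontend attaches loop metadata: the back-edge terminator carries an !llvm.loop reference to a distinct node that lists !{"llvm.loop.unroll.disable"} when #pragma nounroll is used. The same test in isolation (a sketch; GetUnrollMetadata() comes from Transforms/Utils/UnrollLoop.h, included above):

    // PBB is a back-edge predecessor of the loop header.
    if (MDNode *LoopID = PBB->getTerminator()->getMetadata("llvm.loop"))
      if (GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable"))
        return true; // header will get .pragma "nounroll"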
void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
SmallString<128> Str;
raw_svector_ostream O(Str);
@@ -624,14 +504,13 @@ void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
unsigned RegNo = MI->getOperand(0).getReg();
- const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = nvptxSubtarget->getRegisterInfo();
if (TRI->isVirtualRegister(RegNo)) {
OutStreamer.AddComment(Twine("implicit-def: ") +
getVirtualRegisterName(RegNo));
} else {
- OutStreamer.AddComment(
- Twine("implicit-def: ") +
- TM.getSubtargetImpl()->getRegisterInfo()->getName(RegNo));
+ OutStreamer.AddComment(Twine("implicit-def: ") +
+ nvptxSubtarget->getRegisterInfo()->getName(RegNo));
}
OutStreamer.AddBlankLine();
}
@@ -793,11 +672,6 @@ static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
return false;
}
- if (const MDNode *md = dyn_cast<MDNode>(U))
- if (md->hasName() && ((md->getName().str() == "llvm.dbg.gv") ||
- (md->getName().str() == "llvm.dbg.sp")))
- return true;
-
for (const User *UU : U->users())
if (usedInOneFunc(UU, oneFunc) == false)
return false;
@@ -938,6 +812,14 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
}
bool NVPTXAsmPrinter::doInitialization(Module &M) {
+ // Construct a default subtarget off of the TargetMachine defaults. The
+  // rest of NVPTX isn't friendly to changing the subtarget per function,
+  // so the default TargetMachine will have all of the options.
+ StringRef TT = TM.getTargetTriple();
+ StringRef CPU = TM.getTargetCPU();
+ StringRef FS = TM.getTargetFeatureString();
+ const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
+ const NVPTXSubtarget STI(TT, CPU, FS, NTM);
SmallString<128> Str1;
raw_svector_ostream OS1(Str1);
@@ -952,10 +834,10 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(TM.getSubtargetImpl()->getDataLayout());
+ Mang = new Mangler(TM.getDataLayout());
// Emit header before any dwarf directives are emitted below.
- emitHeader(M, OS1);
+ emitHeader(M, OS1, STI);
OutStreamer.EmitRawText(OS1.str());
// Already commented out
@@ -971,7 +853,8 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
OutStreamer.AddBlankLine();
}
- if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
+  // If we're not NVCL, we're CUDA; go ahead and emit filenames.
+ if (Triple(TM.getTargetTriple()).getOS() != Triple::NVCL)
recordAndEmitFilenames(M);
GlobalsEmitted = false;
@@ -1012,22 +895,24 @@ void NVPTXAsmPrinter::emitGlobals(const Module &M) {
OutStreamer.EmitRawText(OS2.str());
}
-void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O) {
+void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O,
+ const NVPTXSubtarget &STI) {
O << "//\n";
O << "// Generated by LLVM NVPTX Back-End\n";
O << "//\n";
O << "\n";
- unsigned PTXVersion = nvptxSubtarget.getPTXVersion();
+ unsigned PTXVersion = STI.getPTXVersion();
O << ".version " << (PTXVersion / 10) << "." << (PTXVersion % 10) << "\n";
O << ".target ";
- O << nvptxSubtarget.getTargetName();
+ O << STI.getTargetName();
- if (nvptxSubtarget.getDrvInterface() == NVPTX::NVCL)
+ const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
+ if (NTM.getDrvInterface() == NVPTX::NVCL)
O << ", texmode_independent";
- if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
- if (!nvptxSubtarget.hasDouble())
+ else {
+ if (!STI.hasDouble())
O << ", map_f64_to_f32";
}
@@ -1037,7 +922,7 @@ void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O) {
O << "\n";
O << ".address_size ";
- if (nvptxSubtarget.is64Bit())
+ if (NTM.is64Bit())
O << "64";
else
O << "32";
@@ -1047,7 +932,6 @@ void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O) {
}
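
For reference, the header assembled above for a 64-bit CUDA module whose subtarget reports PTX version 32 and target name sm_20 would read (values illustrative):

    .version 3.2
    .target sm_20
    .address_size 64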
bool NVPTXAsmPrinter::doFinalization(Module &M) {
-
// If we did not emit any functions, then the global declarations have not
// yet been emitted.
if (!GlobalsEmitted) {
@@ -1109,7 +993,7 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) {
void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
raw_ostream &O) {
- if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
+ if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
if (V->hasExternalLinkage()) {
if (isa<GlobalVariable>(V)) {
const GlobalVariable *GVar = cast<GlobalVariable>(V);
@@ -1153,7 +1037,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
GVar->getName().startswith("nvvm."))
return;
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
@@ -1287,7 +1171,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
else
O << " .align " << GVar->getAlignment();
- if (ETy->isSingleValueType()) {
+ if (ETy->isFloatingPointTy() || ETy->isIntegerTy() || ETy->isPointerTy()) {
O << " .";
// Special case: ABI requires that we use .u8 for predicates
if (ETy->isIntegerTy(1))
@@ -1341,7 +1225,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
AggBuffer aggBuffer(ElementSize, O, *this);
bufferAggregateConstant(Initializer, &aggBuffer);
if (aggBuffer.numSymbols) {
- if (nvptxSubtarget.is64Bit()) {
+ if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit()) {
O << " .u64 " << *getSymbol(GVar) << "[";
O << ElementSize / 8;
} else {
@@ -1439,7 +1323,7 @@ NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
case Type::DoubleTyID:
return "f64";
case Type::PointerTyID:
- if (nvptxSubtarget.is64Bit())
+ if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit())
if (useB4PTR)
return "b64";
else
@@ -1456,7 +1340,7 @@ NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
raw_ostream &O) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
@@ -1469,7 +1353,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
else
O << " .align " << GVar->getAlignment();
- if (ETy->isSingleValueType()) {
+ if (ETy->isFloatingPointTy() || ETy->isIntegerTy() || ETy->isPointerTy()) {
O << " .";
O << getPTXFundamentalTypeStr(ETy);
O << " ";
@@ -1508,17 +1392,6 @@ static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) {
if (ATy)
return getOpenCLAlignment(TD, ATy->getElementType());
- const VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (VTy) {
- Type *ETy = VTy->getElementType();
- unsigned int numE = VTy->getNumElements();
- unsigned int alignE = TD->getPrefTypeAlignment(ETy);
- if (numE == 3)
- return 4 * alignE;
- else
- return numE * alignE;
- }
-
const StructType *STy = dyn_cast<StructType>(Ty);
if (STy) {
unsigned int alignStruct = 1;
@@ -1541,50 +1414,22 @@ static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) {
void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
int paramIndex, raw_ostream &O) {
- if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
- (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA))
- O << *getSymbol(I->getParent()) << "_param_" << paramIndex;
- else {
- std::string argName = I->getName();
- const char *p = argName.c_str();
- while (*p) {
- if (*p == '.')
- O << "_";
- else
- O << *p;
- p++;
- }
- }
+ O << *getSymbol(I->getParent()) << "_param_" << paramIndex;
}
void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) {
- Function::const_arg_iterator I, E;
- int i = 0;
-
- if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
- (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)) {
- O << *CurrentFnSym << "_param_" << paramIndex;
- return;
- }
-
- for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, i++) {
- if (i == paramIndex) {
- printParamName(I, paramIndex, O);
- return;
- }
- }
- llvm_unreachable("paramIndex out of bound");
+ O << *CurrentFnSym << "_param_" << paramIndex;
}
void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
const AttributeSet &PAL = F->getAttributes();
- const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
+ const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
Function::const_arg_iterator I, E;
unsigned paramIndex = 0;
bool first = true;
bool isKernelFunc = llvm::isKernelFunction(*F);
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
MVT thePointerTy = TLI->getPointerTy();
O << "(\n";
@@ -1603,21 +1448,21 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
if (isImage(*I)) {
std::string sname = I->getName();
if (isImageWriteOnly(*I) || isImageReadWrite(*I)) {
- if (nvptxSubtarget.hasImageHandles())
+ if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .surfref ";
else
O << "\t.param .surfref ";
O << *CurrentFnSym << "_param_" << paramIndex;
}
else { // Default image is read_only
- if (nvptxSubtarget.hasImageHandles())
+ if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .texref ";
else
O << "\t.param .texref ";
O << *CurrentFnSym << "_param_" << paramIndex;
}
} else {
- if (nvptxSubtarget.hasImageHandles())
+ if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .samplerref ";
else
O << "\t.param .samplerref ";
@@ -1650,7 +1495,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
// Special handling for pointer arguments to kernel
O << "\t.param .u" << thePointerTy.getSizeInBits() << " ";
- if (nvptxSubtarget.getDrvInterface() != NVPTX::CUDA) {
+ if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() !=
+ NVPTX::CUDA) {
Type *ETy = PTy->getElementType();
int addrSpace = PTy->getAddressSpace();
switch (addrSpace) {
@@ -1779,7 +1625,7 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
if (NumBytes) {
O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t" << DEPOTNAME
<< getFunctionNumber() << "[" << NumBytes << "];\n";
- if (nvptxSubtarget.is64Bit()) {
+ if (static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit()) {
O << "\t.reg .b64 \t%SP;\n";
O << "\t.reg .b64 \t%SPL;\n";
} else {
@@ -1900,7 +1746,7 @@ void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
}
return;
} else {
- O << *LowerConstant(CPV, *this);
+ O << *lowerConstant(CPV);
return;
}
}
@@ -1910,7 +1756,7 @@ void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
AggBuffer *aggBuffer) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
int s = TD->getTypeAllocSize(CPV->getType());
@@ -2034,7 +1880,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
AggBuffer *aggBuffer) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
int Bytes;
// Old constants
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h
index 83fa5d3..7e6b5e8 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -39,13 +39,6 @@
// A better approach is to clone the MCAsmStreamer to a MCPTXAsmStreamer
// (subclass of MCStreamer).
-// This is defined in AsmPrinter.cpp.
-// Used to process the constant expressions in initializers.
-namespace nvptx {
-const llvm::MCExpr *
-LowerConstant(const llvm::Constant *CV, llvm::AsmPrinter &AP);
-}
-
namespace llvm {
class LineReader {
@@ -145,7 +138,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
unsigned int nSym = 0;
unsigned int nextSymbolPos = symbolPosInBuffer[nSym];
unsigned int nBytes = 4;
- if (AP.nvptxSubtarget.is64Bit())
+ if (static_cast<const NVPTXTargetMachine &>(AP.TM).is64Bit())
nBytes = 8;
for (pos = 0; pos < size; pos += nBytes) {
if (pos)
@@ -167,7 +160,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
O << *Name;
}
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(v)) {
- O << *nvptx::LowerConstant(Cexpr, AP);
+ O << *AP.lowerConstant(Cexpr);
} else
llvm_unreachable("symbol type unknown");
nSym++;
@@ -194,6 +187,7 @@ private:
const Function *F;
std::string CurrentFnName;
+ void EmitBasicBlockStart(const MachineBasicBlock &MBB) const override;
void EmitFunctionEntryLabel() override;
void EmitFunctionBodyStart() override;
void EmitFunctionBodyEnd() override;
@@ -218,7 +212,7 @@ private:
void printParamName(Function::const_arg_iterator I, int paramIndex,
raw_ostream &O);
void emitGlobals(const Module &M);
- void emitHeader(Module &M, raw_ostream &O);
+ void emitHeader(Module &M, raw_ostream &O, const NVPTXSubtarget &STI);
void emitKernelFunctionDirectives(const Function &F, raw_ostream &O) const;
void emitVirtualRegister(unsigned int vr, raw_ostream &);
void emitFunctionExternParamList(const MachineFunction &MF);
@@ -254,8 +248,10 @@ private:
typedef DenseMap<unsigned, unsigned> VRegMap;
typedef DenseMap<const TargetRegisterClass *, VRegMap> VRegRCMap;
VRegRCMap VRegMapping;
- // cache the subtarget here.
- const NVPTXSubtarget &nvptxSubtarget;
+
+ // Cache the subtarget here.
+ const NVPTXSubtarget *nvptxSubtarget;
+
// Build the map between type name and ID based on module's type
// symbol table.
std::map<const Type *, std::string> TypeNameMap;
@@ -288,6 +284,8 @@ private:
MCOperand &MCOp);
void lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp);
+ bool isLoopHeaderOfNoUnroll(const MachineBasicBlock &MBB) const;
+
LineReader *reader;
LineReader *getReader(std::string);
@@ -305,12 +303,12 @@ private:
bool EmitGeneric;
public:
- NVPTXAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer),
- nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+ NVPTXAsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)),
+ EmitGeneric(static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() ==
+ NVPTX::CUDA) {
CurrentBankselLabelInBasicBlock = "";
reader = nullptr;
- EmitGeneric = (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA);
}
~NVPTXAsmPrinter() {
@@ -318,6 +316,15 @@ public:
delete reader;
}
+ bool runOnMachineFunction(MachineFunction &F) override {
+ nvptxSubtarget = &F.getSubtarget<NVPTXSubtarget>();
+ return AsmPrinter::runOnMachineFunction(F);
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineLoopInfo>();
+ AsmPrinter::getAnalysisUsage(AU);
+ }
+
bool ignoreLoc(const MachineInstr &);
std::string getVirtualRegisterName(unsigned) const;
diff --git a/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp b/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
index 962b123..7d4be8e 100644
--- a/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
+++ b/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
@@ -19,8 +19,8 @@
#include "NVPTX.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 314df38..34d3a66 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -26,9 +26,8 @@
using namespace llvm;
-NVPTXFrameLowering::NVPTXFrameLowering(NVPTXSubtarget &STI)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0),
- is64bit(STI.is64Bit()) {}
+NVPTXFrameLowering::NVPTXFrameLowering()
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0) {}
bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const { return true; }
@@ -45,7 +44,7 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const {
// mov %SPL, %depot;
// cvta.local %SP, %SPL;
- if (is64bit) {
+ if (static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit()) {
unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int64RegsRegClass);
MachineInstr *MI =
BuildMI(MBB, MBBI, dl, MF.getSubtarget().getInstrInfo()->get(
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.h b/lib/Target/NVPTX/NVPTXFrameLowering.h
index 0846b78..d1e0a5c 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.h
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -19,18 +19,16 @@
namespace llvm {
class NVPTXSubtarget;
class NVPTXFrameLowering : public TargetFrameLowering {
- bool is64bit;
-
public:
- explicit NVPTXFrameLowering(NVPTXSubtarget &STI);
+ explicit NVPTXFrameLowering();
bool hasFP(const MachineFunction &MF) const override;
void emitPrologue(MachineFunction &MF) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const override;
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
};
} // End llvm namespace
diff --git a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index 58fa95b..86d134b 100644
--- a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -22,10 +22,11 @@
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueMap.h"
-#include "llvm/PassManager.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
@@ -54,8 +55,7 @@ private:
IRBuilder<> &Builder);
Value *remapConstantExpr(Module *M, Function *F, ConstantExpr *C,
IRBuilder<> &Builder);
- void remapNamedMDNode(Module *M, NamedMDNode *N);
- MDNode *remapMDNode(Module *M, MDNode *N);
+ void remapNamedMDNode(ValueToValueMapTy &VM, NamedMDNode *N);
typedef ValueMap<GlobalVariable *, GlobalVariable *> GVMapTy;
typedef ValueMap<Constant *, Value *> ConstantToValueMapTy;
@@ -125,12 +125,17 @@ bool GenericToNVVM::runOnModule(Module &M) {
ConstantToValueMap.clear();
}
+ // Copy GVMap over to a standard value map.
+ ValueToValueMapTy VM;
+ for (auto I = GVMap.begin(), E = GVMap.end(); I != E; ++I)
+ VM[I->first] = I->second;
+
// Walk through the metadata section and update the debug information
// associated with the global variables in the default address space.
for (Module::named_metadata_iterator I = M.named_metadata_begin(),
E = M.named_metadata_end();
I != E; I++) {
- remapNamedMDNode(&M, I);
+ remapNamedMDNode(VM, I);
}
// Walk through the global variable initializers, and replace any use of
@@ -362,7 +367,7 @@ Value *GenericToNVVM::remapConstantExpr(Module *M, Function *F, ConstantExpr *C,
}
}
-void GenericToNVVM::remapNamedMDNode(Module *M, NamedMDNode *N) {
+void GenericToNVVM::remapNamedMDNode(ValueToValueMapTy &VM, NamedMDNode *N) {
bool OperandChanged = false;
SmallVector<MDNode *, 16> NewOperands;
@@ -372,7 +377,7 @@ void GenericToNVVM::remapNamedMDNode(Module *M, NamedMDNode *N) {
// converted to another value.
for (unsigned i = 0; i < NumOperands; ++i) {
MDNode *Operand = N->getOperand(i);
- MDNode *NewOperand = remapMDNode(M, Operand);
+ MDNode *NewOperand = MapMetadata(Operand, VM);
OperandChanged |= Operand != NewOperand;
NewOperands.push_back(NewOperand);
}
@@ -390,47 +395,3 @@ void GenericToNVVM::remapNamedMDNode(Module *M, NamedMDNode *N) {
N->addOperand(*I);
}
}
-
-MDNode *GenericToNVVM::remapMDNode(Module *M, MDNode *N) {
-
- bool OperandChanged = false;
- SmallVector<Value *, 8> NewOperands;
- unsigned NumOperands = N->getNumOperands();
-
- // Check if any operand is or contains a global variable in GVMap, and thus
- // converted to another value.
- for (unsigned i = 0; i < NumOperands; ++i) {
- Value *Operand = N->getOperand(i);
- Value *NewOperand = Operand;
- if (Operand) {
- if (isa<GlobalVariable>(Operand)) {
- GVMapTy::iterator I = GVMap.find(cast<GlobalVariable>(Operand));
- if (I != GVMap.end()) {
- NewOperand = I->second;
- if (++i < NumOperands) {
- NewOperands.push_back(NewOperand);
- // Address space of the global variable follows the global variable
- // in the global variable debug info (see createGlobalVariable in
- // lib/Analysis/DIBuilder.cpp).
- NewOperand =
- ConstantInt::get(Type::getInt32Ty(M->getContext()),
- I->second->getType()->getAddressSpace());
- }
- }
- } else if (isa<MDNode>(Operand)) {
- NewOperand = remapMDNode(M, cast<MDNode>(Operand));
- }
- }
- OperandChanged |= Operand != NewOperand;
- NewOperands.push_back(NewOperand);
- }
-
- // If none of the operands has been modified, return N as it is.
- if (!OperandChanged) {
- return N;
- }
-
- // If any of the operands has been modified, create a new MDNode with the new
- // operands.
- return MDNode::get(M->getContext(), makeArrayRef(NewOperands));
-}
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index cd0422d..e01c780 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -50,11 +50,15 @@ FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM,
NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel),
- Subtarget(tm.getSubtarget<NVPTXSubtarget>()) {
+ : SelectionDAGISel(tm, OptLevel), TM(tm) {
doMulWide = (OptLevel > 0);
}
+bool NVPTXDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &static_cast<const NVPTXSubtarget &>(MF.getSubtarget());
+ return SelectionDAGISel::runOnMachineFunction(MF);
+}
+
int NVPTXDAGToDAGISel::getDivF32Level() const {
if (UsePrecDivF32.getNumOccurrences() > 0) {
// If nvptx-prec-div32=N is used on the command-line, always honor it
@@ -89,16 +93,14 @@ bool NVPTXDAGToDAGISel::useF32FTZ() const {
const Function *F = MF->getFunction();
// Otherwise, check for an nvptx-f32ftz attribute on the function
if (F->hasFnAttribute("nvptx-f32ftz"))
- return (F->getAttributes().getAttribute(AttributeSet::FunctionIndex,
- "nvptx-f32ftz")
- .getValueAsString() == "true");
+ return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
else
return false;
}
}
bool NVPTXDAGToDAGISel::allowFMA() const {
- const NVPTXTargetLowering *TL = Subtarget.getTargetLowering();
+ const NVPTXTargetLowering *TL = Subtarget->getTargetLowering();
return TL->allowFMA(*MF, OptLevel);
}
@@ -525,8 +527,7 @@ SDNode *NVPTXDAGToDAGISel::SelectIntrinsicChain(SDNode *N) {
}
}
-static unsigned int getCodeAddrSpace(MemSDNode *N,
- const NVPTXSubtarget &Subtarget) {
+static unsigned int getCodeAddrSpace(MemSDNode *N) {
const Value *Src = N->getMemOperand()->getValue();
if (!Src)
@@ -579,20 +580,16 @@ SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
switch (SrcAddrSpace) {
default: report_fatal_error("Bad address space in addrspacecast");
case ADDRESS_SPACE_GLOBAL:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_global_yes_64
- : NVPTX::cvta_global_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_global_yes_64 : NVPTX::cvta_global_yes;
break;
case ADDRESS_SPACE_SHARED:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_shared_yes_64
- : NVPTX::cvta_shared_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_shared_yes_64 : NVPTX::cvta_shared_yes;
break;
case ADDRESS_SPACE_CONST:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_const_yes_64
- : NVPTX::cvta_const_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_const_yes_64 : NVPTX::cvta_const_yes;
break;
case ADDRESS_SPACE_LOCAL:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_local_yes_64
- : NVPTX::cvta_local_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_local_yes_64 : NVPTX::cvta_local_yes;
break;
}
return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
@@ -604,20 +601,20 @@ SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
switch (DstAddrSpace) {
default: report_fatal_error("Bad address space in addrspacecast");
case ADDRESS_SPACE_GLOBAL:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_global_yes_64
- : NVPTX::cvta_to_global_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_global_yes_64
+ : NVPTX::cvta_to_global_yes;
break;
case ADDRESS_SPACE_SHARED:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_shared_yes_64
- : NVPTX::cvta_to_shared_yes;
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_yes_64
+ : NVPTX::cvta_to_shared_yes;
break;
case ADDRESS_SPACE_CONST:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_const_yes_64
- : NVPTX::cvta_to_const_yes;
+ Opc =
+ TM.is64Bit() ? NVPTX::cvta_to_const_yes_64 : NVPTX::cvta_to_const_yes;
break;
case ADDRESS_SPACE_LOCAL:
- Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_local_yes_64
- : NVPTX::cvta_to_local_yes;
+ Opc =
+ TM.is64Bit() ? NVPTX::cvta_to_local_yes_64 : NVPTX::cvta_to_local_yes;
break;
}
return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
@@ -638,7 +635,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
return nullptr;
// Address Space Setting
- unsigned int codeAddrSpace = getCodeAddrSpace(LD, Subtarget);
+ unsigned int codeAddrSpace = getCodeAddrSpace(LD);
// Volatile Setting
// - .volatile is only available for .global and .shared
@@ -713,9 +710,8 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
getI32Imm(vecType), getI32Imm(fromType),
getI32Imm(fromTypeWidth), Addr, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRsi64(N1.getNode(), N1, Base, Offset)
- : SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
+ } else if (TM.is64Bit() ? SelectADDRsi64(N1.getNode(), N1, Base, Offset)
+ : SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
switch (TargetVT) {
case MVT::i8:
Opcode = NVPTX::LD_i8_asi;
@@ -742,10 +738,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
getI32Imm(vecType), getI32Imm(fromType),
getI32Imm(fromTypeWidth), Base, Offset, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRri64(N1.getNode(), N1, Base, Offset)
- : SelectADDRri(N1.getNode(), N1, Base, Offset)) {
- if (Subtarget.is64Bit()) {
+ } else if (TM.is64Bit() ? SelectADDRri64(N1.getNode(), N1, Base, Offset)
+ : SelectADDRri(N1.getNode(), N1, Base, Offset)) {
+ if (TM.is64Bit()) {
switch (TargetVT) {
case MVT::i8:
Opcode = NVPTX::LD_i8_ari_64;
@@ -797,7 +792,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
getI32Imm(fromTypeWidth), Base, Offset, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
} else {
- if (Subtarget.is64Bit()) {
+ if (TM.is64Bit()) {
switch (TargetVT) {
case MVT::i8:
Opcode = NVPTX::LD_i8_areg_64;
@@ -874,7 +869,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
return nullptr;
// Address Space Setting
- unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD, Subtarget);
+ unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD);
// Volatile Setting
// - .volatile is only available for .global and .shared
@@ -974,9 +969,8 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
getI32Imm(VecType), getI32Imm(FromType),
getI32Imm(FromTypeWidth), Addr, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset)
- : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
+ } else if (TM.is64Bit() ? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset)
+ : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -1028,10 +1022,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
getI32Imm(VecType), getI32Imm(FromType),
getI32Imm(FromTypeWidth), Base, Offset, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
- : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
- if (Subtarget.is64Bit()) {
+ } else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
+ : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -1133,7 +1126,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else {
- if (Subtarget.is64Bit()) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -1425,10 +1418,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
SDValue Ops[] = { Addr, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
- : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
- if (Subtarget.is64Bit()) {
+ } else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
+ : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -1710,7 +1702,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else {
- if (Subtarget.is64Bit()) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -2013,7 +2005,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
return nullptr;
// Address Space Setting
- unsigned int codeAddrSpace = getCodeAddrSpace(ST, Subtarget);
+ unsigned int codeAddrSpace = getCodeAddrSpace(ST);
// Volatile Setting
// - .volatile is only available for .global and .shared
@@ -2083,9 +2075,8 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
getI32Imm(vecType), getI32Imm(toType),
getI32Imm(toTypeWidth), Addr, Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
- : SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
+ } else if (TM.is64Bit() ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
+ : SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
switch (SourceVT) {
case MVT::i8:
Opcode = NVPTX::ST_i8_asi;
@@ -2112,10 +2103,9 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
getI32Imm(vecType), getI32Imm(toType),
getI32Imm(toTypeWidth), Base, Offset, Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
- } else if (Subtarget.is64Bit()
- ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
- : SelectADDRri(N2.getNode(), N2, Base, Offset)) {
- if (Subtarget.is64Bit()) {
+ } else if (TM.is64Bit() ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
+ : SelectADDRri(N2.getNode(), N2, Base, Offset)) {
+ if (TM.is64Bit()) {
switch (SourceVT) {
case MVT::i8:
Opcode = NVPTX::ST_i8_ari_64;
@@ -2167,7 +2157,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
getI32Imm(toTypeWidth), Base, Offset, Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
} else {
- if (Subtarget.is64Bit()) {
+ if (TM.is64Bit()) {
switch (SourceVT) {
case MVT::i8:
Opcode = NVPTX::ST_i8_areg_64;
@@ -2241,7 +2231,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
EVT StoreVT = MemSD->getMemoryVT();
// Address Space Setting
- unsigned CodeAddrSpace = getCodeAddrSpace(MemSD, Subtarget);
+ unsigned CodeAddrSpace = getCodeAddrSpace(MemSD);
if (CodeAddrSpace == NVPTX::PTXLdStInstCode::CONSTANT) {
report_fatal_error("Cannot store to pointer that points to constant "
@@ -2344,9 +2334,8 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
break;
}
StOps.push_back(Addr);
- } else if (Subtarget.is64Bit()
- ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
- : SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
+ } else if (TM.is64Bit() ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
+ : SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -2395,10 +2384,9 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
}
StOps.push_back(Base);
StOps.push_back(Offset);
- } else if (Subtarget.is64Bit()
- ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
- : SelectADDRri(N2.getNode(), N2, Base, Offset)) {
- if (Subtarget.is64Bit()) {
+ } else if (TM.is64Bit() ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
+ : SelectADDRri(N2.getNode(), N2, Base, Offset)) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -2496,7 +2484,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
StOps.push_back(Base);
StOps.push_back(Offset);
} else {
- if (Subtarget.is64Bit()) {
+ if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
return nullptr;
@@ -4772,7 +4760,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
}
// How many bits are in our mask?
- uint64_t NumBits = CountTrailingOnes_64(MaskVal);
+ uint64_t NumBits = countTrailingOnes(MaskVal);
Len = CurDAG->getTargetConstant(NumBits, MVT::i32);
if (LHS.getOpcode() == ISD::SRL || LHS.getOpcode() == ISD::SRA) {
@@ -4836,10 +4824,10 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
NumZeros = 0;
// The number of bits in the result bitfield will be the number of
// trailing ones (the AND) minus the number of bits we shift off
- NumBits = CountTrailingOnes_64(MaskVal) - ShiftAmt;
+ NumBits = countTrailingOnes(MaskVal) - ShiftAmt;
} else if (isShiftedMask_64(MaskVal)) {
NumZeros = countTrailingZeros(MaskVal);
- unsigned NumOnes = CountTrailingOnes_64(MaskVal >> NumZeros);
+ unsigned NumOnes = countTrailingOnes(MaskVal >> NumZeros);
// The number of bits in the result bitfield will be the number of
// trailing zeros plus the number of set bits in the mask minus the
// number of bits we shift off
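(Aside, not part of the patch: the mask arithmetic SelectBFE relies on can be checked with a small host-side C++ sketch; countTrailingOnes64 below stands in for the llvm::countTrailingOnes helper adopted above.)

#include <cassert>
#include <cstdint>

// Stand-in for llvm::countTrailingOnes on a 64-bit mask.
static unsigned countTrailingOnes64(uint64_t V) {
  unsigned N = 0;
  while (V & 1) {
    V >>= 1;
    ++N;
  }
  return N;
}

// (X >> Pos) & Mask, with Mask a run of trailing ones, is a bitfield extract
// of Len = countTrailingOnes(Mask) bits starting at bit Pos, which is the
// shape SelectBFE matches and turns into a single PTX bfe instruction.
static uint64_t bfe(uint64_t X, unsigned Pos, unsigned Len) {
  return (X >> Pos) & ((1ull << Len) - 1);
}

int main() {
  uint64_t X = 0xDEADBEEFCAFEF00Dull;
  uint64_t Mask = 0x1F; // five trailing ones
  unsigned Pos = 3, Len = countTrailingOnes64(Mask);
  assert(((X >> Pos) & Mask) == bfe(X, Pos, Len));
  return 0;
}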
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index 69afcd7..ca432b5 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -26,6 +26,7 @@ using namespace llvm;
namespace {
class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
+ const NVPTXTargetMachine &TM;
// If true, generate mul.wide from sext and mul
bool doMulWide;
@@ -43,8 +44,8 @@ public:
const char *getPassName() const override {
return "NVPTX DAG->DAG Pattern Instruction Selection";
}
-
- const NVPTXSubtarget &Subtarget;
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ const NVPTXSubtarget *Subtarget;
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 0b0b536..1dc81f7 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -106,9 +106,9 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
}
// NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
- : TargetLowering(TM), nvTM(&TM),
- nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
// always lower memset, memcpy, and memmove intrinsics to load/store
// instructions, rather
@@ -167,14 +167,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
- if (nvptxSubtarget.hasROT64()) {
+ if (STI.hasROT64()) {
setOperationAction(ISD::ROTL, MVT::i64, Legal);
setOperationAction(ISD::ROTR, MVT::i64, Legal);
} else {
setOperationAction(ISD::ROTL, MVT::i64, Expand);
setOperationAction(ISD::ROTR, MVT::i64, Expand);
}
- if (nvptxSubtarget.hasROT32()) {
+ if (STI.hasROT32()) {
setOperationAction(ISD::ROTL, MVT::i32, Legal);
setOperationAction(ISD::ROTR, MVT::i32, Legal);
} else {
@@ -203,8 +203,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
// Turn FP extload into load/fextend
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
// Turn FP truncstore into trunc + store.
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
@@ -214,12 +215,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
setOperationAction(ISD::LOAD, MVT::i1, Custom);
setOperationAction(ISD::STORE, MVT::i1, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setTruncStoreAction(MVT::i64, MVT::i1, Expand);
- setTruncStoreAction(MVT::i32, MVT::i1, Expand);
- setTruncStoreAction(MVT::i16, MVT::i1, Expand);
- setTruncStoreAction(MVT::i8, MVT::i1, Expand);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
// This is legal in NVPTX
setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
@@ -232,9 +232,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
setOperationAction(ISD::ADDE, MVT::i64, Expand);
// Register custom handling for vector loads/stores
- for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
- ++i) {
- MVT VT = (MVT::SimpleValueType) i;
+ for (MVT VT : MVT::vector_valuetypes()) {
if (IsPTXVectorType(VT)) {
setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::STORE, VT, Custom);
@@ -261,6 +259,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+ // PTX does not directly support SELP of i1, so promote to i32 first
+ setOperationAction(ISD::SELECT, MVT::i1, Custom);
+
// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::AND);
@@ -270,7 +271,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
// Now deduce the information based on the above mentioned
// actions
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
}
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -878,7 +879,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
unsigned retAlignment,
const ImmutableCallSite *CS) const {
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return "";
@@ -905,16 +906,14 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
O << ".param .b" << size << " _";
} else if (isa<PointerType>(retTy)) {
O << ".param .b" << getPointerTy().getSizeInBits() << " _";
+ } else if ((retTy->getTypeID() == Type::StructTyID) ||
+ isa<VectorType>(retTy)) {
+ O << ".param .align "
+ << retAlignment
+ << " .b8 _["
+ << getDataLayout()->getTypeAllocSize(retTy) << "]";
} else {
- if((retTy->getTypeID() == Type::StructTyID) ||
- isa<VectorType>(retTy)) {
- O << ".param .align "
- << retAlignment
- << " .b8 _["
- << getDataLayout()->getTypeAllocSize(retTy) << "]";
- } else {
- assert(false && "Unknown return type");
- }
+ llvm_unreachable("Unknown return type");
}
O << ") ";
}
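(Aside, not part of the patch: plugging values into the format strings above, a callee returning a 16-byte struct with 4-byte alignment gets the return clause .param .align 4 .b8 _[16], while an i32 return gets .param .b32 _.)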
@@ -1045,7 +1044,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Type *retTy = CLI.RetTy;
ImmutableCallSite *CS = CLI.CS;
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
@@ -1456,8 +1455,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
EVT ObjectVT = getValueType(retTy);
unsigned NumElts = ObjectVT.getVectorNumElements();
EVT EltVT = ObjectVT.getVectorElementType();
- assert(nvTM->getSubtargetImpl()->getTargetLowering()->getNumRegisters(
- F->getContext(), ObjectVT) == NumElts &&
+ assert(STI.getTargetLowering()->getNumRegisters(F->getContext(),
+ ObjectVT) == NumElts &&
"Vector was not scalarized");
unsigned sz = EltVT.getSizeInBits();
bool needTruncate = sz < 8 ? true : false;
@@ -1475,11 +1474,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
LoadRetVTs.push_back(EltVT);
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
@@ -1505,11 +1501,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParamV2, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
@@ -1551,11 +1544,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(Ofst, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
Opc, dl, DAG.getVTList(LoadRetVTs),
LoadRetOps, EltVT, MachinePointerInfo());
@@ -1609,11 +1599,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(Offsets[i], MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps,
@@ -1679,7 +1666,7 @@ SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
SDValue ShAmt = Op.getOperand(2);
unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
- if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
// For 32bit and sm35, we can use the funnel shift 'shf' instruction.
// {dHi, dLo} = {aHi, aLo} >> Amt
@@ -1739,7 +1726,7 @@ SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
- if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
// For 32bit and sm35, we can use the funnel shift 'shf' instruction.
// {dHi, dLo} = {aHi, aLo} << Amt
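(Aside, not part of the patch: the identity the sm_35 paths exploit, namely that each output word of a two-word shift is a funnel shift of the input halves, can be verified on the host. funnelShiftR below models PTX's shf.r in clamp mode for the right-shift case; the left-shift case is symmetric.)

#include <cassert>
#include <cstdint>

// Model of shf.r.clamp: shift the 64-bit concatenation {Hi, Lo} right by Amt
// and keep the low 32 bits; Amt is clamped to [0, 32].
static uint32_t funnelShiftR(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
  if (Amt > 32)
    Amt = 32;
  return (uint32_t)((((uint64_t)Hi << 32) | Lo) >> Amt);
}

int main() {
  uint64_t A = 0x0123456789ABCDEFull;
  for (uint32_t Amt = 0; Amt < 32; ++Amt) {
    uint32_t Lo = (uint32_t)A, Hi = (uint32_t)(A >> 32);
    uint32_t DLo = funnelShiftR(Hi, Lo, Amt); // low word of {aHi, aLo} >> Amt
    uint32_t DHi = Hi >> Amt; // logical shift; SRA would replicate the sign
    assert((((uint64_t)DHi << 32) | DLo) == A >> Amt);
  }
  return 0;
}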
@@ -1807,11 +1794,29 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRA_PARTS:
case ISD::SRL_PARTS:
return LowerShiftRightParts(Op, DAG);
+ case ISD::SELECT:
+ return LowerSelect(Op, DAG);
default:
llvm_unreachable("Custom lowering not defined for operation");
}
}
+SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Op0 = Op->getOperand(0);
+ SDValue Op1 = Op->getOperand(1);
+ SDValue Op2 = Op->getOperand(2);
+ SDLoc DL(Op.getNode());
+
+ assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
+
+ Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
+ SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
+
+ return Trunc;
+}
+
SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::i1)
return LowerLOADi1(Op, DAG);
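(Aside, not part of the patch: LowerSelect supersedes the and/or expansion of i1 select that this commit deletes from NVPTXInstrInfo.td further down. The promotion is sound because ANY_EXTEND leaves only the low bit meaningful and the final TRUNCATE keeps only that bit; the host-side sketch below checks the equivalence exhaustively.)

#include <cassert>
#include <cstdint>

// Scalar model of the i1 SELECT lowering: any-extend to i32, select (which
// maps onto PTX's selp), then truncate back to the low bit.
static bool selectI1ViaI32(bool P, bool A, bool B) {
  uint32_t A32 = A, B32 = B;    // ANY_EXTEND: upper 31 bits are don't-care
  uint32_t R32 = P ? A32 : B32; // SELECT on i32
  return R32 & 1u;              // TRUNCATE back to i1
}

int main() {
  for (int P = 0; P < 2; ++P)
    for (int A = 0; A < 2; ++A)
      for (int B = 0; B < 2; ++B)
        assert(selectI1ViaI32(P, A, B) == (P ? A : B));
  return 0;
}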
@@ -2033,13 +2038,13 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
const Function *F = MF.getFunction();
const AttributeSet &PAL = F->getAttributes();
- const TargetLowering *TLI = DAG.getSubtarget().getTargetLowering();
+ const TargetLowering *TLI = STI.getTargetLowering();
SDValue Root = DAG.getRoot();
std::vector<SDValue> OutChains;
bool isKernel = llvm::isKernelFunction(*F);
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
@@ -2337,7 +2342,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
Type *RetTy = F->getReturnType();
const DataLayout *TD = getDataLayout();
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
@@ -3757,7 +3762,8 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
}
std::pair<unsigned, const TargetRegisterClass *>
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
@@ -3778,7 +3784,7 @@ NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// getFunctionAlignment - Return the Log2 alignment of this function.
@@ -4200,7 +4206,7 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
default: break;
case ISD::ADD:
case ISD::FADD:
- return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
+ return PerformADDCombine(N, DCI, STI, OptLevel);
case ISD::MUL:
return PerformMULCombine(N, DCI, OptLevel);
case ISD::SHL:
@@ -4285,11 +4291,8 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
}
}
- SmallVector<SDValue, 8> OtherOps;
-
// Copy regular operands
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- OtherOps.push_back(N->getOperand(i));
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
// The select routine does not have access to the LoadSDNode instance, so
// pass along the extension information
@@ -4402,8 +4405,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
OtherOps.push_back(Chain); // Chain
// Skip operand 1 (intrinsic ID)
// Others
- for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
- OtherOps.push_back(N->getOperand(i));
+ OtherOps.append(N->op_begin() + 2, N->op_end());
MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
@@ -4434,9 +4436,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
"Custom handling of non-i8 ldu/ldg?");
// Just copy all operands as-is
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- Ops.push_back(N->getOperand(i));
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
// Force output to i16
SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h
index d66d81a..1b4da2c 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -436,7 +436,8 @@ class NVPTXSubtarget;
//===--------------------------------------------------------------------===//
class NVPTXTargetLowering : public TargetLowering {
public:
- explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM);
+ explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI);
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -469,7 +470,8 @@ public:
ConstraintType
getConstraintType(const std::string &Constraint) const override;
std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const std::string &Constraint,
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const override;
SDValue LowerFormalArguments(
@@ -507,8 +509,10 @@ public:
bool isFMAFasterThanFMulAndFAdd(EVT) const override { return true; }
+ bool enableAggressiveFMAFusion(EVT VT) const override { return true; }
+
private:
- const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here
+ const NVPTXSubtarget &STI; // cache the subtarget here
SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx,
EVT = MVT::i32) const;
@@ -527,6 +531,8 @@ private:
SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSelect(SDValue Op, SelectionDAG &DAG) const;
+
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
diff --git a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
index a98fb37..aa36b6b 100644
--- a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
+++ b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -16,11 +16,11 @@
#include "NVPTX.h"
#include "NVPTXUtilities.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Analysis/ConstantFolding.h"
using namespace llvm;
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index b5b4fbe..dabc3be 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -14,11 +14,11 @@
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
#include "NVPTXTargetMachine.h"
-#include "llvm/IR/Function.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
using namespace llvm;
@@ -28,9 +28,7 @@ using namespace llvm;
// Pin the vtable to this file.
void NVPTXInstrInfo::anchor() {}
-// FIXME: Add the subtarget support on this constructor.
-NVPTXInstrInfo::NVPTXInstrInfo(NVPTXSubtarget &STI)
- : NVPTXGenInstrInfo(), RegInfo(STI) {}
+NVPTXInstrInfo::NVPTXInstrInfo() : NVPTXGenInstrInfo(), RegInfo() {}
void NVPTXInstrInfo::copyPhysReg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.h b/lib/Target/NVPTX/NVPTXInstrInfo.h
index 6de7536..9b5d491 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.h
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -27,7 +27,7 @@ class NVPTXInstrInfo : public NVPTXGenInstrInfo {
const NVPTXRegisterInfo RegInfo;
virtual void anchor();
public:
- explicit NVPTXInstrInfo(NVPTXSubtarget &STI);
+ explicit NVPTXInstrInfo();
const NVPTXRegisterInfo &getRegisterInfo() const { return RegInfo; }
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index 9900b8c..68f0d9f 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -117,24 +117,24 @@ def F32ConstOne : Operand<f32>, PatLeaf<(f32 fpimm)>, SDNodeXForm<fpimm, [{
//===----------------------------------------------------------------------===//
-def hasAtomRedG32 : Predicate<"Subtarget.hasAtomRedG32()">;
-def hasAtomRedS32 : Predicate<"Subtarget.hasAtomRedS32()">;
-def hasAtomRedGen32 : Predicate<"Subtarget.hasAtomRedGen32()">;
+def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
def useAtomRedG32forGen32 :
- Predicate<"!Subtarget.hasAtomRedGen32() && Subtarget.hasAtomRedG32()">;
-def hasBrkPt : Predicate<"Subtarget.hasBrkPt()">;
-def hasAtomRedG64 : Predicate<"Subtarget.hasAtomRedG64()">;
-def hasAtomRedS64 : Predicate<"Subtarget.hasAtomRedS64()">;
-def hasAtomRedGen64 : Predicate<"Subtarget.hasAtomRedGen64()">;
+ Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
def useAtomRedG64forGen64 :
- Predicate<"!Subtarget.hasAtomRedGen64() && Subtarget.hasAtomRedG64()">;
-def hasAtomAddF32 : Predicate<"Subtarget.hasAtomAddF32()">;
-def hasVote : Predicate<"Subtarget.hasVote()">;
-def hasDouble : Predicate<"Subtarget.hasDouble()">;
-def reqPTX20 : Predicate<"Subtarget.reqPTX20()">;
-def hasLDG : Predicate<"Subtarget.hasLDG()">;
-def hasLDU : Predicate<"Subtarget.hasLDU()">;
-def hasGenericLdSt : Predicate<"Subtarget.hasGenericLdSt()">;
+ Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
+def hasVote : Predicate<"Subtarget->hasVote()">;
+def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
+def hasLDG : Predicate<"Subtarget->hasLDG()">;
+def hasLDU : Predicate<"Subtarget->hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
def doF32FTZ : Predicate<"useF32FTZ()">;
def doNoF32FTZ : Predicate<"!useF32FTZ()">;
@@ -150,12 +150,12 @@ def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
-def hasHWROT32 : Predicate<"Subtarget.hasHWROT32()">;
-def noHWROT32 : Predicate<"!Subtarget.hasHWROT32()">;
+def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
def true : Predicate<"1">;
-def hasPTX31 : Predicate<"Subtarget.getPTXVersion() >= 31">;
+def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
//===----------------------------------------------------------------------===//
@@ -296,7 +296,7 @@ multiclass F2<string OpcStr, SDNode OpNode> {
// General Type Conversion
//-----------------------------------
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// Generate a cvt to the given type from all possible types.
// Each instance takes a CvtMode immediate that defines the conversion mode to
// use. It can be CvtNONE to omit a conversion mode.
@@ -1356,11 +1356,6 @@ defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
-// Special select for predicate operands
-def : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)),
- (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a),
- (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>;
-
//
// Funnel shift in clamp mode
//
@@ -1659,12 +1654,12 @@ multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
(SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
}
-defm FSetGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
-defm FSetLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
-defm FSetGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
-defm FSetLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
-defm FSetEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
-defm FSetNE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
@@ -1673,6 +1668,13 @@ defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
+
defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
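(Aside, not part of the patch: the renames above keep the three flavors of floating-point setcc distinct. setoCC is ordered, i.e. false if either operand is NaN; setuCC is unordered, i.e. true if either operand is NaN; the plain setCC nodes added here leave NaN behavior unspecified and are mapped to the same PTX comparison modes as the ordered forms. A host-side illustration:)

#include <cassert>
#include <cmath>

int main() {
  float NaN = NAN, One = 1.0f;
  // C++'s > is an ordered compare, like setogt: a NaN operand makes it false.
  assert(!(NaN > One));
  // setugt means "unordered or greater", i.e. !setole; with a NaN operand it
  // is true, as the negated ordered <= shows:
  assert(!(NaN <= One));
  return 0;
}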
@@ -2094,7 +2096,7 @@ multiclass LD<NVPTXRegClass regclass> {
"$fromWidth \t$dst, [$addr+$offset];"), []>;
}
-let mayLoad=1, neverHasSideEffects=1 in {
+let mayLoad=1, hasSideEffects=0 in {
defm LD_i8 : LD<Int16Regs>;
defm LD_i16 : LD<Int16Regs>;
defm LD_i32 : LD<Int32Regs>;
@@ -2136,7 +2138,7 @@ multiclass ST<NVPTXRegClass regclass> {
" \t[$addr+$offset], $src;"), []>;
}
-let mayStore=1, neverHasSideEffects=1 in {
+let mayStore=1, hasSideEffects=0 in {
defm ST_i8 : ST<Int16Regs>;
defm ST_i16 : ST<Int16Regs>;
defm ST_i32 : ST<Int32Regs>;
@@ -2220,7 +2222,7 @@ multiclass LD_VEC<NVPTXRegClass regclass> {
"$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
[]>;
}
-let mayLoad=1, neverHasSideEffects=1 in {
+let mayLoad=1, hasSideEffects=0 in {
defm LDV_i8 : LD_VEC<Int16Regs>;
defm LDV_i16 : LD_VEC<Int16Regs>;
defm LDV_i32 : LD_VEC<Int32Regs>;
@@ -2303,7 +2305,7 @@ multiclass ST_VEC<NVPTXRegClass regclass> {
"$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
[]>;
}
-let mayStore=1, neverHasSideEffects=1 in {
+let mayStore=1, hasSideEffects=0 in {
defm STV_i8 : ST_VEC<Int16Regs>;
defm STV_i16 : ST_VEC<Int16Regs>;
defm STV_i32 : ST_VEC<Int32Regs>;
diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
index 8759406..da301d5 100644
--- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
+++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
@@ -16,6 +16,7 @@
#define LLVM_LIB_TARGET_NVPTX_NVPTXLOWERAGGRCOPIES_H
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Pass.h"
@@ -29,8 +30,8 @@ struct NVPTXLowerAggrCopies : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DataLayoutPass>();
- AU.addPreserved("stack-protector");
AU.addPreserved<MachineFunctionAnalysis>();
+ AU.addPreserved<StackProtector>();
}
bool runOnFunction(Function &F) override;
diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
index a1e1b9e..c1c67e3 100644
--- a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
+++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -48,9 +48,9 @@ MachineFunctionPass *llvm::createNVPTXPrologEpilogPass() {
char NVPTXPrologEpilogPass::ID = 0;
bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
- const TargetRegisterInfo &TRI = *TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetSubtargetInfo &STI = MF.getSubtarget();
+ const TargetFrameLowering &TFI = *STI.getFrameLowering();
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
bool Modified = false;
calculateFrameObjectOffsets(MF);
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 358ccce..5ca96e4 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -71,8 +71,7 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) {
}
}
-NVPTXRegisterInfo::NVPTXRegisterInfo(const NVPTXSubtarget &st)
- : NVPTXGenRegisterInfo(0), Is64Bit(st.is64Bit()) {}
+NVPTXRegisterInfo::NVPTXRegisterInfo() : NVPTXGenRegisterInfo(0) {}
#define GET_REGINFO_TARGET_DESC
#include "NVPTXGenRegisterInfo.inc"
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.h b/lib/Target/NVPTX/NVPTXRegisterInfo.h
index d2e6733..75b8f15 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.h
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.h
@@ -22,19 +22,13 @@
#include "NVPTXGenRegisterInfo.inc"
namespace llvm {
-
-// Forward Declarations.
-class TargetInstrInfo;
-class NVPTXSubtarget;
-
class NVPTXRegisterInfo : public NVPTXGenRegisterInfo {
private:
- bool Is64Bit;
// Hold Strings that can be free'd all together with NVPTXRegisterInfo
ManagedStringPool ManagedStrPool;
public:
- NVPTXRegisterInfo(const NVPTXSubtarget &st);
+ NVPTXRegisterInfo();
//------------------------------------------------------
// Pure virtual functions from TargetRegisterInfo
diff --git a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
index 324420d..e83f735 100644
--- a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
+++ b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -16,11 +16,12 @@
#include "NVPTX.h"
#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseSet.h"
using namespace llvm;
@@ -142,8 +143,9 @@ findIndexForHandle(MachineOperand &Op, MachineFunction &MF, unsigned &Idx) {
case NVPTX::LD_i64_avar: {
// The handle is a parameter value being loaded, replace with the
// parameter symbol
- const NVPTXSubtarget &ST = MF.getTarget().getSubtarget<NVPTXSubtarget>();
- if (ST.getDrvInterface() == NVPTX::CUDA) {
+ const NVPTXTargetMachine &TM =
+ static_cast<const NVPTXTargetMachine &>(MF.getTarget());
+ if (TM.getDrvInterface() == NVPTX::CUDA) {
// For CUDA, we preserve the param loads coming from function arguments
return false;
}
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 3d52532..069d6e1 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
using namespace llvm;
@@ -25,17 +26,6 @@ using namespace llvm;
// Pin the vtable to this file.
void NVPTXSubtarget::anchor() {}
-static std::string computeDataLayout(bool is64Bit) {
- std::string Ret = "e";
-
- if (!is64Bit)
- Ret += "-p:32:32";
-
- Ret += "-i64:64-v16:16-v32:32-n16:32:64";
-
- return Ret;
-}
-
NVPTXSubtarget &NVPTXSubtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
// Provide the default CPU if we don't have one.
@@ -54,18 +44,18 @@ NVPTXSubtarget &NVPTXSubtarget::initializeSubtargetDependencies(StringRef CPU,
}
NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetMachine &TM,
- bool is64Bit)
- : NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0),
- SmVersion(20), DL(computeDataLayout(is64Bit)),
- InstrInfo(initializeSubtargetDependencies(CPU, FS)),
- TLInfo((const NVPTXTargetMachine &)TM), TSInfo(&DL),
- FrameLowering(*this) {
-
- Triple T(TT);
-
- if (T.getOS() == Triple::NVCL)
- drvInterface = NVPTX::NVCL;
- else
- drvInterface = NVPTX::CUDA;
+ const std::string &FS,
+ const NVPTXTargetMachine &TM)
+ : NVPTXGenSubtargetInfo(TT, CPU, FS), PTXVersion(0), SmVersion(20), TM(TM),
+ InstrInfo(), TLInfo(TM, initializeSubtargetDependencies(CPU, FS)),
+ TSInfo(TM.getDataLayout()), FrameLowering() {}
+
+bool NVPTXSubtarget::hasImageHandles() const {
+ // Enable handles for Kepler+, where CUDA supports indirect surfaces and
+ // textures
+ if (TM.getDrvInterface() == NVPTX::CUDA)
+ return (SmVersion >= 30);
+
+ // Disabled, otherwise
+ return false;
}
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h
index fb2d404..e9833e5 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -32,8 +32,6 @@ namespace llvm {
class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
virtual void anchor();
std::string TargetName;
- NVPTX::DrvInterface drvInterface;
- bool Is64Bit;
// PTX version x.y is represented as 10*x+y, e.g. 3.1 == 31
unsigned PTXVersion;
@@ -41,7 +39,7 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
// SM version x.y is represented as 10*x+y, e.g. 3.1 == 31
unsigned int SmVersion;
- const DataLayout DL; // Calculates type size & alignment
+ const NVPTXTargetMachine &TM;
NVPTXInstrInfo InstrInfo;
NVPTXTargetLowering TLInfo;
TargetSelectionDAGInfo TSInfo;
@@ -55,13 +53,12 @@ public:
/// of the specified module.
///
NVPTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetMachine &TM, bool is64Bit);
+ const std::string &FS, const NVPTXTargetMachine &TM);
const TargetFrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
const NVPTXInstrInfo *getInstrInfo() const override { return &InstrInfo; }
- const DataLayout *getDataLayout() const override { return &DL; }
const NVPTXRegisterInfo *getRegisterInfo() const override {
return &InstrInfo.getRegisterInfo();
}
@@ -95,20 +92,9 @@ public:
}
inline bool hasROT32() const { return hasHWROT32() || hasSWROT32(); }
inline bool hasROT64() const { return SmVersion >= 20; }
-
- bool hasImageHandles() const {
- // Enable handles for Kepler+, where CUDA supports indirect surfaces and
- // textures
- if (getDrvInterface() == NVPTX::CUDA)
- return (SmVersion >= 30);
-
- // Disabled, otherwise
- return false;
- }
- bool is64Bit() const { return Is64Bit; }
+ bool hasImageHandles() const;
unsigned int getSmVersion() const { return SmVersion; }
- NVPTX::DrvInterface getDrvInterface() const { return drvInterface; }
std::string getTargetName() const { return TargetName; }
unsigned getPTXVersion() const { return PTXVersion; }
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index d87693f..1a267a6 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -17,6 +17,7 @@
#include "NVPTXAllocaHoisting.h"
#include "NVPTXLowerAggrCopies.h"
#include "NVPTXTargetObjectFile.h"
+#include "NVPTXTargetTransformInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
@@ -24,12 +25,12 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
@@ -69,14 +70,29 @@ extern "C" void LLVMInitializeNVPTXTarget() {
initializeNVPTXLowerStructArgsPass(*PassRegistry::getPassRegistry());
}
+static std::string computeDataLayout(bool is64Bit) {
+ std::string Ret = "e";
+
+ if (!is64Bit)
+ Ret += "-p:32:32";
+
+ Ret += "-i64:64-v16:16-v32:32-n16:32:64";
+
+ return Ret;
+}
+
NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64bit)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), is64bit(is64bit),
TLOF(make_unique<NVPTXTargetObjectFile>()),
- Subtarget(TT, CPU, FS, *this, is64bit) {
+ DL(computeDataLayout(is64bit)), Subtarget(TT, CPU, FS, *this) {
+ if (Triple(TT).getOS() == Triple::NVCL)
+ drvInterface = NVPTX::NVCL;
+ else
+ drvInterface = NVPTX::CUDA;
initAsmInfo();
}
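
For reference, the layout strings the new computeDataLayout produces can be checked with a standalone copy of the function (a sketch mirroring the code above, not the LLVM build itself):

    #include <cassert>
    #include <string>

    // Standalone copy of computeDataLayout, for illustration.
    static std::string computeDataLayout(bool is64Bit) {
      std::string Ret = "e";
      if (!is64Bit)
        Ret += "-p:32:32"; // 32-bit pointers; 64-bit is the default
      Ret += "-i64:64-v16:16-v32:32-n16:32:64";
      return Ret;
    }

    int main() {
      assert(computeDataLayout(false) ==
             "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64");
      assert(computeDataLayout(true) == "e-i64:64-v16:16-v32:32-n16:32:64");
      return 0;
    }
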
@@ -110,8 +126,7 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
- bool addPreRegAlloc() override;
- bool addPostRegAlloc() override;
+ void addPostRegAlloc() override;
void addMachineSSAOptimization() override;
FunctionPass *createTargetRegisterAllocator(bool) override;
@@ -125,12 +140,9 @@ TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
return PassConfig;
}
-void NVPTXTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our NVPTX pass. This
- // allows the NVPTX pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createNVPTXTargetTransformInfoPass(this));
+TargetIRAnalysis NVPTXTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &) { return TargetTransformInfo(NVPTXTTIImpl(this)); });
}
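
The new hook replaces pass registration with a factory: the lambda captures the target machine and builds a fresh TTI result whenever the analysis is queried for a function. A reduced sketch of that shape, with hypothetical stand-in types (TTI, IRAnalysis, and Machine here are not the real LLVM classes):

    #include <cassert>
    #include <functional>

    struct TTI { bool BranchDivergence; };
    struct IRAnalysis {
      std::function<TTI()> Build; // run once per queried function
    };

    struct Machine {
      bool Divergent = true; // stands in for target-specific state
      IRAnalysis getTargetIRAnalysis() {
        // Capture the machine; construct the result lazily, per query.
        return IRAnalysis{[this] { return TTI{Divergent}; }};
      }
    };

    int main() {
      Machine M;
      assert(M.getTargetIRAnalysis().Build().BranchDivergence);
      return 0;
    }
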
void NVPTXPassConfig::addIRPasses() {
@@ -149,6 +161,7 @@ void NVPTXPassConfig::addIRPasses() {
addPass(createNVPTXAssignValidGlobalNamesPass());
addPass(createGenericToNVVMPass());
addPass(createNVPTXFavorNonGenericAddrSpacesPass());
+ addPass(createStraightLineStrengthReducePass());
addPass(createSeparateConstOffsetFromGEPPass());
// The SeparateConstOffsetFromGEP pass creates variadic bases that can be used
// by multiple GEPs. Run GVN or EarlyCSE to really reuse them. GVN generates
@@ -183,10 +196,8 @@ bool NVPTXPassConfig::addInstSelector() {
return false;
}
-bool NVPTXPassConfig::addPreRegAlloc() { return false; }
-bool NVPTXPassConfig::addPostRegAlloc() {
- addPass(createNVPTXPrologEpilogPass());
- return false;
+void NVPTXPassConfig::addPostRegAlloc() {
+ addPass(createNVPTXPrologEpilogPass(), false);
}
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h
index a726bd1..a81abfe 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -14,8 +14,8 @@
#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETMACHINE_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETMACHINE_H
-#include "NVPTXSubtarget.h"
#include "ManagedStringPool.h"
+#include "NVPTXSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
@@ -25,7 +25,10 @@ namespace llvm {
/// NVPTXTargetMachine
///
class NVPTXTargetMachine : public LLVMTargetMachine {
+ bool is64bit;
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL; // Calculates type size & alignment
+ NVPTX::DrvInterface drvInterface;
NVPTXSubtarget Subtarget;
// Hold Strings that can be free'd all together with NVPTXTargetMachine
@@ -37,9 +40,10 @@ public:
CodeModel::Model CM, CodeGenOpt::Level OP, bool is64bit);
~NVPTXTargetMachine() override;
-
+ const DataLayout *getDataLayout() const override { return &DL; }
const NVPTXSubtarget *getSubtargetImpl() const override { return &Subtarget; }
-
+ bool is64Bit() const { return is64bit; }
+ NVPTX::DrvInterface getDrvInterface() const { return drvInterface; }
ManagedStringPool *getManagedStrPool() const {
return const_cast<ManagedStringPool *>(&ManagedStrPool);
}
@@ -55,8 +59,7 @@ public:
return TLOF.get();
}
- /// \brief Register NVPTX analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
}; // NVPTXTargetMachine.
diff --git a/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index b09d0d4..b8af04d 100644
--- a/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -1,4 +1,4 @@
-//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI pass ---------===//
+//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,19 +6,12 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// \file
-// This file implements a TargetTransformInfo analysis pass specific to the
-// NVPTX target machine. It uses the target's detailed information to provide
-// more precise answers to certain TTI queries, while letting the target
-// independent and default TTI implementations handle the rest.
-//
-//===----------------------------------------------------------------------===//
-#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -26,69 +19,10 @@ using namespace llvm;
#define DEBUG_TYPE "NVPTXtti"
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeNVPTXTTIPass(PassRegistry &);
-}
-
-namespace {
-
-class NVPTXTTI final : public ImmutablePass, public TargetTransformInfo {
- const NVPTXTargetLowering *TLI;
-public:
- NVPTXTTI() : ImmutablePass(ID), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- NVPTXTTI(const NVPTXTargetMachine *TM)
- : ImmutablePass(ID), TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializeNVPTXTTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override { pushTTIStack(this); }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo *)this;
- return this;
- }
-
- bool hasBranchDivergence() const override;
-
- unsigned getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
- OperandValueKind Opd2Info = OK_AnyValue,
- OperandValueProperties Opd1PropInfo = OP_None,
- OperandValueProperties Opd2PropInfo = OP_None) const override;
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(NVPTXTTI, TargetTransformInfo, "NVPTXtti",
- "NVPTX Target Transform Info", true, true, false)
-char NVPTXTTI::ID = 0;
-
-ImmutablePass *
-llvm::createNVPTXTargetTransformInfoPass(const NVPTXTargetMachine *TM) {
- return new NVPTXTTI(TM);
-}
-
-bool NVPTXTTI::hasBranchDivergence() const { return true; }
-
-unsigned NVPTXTTI::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
- OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
- OperandValueProperties Opd2PropInfo) const {
+unsigned NVPTXTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
@@ -96,8 +30,8 @@ unsigned NVPTXTTI::getArithmeticInstrCost(
switch (ISD) {
default:
- return TargetTransformInfo::getArithmeticInstrCost(
- Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
case ISD::ADD:
case ISD::MUL:
case ISD::XOR:
@@ -109,7 +43,7 @@ unsigned NVPTXTTI::getArithmeticInstrCost(
if (LT.second.SimpleTy == MVT::i64)
return 2 * LT.first;
// Delegate other cases to the basic TTI.
- return TargetTransformInfo::getArithmeticInstrCost(
- Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
}
}
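
The rule the rewritten method implements: for ADD/MUL/XOR/OR/AND/SHL/SRA/SRL whose legalized type is i64, each legalized piece (LT.first) costs 2; everything else delegates to the base implementation. A standalone sketch of just that arithmetic, with the base cost passed in as an assumption:

    #include <cassert>

    static unsigned nvptxArithCost(bool LegalizedToI64, unsigned NumParts,
                                   unsigned BaseCost) {
      if (LegalizedToI64)
        return 2 * NumParts; // i64 ops count double per legalized piece
      return BaseCost;       // defer to the generic cost model
    }

    int main() {
      assert(nvptxArithCost(true, 1, 1) == 2);  // a single i64 mul
      assert(nvptxArithCost(false, 1, 1) == 1); // i32 falls back
      return 0;
    }
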
diff --git a/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
new file mode 100644
index 0000000..bf21e88
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -0,0 +1,74 @@
+//===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file describes a TargetTransformInfo::Concept conforming object
+/// specific to the NVPTX target machine. It uses the target's detailed
+/// information to provide more precise answers to certain TTI queries, while
+/// letting the target-independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
+
+#include "NVPTX.h"
+#include "NVPTXTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
+ typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const NVPTXSubtarget *ST;
+ const NVPTXTargetLowering *TLI;
+
+ const NVPTXSubtarget *getST() const { return ST; }
+ const NVPTXTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM)
+ : BaseT(TM), ST(TM->getSubtargetImpl()), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ NVPTXTTIImpl(const NVPTXTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ NVPTXTTIImpl(NVPTXTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ NVPTXTTIImpl &operator=(const NVPTXTTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ NVPTXTTIImpl &operator=(NVPTXTTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ bool hasBranchDivergence() { return true; }
+
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/NVPTX/NVPTXUtilities.cpp b/lib/Target/NVPTX/NVPTXUtilities.cpp
index 5caa8bd..cf1feac 100644
--- a/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -15,16 +15,16 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MutexGuard.h"
#include <algorithm>
#include <cstring>
#include <map>
#include <string>
#include <vector>
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/IR/InstIterator.h"
-#include "llvm/Support/MutexGuard.h"
using namespace llvm;
@@ -52,7 +52,7 @@ static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) {
assert(prop && "Annotation property not a string");
// value
- ConstantInt *Val = dyn_cast<ConstantInt>(md->getOperand(i + 1));
+ ConstantInt *Val = mdconst::dyn_extract<ConstantInt>(md->getOperand(i + 1));
assert(Val && "Value operand not a constant int");
std::string keyname = prop->getString().str();
@@ -75,7 +75,8 @@ static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
const MDNode *elem = NMD->getOperand(i);
- Value *entity = elem->getOperand(0);
+ GlobalValue *entity =
+ mdconst::dyn_extract_or_null<GlobalValue>(elem->getOperand(0));
// entity may be null due to DCE
if (!entity)
continue;
@@ -322,7 +323,7 @@ bool llvm::getAlign(const CallInst &I, unsigned index, unsigned &align) {
if (MDNode *alignNode = I.getMetadata("callalign")) {
for (int i = 0, n = alignNode->getNumOperands(); i < n; i++) {
if (const ConstantInt *CI =
- dyn_cast<ConstantInt>(alignNode->getOperand(i))) {
+ mdconst::dyn_extract<ConstantInt>(alignNode->getOperand(i))) {
unsigned v = CI->getZExtValue();
if ((v >> 16) == index) {
align = v & 0xFFFF;
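
The "callalign" metadata walked above packs one operand's index and alignment into a single 32-bit value: the index in the high 16 bits, the alignment in the low 16. A standalone round-trip of that packing (the helper name is illustrative only):

    #include <cassert>
    #include <cstdint>

    static uint32_t packAlign(uint16_t Index, uint16_t Align) {
      return (uint32_t(Index) << 16) | Align;
    }

    int main() {
      uint32_t v = packAlign(2, 8);
      assert((v >> 16) == 2);    // the (v >> 16) == index test above
      assert((v & 0xFFFF) == 8); // recovers align = v & 0xFFFF
      return 0;
    }
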
diff --git a/lib/Target/NVPTX/NVPTXVector.td b/lib/Target/NVPTX/NVPTXVector.td
index 775df19..85aa34e 100644
--- a/lib/Target/NVPTX/NVPTXVector.td
+++ b/lib/Target/NVPTX/NVPTXVector.td
@@ -661,7 +661,7 @@ class ShuffleAsmStr4<string type>
string s = !strconcat(t6, ShuffleOneLine<"4", "3", type>.s);
}
-let neverHasSideEffects=1, VecInstType=isVecShuffle.Value in {
+let hasSideEffects=0, VecInstType=isVecShuffle.Value in {
def VecShuffle_v4f32 : NVPTXVecInst<(outs V4F32Regs:$dst),
(ins V4F32Regs:$src1, V4F32Regs:$src2,
i8imm:$c0, i8imm:$c1, i8imm:$c2, i8imm:$c3),
@@ -847,7 +847,7 @@ class Vec_Move<string asmstr, NVPTXRegClass vclass, NVPTXInst sop=NOP>
!strconcat(asmstr, "\t${dst:vecfull}, ${src:vecfull};"),
[], sop>;
-let isAsCheapAsAMove=1, neverHasSideEffects=1, IsSimpleMove=1,
+let isAsCheapAsAMove=1, hasSideEffects=0, IsSimpleMove=1,
VecInstType=isVecOther.Value in {
def V4f32Mov : Vec_Move<"mov.v4.f32", V4F32Regs, FMOV32rr>;
def V2f32Mov : Vec_Move<"mov.v2.f32", V2F32Regs, FMOV32rr>;
diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 06bb968..bf00e73 100644
--- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -32,9 +32,7 @@
using namespace llvm;
-namespace {
-
-static unsigned RRegs[32] = {
+static const MCPhysReg RRegs[32] = {
PPC::R0, PPC::R1, PPC::R2, PPC::R3,
PPC::R4, PPC::R5, PPC::R6, PPC::R7,
PPC::R8, PPC::R9, PPC::R10, PPC::R11,
@@ -44,7 +42,7 @@ static unsigned RRegs[32] = {
PPC::R24, PPC::R25, PPC::R26, PPC::R27,
PPC::R28, PPC::R29, PPC::R30, PPC::R31
};
-static unsigned RRegsNoR0[32] = {
+static const MCPhysReg RRegsNoR0[32] = {
PPC::ZERO,
PPC::R1, PPC::R2, PPC::R3,
PPC::R4, PPC::R5, PPC::R6, PPC::R7,
@@ -55,7 +53,7 @@ static unsigned RRegsNoR0[32] = {
PPC::R24, PPC::R25, PPC::R26, PPC::R27,
PPC::R28, PPC::R29, PPC::R30, PPC::R31
};
-static unsigned XRegs[32] = {
+static const MCPhysReg XRegs[32] = {
PPC::X0, PPC::X1, PPC::X2, PPC::X3,
PPC::X4, PPC::X5, PPC::X6, PPC::X7,
PPC::X8, PPC::X9, PPC::X10, PPC::X11,
@@ -65,7 +63,7 @@ static unsigned XRegs[32] = {
PPC::X24, PPC::X25, PPC::X26, PPC::X27,
PPC::X28, PPC::X29, PPC::X30, PPC::X31
};
-static unsigned XRegsNoX0[32] = {
+static const MCPhysReg XRegsNoX0[32] = {
PPC::ZERO8,
PPC::X1, PPC::X2, PPC::X3,
PPC::X4, PPC::X5, PPC::X6, PPC::X7,
@@ -76,7 +74,7 @@ static unsigned XRegsNoX0[32] = {
PPC::X24, PPC::X25, PPC::X26, PPC::X27,
PPC::X28, PPC::X29, PPC::X30, PPC::X31
};
-static unsigned FRegs[32] = {
+static const MCPhysReg FRegs[32] = {
PPC::F0, PPC::F1, PPC::F2, PPC::F3,
PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8, PPC::F9, PPC::F10, PPC::F11,
@@ -86,7 +84,7 @@ static unsigned FRegs[32] = {
PPC::F24, PPC::F25, PPC::F26, PPC::F27,
PPC::F28, PPC::F29, PPC::F30, PPC::F31
};
-static unsigned VRegs[32] = {
+static const MCPhysReg VRegs[32] = {
PPC::V0, PPC::V1, PPC::V2, PPC::V3,
PPC::V4, PPC::V5, PPC::V6, PPC::V7,
PPC::V8, PPC::V9, PPC::V10, PPC::V11,
@@ -96,7 +94,7 @@ static unsigned VRegs[32] = {
PPC::V24, PPC::V25, PPC::V26, PPC::V27,
PPC::V28, PPC::V29, PPC::V30, PPC::V31
};
-static unsigned VSRegs[64] = {
+static const MCPhysReg VSRegs[64] = {
PPC::VSL0, PPC::VSL1, PPC::VSL2, PPC::VSL3,
PPC::VSL4, PPC::VSL5, PPC::VSL6, PPC::VSL7,
PPC::VSL8, PPC::VSL9, PPC::VSL10, PPC::VSL11,
@@ -115,7 +113,7 @@ static unsigned VSRegs[64] = {
PPC::VSH24, PPC::VSH25, PPC::VSH26, PPC::VSH27,
PPC::VSH28, PPC::VSH29, PPC::VSH30, PPC::VSH31
};
-static unsigned VSFRegs[64] = {
+static const MCPhysReg VSFRegs[64] = {
PPC::F0, PPC::F1, PPC::F2, PPC::F3,
PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8, PPC::F9, PPC::F10, PPC::F11,
@@ -134,7 +132,17 @@ static unsigned VSFRegs[64] = {
PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
};
-static unsigned CRBITRegs[32] = {
+static const MCPhysReg QFRegs[32] = {
+ PPC::QF0, PPC::QF1, PPC::QF2, PPC::QF3,
+ PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
+ PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11,
+ PPC::QF12, PPC::QF13, PPC::QF14, PPC::QF15,
+ PPC::QF16, PPC::QF17, PPC::QF18, PPC::QF19,
+ PPC::QF20, PPC::QF21, PPC::QF22, PPC::QF23,
+ PPC::QF24, PPC::QF25, PPC::QF26, PPC::QF27,
+ PPC::QF28, PPC::QF29, PPC::QF30, PPC::QF31
+};
+static const MCPhysReg CRBITRegs[32] = {
PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN,
PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN,
PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
@@ -144,7 +152,7 @@ static unsigned CRBITRegs[32] = {
PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN,
PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN
};
-static unsigned CRRegs[8] = {
+static const MCPhysReg CRRegs[8] = {
PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7
};
@@ -210,6 +218,8 @@ EvaluateCRExpr(const MCExpr *E) {
llvm_unreachable("Invalid expression kind!");
}
+namespace {
+
struct PPCOperand;
class PPCAsmParser : public MCTargetAsmParser {
@@ -429,6 +439,7 @@ public:
bool isU8ImmX8() const { return Kind == Immediate &&
isUInt<8>(getImm()) &&
(getImm() & 7) == 0; }
+ bool isU12Imm() const { return Kind == Immediate && isUInt<12>(getImm()); }
bool isU16Imm() const {
switch (Kind) {
case Expression:
@@ -564,6 +575,21 @@ public:
Inst.addOperand(MCOperand::CreateReg(VSFRegs[getVSReg()]));
}
+ void addRegQFRCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(QFRegs[getReg()]));
+ }
+
+ void addRegQSRCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(QFRegs[getReg()]));
+ }
+
+ void addRegQBRCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(QFRegs[getReg()]));
+ }
+
void addRegCRBITRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getCRBit()]));
@@ -1053,7 +1079,6 @@ bool PPCAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MCInst Inst;
switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
- default: break;
case Match_Success:
// Post-process instructions (typically extended mnemonics)
ProcessInstruction(Inst, Operands);
@@ -1063,7 +1088,7 @@ bool PPCAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_MissingFeature:
return Error(IDLoc, "instruction use requires an option to be enabled");
case Match_MnemonicFail:
- return Error(IDLoc, "unrecognized instruction mnemonic");
+ return Error(IDLoc, "unrecognized instruction mnemonic");
case Match_InvalidOperand: {
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
diff --git a/lib/Target/PowerPC/CMakeLists.txt b/lib/Target/PowerPC/CMakeLists.txt
index 47a9474..936ed7f 100644
--- a/lib/Target/PowerPC/CMakeLists.txt
+++ b/lib/Target/PowerPC/CMakeLists.txt
@@ -20,8 +20,11 @@ add_llvm_target(PowerPCCodeGen
PPCInstrInfo.cpp
PPCISelDAGToDAG.cpp
PPCISelLowering.cpp
+ PPCEarlyReturn.cpp
PPCFastISel.cpp
PPCFrameLowering.cpp
+ PPCLoopDataPrefetch.cpp
+ PPCLoopPreIncPrep.cpp
PPCMCInstLower.cpp
PPCMachineFunctionInfo.cpp
PPCRegisterInfo.cpp
@@ -30,6 +33,9 @@ add_llvm_target(PowerPCCodeGen
PPCTargetObjectFile.cpp
PPCTargetTransformInfo.cpp
PPCSelectionDAGInfo.cpp
+ PPCTLSDynamicCall.cpp
+ PPCVSXCopy.cpp
+ PPCVSXFMAMutate.cpp
)
add_subdirectory(AsmParser)
diff --git a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
index 5251b60..0ed0723 100644
--- a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
+++ b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
@@ -164,6 +164,17 @@ static const unsigned G8Regs[] = {
PPC::X28, PPC::X29, PPC::X30, PPC::X31
};
+static const unsigned QFRegs[] = {
+ PPC::QF0, PPC::QF1, PPC::QF2, PPC::QF3,
+ PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
+ PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11,
+ PPC::QF12, PPC::QF13, PPC::QF14, PPC::QF15,
+ PPC::QF16, PPC::QF17, PPC::QF18, PPC::QF19,
+ PPC::QF20, PPC::QF21, PPC::QF22, PPC::QF23,
+ PPC::QF24, PPC::QF25, PPC::QF26, PPC::QF27,
+ PPC::QF28, PPC::QF29, PPC::QF30, PPC::QF31
+};
+
template <std::size_t N>
static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
const unsigned (&Regs)[N]) {
@@ -235,6 +246,15 @@ static DecodeStatus DecodeG8RCRegisterClass(MCInst &Inst, uint64_t RegNo,
#define DecodePointerLikeRegClass0 DecodeGPRCRegisterClass
#define DecodePointerLikeRegClass1 DecodeGPRC_NOR0RegisterClass
+static DecodeStatus DecodeQFRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, QFRegs);
+}
+
+#define DecodeQSRCRegisterClass DecodeQFRCRegisterClass
+#define DecodeQBRCRegisterClass DecodeQFRCRegisterClass
+
template<unsigned N>
static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
int64_t Address, const void *Decoder) {
@@ -335,6 +355,15 @@ DecodeStatus PPCDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
uint32_t Inst =
(Bytes[0] << 24) | (Bytes[1] << 16) | (Bytes[2] << 8) | (Bytes[3] << 0);
+ if ((STI.getFeatureBits() & PPC::FeatureQPX) != 0) {
+ DecodeStatus result =
+ decodeInstruction(DecoderTableQPX32, MI, Inst, Address, this, STI);
+ if (result != MCDisassembler::Fail)
+ return result;
+
+ MI.clear();
+ }
+
return decodeInstruction(DecoderTable32, MI, Inst, Address, this, STI);
}
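
Two details of the decoder change: instructions are assembled big-endian from the four raw bytes, and when FeatureQPX is set the QPX decoder table is tried first, falling back to the generic 32-bit table on failure. A standalone check of the byte assembly (the sample encoding is believed to be PPC "sync", but treat it as illustrative):

    #include <cassert>
    #include <cstdint>

    static uint32_t readBE32(const uint8_t B[4]) {
      return (uint32_t(B[0]) << 24) | (uint32_t(B[1]) << 16) |
             (uint32_t(B[2]) << 8) | (uint32_t(B[3]) << 0);
    }

    int main() {
      const uint8_t Bytes[4] = {0x7C, 0x00, 0x04, 0xAC};
      assert(readBE32(Bytes) == 0x7C0004ACu);
      return 0;
    }
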
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index 670c40a..c287fbe 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -34,7 +34,20 @@ FullRegNames("ppc-asm-full-reg-names", cl::Hidden, cl::init(false),
#include "PPCGenAsmWriter.inc"
void PPCInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- OS << getRegisterName(RegNo);
+ const char *RegName = getRegisterName(RegNo);
+ if (RegName[0] == 'q' /* QPX */) {
+ // The system toolchain on the BG/Q does not understand QPX register names
+ // in .cfi_* directives, so print the name of the floating-point
+ // subregister instead.
+ std::string RN(RegName);
+
+ RN[0] = 'f';
+ OS << RN;
+
+ return;
+ }
+
+ OS << RegName;
}
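
The effect of the printRegName change, reduced to a standalone helper (sketch only): QPX names are rewritten to their floating-point subregister so BG/Q system tools accept the .cfi_* directives, and every other register prints unchanged.

    #include <cassert>
    #include <string>

    static std::string cfiRegName(std::string RN) {
      if (!RN.empty() && RN[0] == 'q') // QPX register
        RN[0] = 'f';                   // print the FP subregister instead
      return RN;
    }

    int main() {
      assert(cfiRegName("q5") == "f5");
      assert(cfiRegName("f5") == "f5");
      assert(cfiRegName("r31") == "r31");
      return 0;
    }
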
void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
@@ -236,6 +249,13 @@ void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo,
O << (unsigned int)Value;
}
+void PPCInstPrinter::printU12ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned short Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 4095 && "Invalid u12imm argument!");
+ O << (unsigned short)Value;
+}
+
void PPCInstPrinter::printS16ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
if (MI->getOperand(OpNo).isImm())
@@ -338,6 +358,7 @@ static const char *stripRegisterPrefix(const char *RegName) {
switch (RegName[0]) {
case 'r':
case 'f':
+ case 'q': // for QPX
case 'v':
if (RegName[1] == 's')
return RegName + 2;
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index b21aa22..6ead19b 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -48,6 +48,7 @@ public:
void printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU12ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
diff --git a/lib/Target/PowerPC/LLVMBuild.txt b/lib/Target/PowerPC/LLVMBuild.txt
index 9d173d6..fd5fa56 100644
--- a/lib/Target/PowerPC/LLVMBuild.txt
+++ b/lib/Target/PowerPC/LLVMBuild.txt
@@ -31,5 +31,5 @@ has_jit = 1
type = Library
name = PowerPCCodeGen
parent = PowerPC
-required_libraries = Analysis AsmPrinter CodeGen Core MC PowerPCAsmPrinter PowerPCDesc PowerPCInfo SelectionDAG Support Target TransformUtils
+required_libraries = Analysis AsmPrinter CodeGen Core MC PowerPCAsmPrinter PowerPCDesc PowerPCInfo Scalar SelectionDAG Support Target TransformUtils
add_to_library_groups = PowerPC
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index c54d5e7..bea88a2 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -9,8 +9,8 @@
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCFixupKinds.h"
-#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index 893aae3..2b4f2d8 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -74,9 +74,6 @@ PPCELFMCAsmInfo::PPCELFMCAsmInfo(bool is64Bit, const Triple& T) {
AssemblerDialect = 1; // New-Style mnemonics.
LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
- if (T.getOS() == llvm::Triple::FreeBSD ||
- (T.getOS() == llvm::Triple::NetBSD && !is64Bit) ||
- (T.getOS() == llvm::Triple::OpenBSD && !is64Bit))
- UseIntegratedAssembler = true;
+ UseIntegratedAssembler = true;
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
index 9f0294d..86ad385 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
@@ -21,7 +21,8 @@ namespace llvm {
class Triple;
class PPCMCAsmInfoDarwin : public MCAsmInfoDarwin {
- void anchor() override;
+ virtual void anchor();
+
public:
explicit PPCMCAsmInfoDarwin(bool is64Bit, const Triple&);
};
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 786b7fe..06d380e 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -31,8 +31,8 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
class PPCMCCodeEmitter : public MCCodeEmitter {
- PPCMCCodeEmitter(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ PPCMCCodeEmitter(const PPCMCCodeEmitter &) = delete;
+ void operator=(const PPCMCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
const MCContext &CTX;
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 00be8f4..f2da389 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -184,6 +184,23 @@ public:
if ((Flags & ELF::EF_PPC64_ABI) == 0)
MCA.setELFHeaderEFlags(Flags | 2);
}
+ void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override {
+ // When encoding an assignment to set symbol A to symbol B, also copy
+ // the st_other bits encoding the local entry point offset.
+ if (Value->getKind() != MCExpr::SymbolRef)
+ return;
+ const MCSymbol &RhsSym =
+ static_cast<const MCSymbolRefExpr *>(Value)->getSymbol();
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(&RhsSym);
+ MCSymbolData &SymbolData = getStreamer().getOrCreateSymbolData(Symbol);
+ // The "other" values are stored in the last 6 bits of the second byte.
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ unsigned Other = MCELF::getOther(SymbolData) << 2;
+ Other &= ~ELF::STO_PPC64_LOCAL_MASK;
+ Other |= (MCELF::getOther(Data) << 2) & ELF::STO_PPC64_LOCAL_MASK;
+ MCELF::setOther(SymbolData, Other >> 2);
+ }
};
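
A standalone check of the st_other bit-copying in emitAssignment, assuming (as the comment says) the 6-bit "other" value is stored pre-shifted right by 2, and assuming ELF::STO_PPC64_LOCAL_MASK has its usual value 0xE0 (bits 5-7 of the full byte):

    #include <cassert>
    #include <cstdint>

    static const unsigned STO_PPC64_LOCAL_MASK = 0xE0; // assumed value

    static uint8_t copyLocalEntry(uint8_t LhsPacked, uint8_t RhsPacked) {
      unsigned Other = unsigned(LhsPacked) << 2; // widen to full-byte view
      Other &= ~STO_PPC64_LOCAL_MASK;            // drop old local-entry bits
      Other |= (unsigned(RhsPacked) << 2) & STO_PPC64_LOCAL_MASK;
      return uint8_t(Other >> 2);                // back to the packed form
    }

    int main() {
      // An RHS byte of 0xA0 packs to 0x28; its local-entry bits survive.
      assert(copyLocalEntry(0x00, 0x28) == 0x28);
      return 0;
    }
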
class PPCTargetMachOStreamer : public PPCTargetStreamer {
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
index df2f14a..f7259b9 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
@@ -41,7 +41,7 @@ public:
: MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
/*UseAggressiveSymbolFolding=*/Is64Bit) {}
- void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ void RecordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
const MCAsmLayout &Layout, const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) override {
@@ -282,7 +282,7 @@ bool PPCMachObjectWriter::RecordScatteredRelocation(
MachO::any_relocation_info MRE;
makeScatteredRelocationInfo(MRE, other_half, MachO::GENERIC_RELOC_PAIR,
Log2Size, IsPCRel, Value2);
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
} else {
// If the offset is more than 24-bits, it won't fit in a scattered
// relocation offset field, so we fall back to using a non-scattered
@@ -296,7 +296,7 @@ bool PPCMachObjectWriter::RecordScatteredRelocation(
}
MachO::any_relocation_info MRE;
makeScatteredRelocationInfo(MRE, FixupOffset, Type, Log2Size, IsPCRel, Value);
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
return true;
}
@@ -331,9 +331,9 @@ void PPCMachObjectWriter::RecordPPCRelocation(
// See <reloc.h>.
const uint32_t FixupOffset = getFixupOffset(Layout, Fragment, Fixup);
unsigned Index = 0;
- unsigned IsExtern = 0;
unsigned Type = RelocType;
+ const MCSymbolData *RelSymbol = nullptr;
if (Target.isAbsolute()) { // constant
// SymbolNum of 0 indicates the absolute section.
//
@@ -355,8 +355,7 @@ void PPCMachObjectWriter::RecordPPCRelocation(
// Check whether we need an external or internal relocation.
if (Writer->doesSymbolRequireExternRelocation(SD)) {
- IsExtern = 1;
- Index = SD->getIndex();
+ RelSymbol = SD;
// For external relocations, make sure to offset the fixup value to
// compensate for the addend of the symbol address, if it was
// undefined. This occurs with weak definitions, for example.
@@ -375,9 +374,8 @@ void PPCMachObjectWriter::RecordPPCRelocation(
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
- makeRelocationInfo(MRE, FixupOffset, Index, IsPCRel, Log2Size, IsExtern,
- Type);
- Writer->addRelocation(Fragment->getParent(), MRE);
+ makeRelocationInfo(MRE, FixupOffset, Index, IsPCRel, Log2Size, false, Type);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
MCObjectWriter *llvm::createPPCMachObjectWriter(raw_ostream &OS, bool Is64Bit,
diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h
index 8fb33df..5e5a9b1 100644
--- a/lib/Target/PowerPC/PPC.h
+++ b/lib/Target/PowerPC/PPC.h
@@ -34,18 +34,17 @@ namespace llvm {
#ifndef NDEBUG
FunctionPass *createPPCCTRLoopsVerify();
#endif
+ FunctionPass *createPPCLoopDataPrefetchPass();
+ FunctionPass *createPPCLoopPreIncPrepPass(PPCTargetMachine &TM);
FunctionPass *createPPCEarlyReturnPass();
FunctionPass *createPPCVSXCopyPass();
- FunctionPass *createPPCVSXCopyCleanupPass();
FunctionPass *createPPCVSXFMAMutatePass();
FunctionPass *createPPCBranchSelectionPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
+ FunctionPass *createPPCTLSDynamicCallPass();
void LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
AsmPrinter &AP, bool isDarwin);
- /// \brief Creates an PPC-specific Target Transformation Info pass.
- ImmutablePass *createPPCTargetTransformInfoPass(const PPCTargetMachine *TM);
-
void initializePPCVSXFMAMutatePass(PassRegistry&);
extern char &PPCVSXFMAMutateID;
@@ -93,12 +92,7 @@ namespace llvm {
MO_TOC_LO = 7 << 4,
// Symbol for VK_PPC_TLS fixup attached to an ADD instruction
- MO_TLS = 8 << 4,
-
- // Symbols for VK_PPC_TLSGD and VK_PPC_TLSLD in __tls_get_addr
- // call sequences.
- MO_TLSLD = 9 << 4,
- MO_TLSGD = 10 << 4
+ MO_TLS = 8 << 4
};
} // end namespace PPCII
diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td
index 46d56a4..f53add5 100644
--- a/lib/Target/PowerPC/PPC.td
+++ b/lib/Target/PowerPC/PPC.td
@@ -88,8 +88,13 @@ def FeaturePOPCNTD : SubtargetFeature<"popcntd","HasPOPCNTD", "true",
"Enable the popcnt[dw] instructions">;
def FeatureLDBRX : SubtargetFeature<"ldbrx","HasLDBRX", "true",
"Enable the ldbrx instruction">;
+def FeatureCMPB : SubtargetFeature<"cmpb", "HasCMPB", "true",
+ "Enable the cmpb instruction">;
+def FeatureICBT : SubtargetFeature<"icbt","HasICBT", "true",
+ "Enable icbt instruction">;
def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
- "Enable Book E instructions">;
+ "Enable Book E instructions",
+ [FeatureICBT]>;
def FeatureMSYNC : SubtargetFeature<"msync", "HasOnlyMSYNC", "true",
"Has only the msync instruction instead of sync",
[FeatureBookE]>;
@@ -104,9 +109,17 @@ def FeatureQPX : SubtargetFeature<"qpx","HasQPX", "true",
def FeatureVSX : SubtargetFeature<"vsx","HasVSX", "true",
"Enable VSX instructions",
[FeatureAltivec]>;
+def FeatureP8Altivec : SubtargetFeature<"power8-altivec", "HasP8Altivec", "true",
+ "Enable POWER8 Altivec instructions",
+ [FeatureAltivec]>;
def FeatureP8Vector : SubtargetFeature<"power8-vector", "HasP8Vector", "true",
"Enable POWER8 vector instructions",
- [FeatureVSX, FeatureAltivec]>;
+ [FeatureVSX, FeatureP8Altivec]>;
+
+def FeatureInvariantFunctionDescriptors :
+ SubtargetFeature<"invariant-function-descriptors",
+ "HasInvariantFunctionDescriptors", "true",
+ "Assume function descriptors are invariant">;
def DeprecatedMFTB : SubtargetFeature<"", "DeprecatedMFTB", "true",
"Treat mftb as deprecated">;
@@ -116,21 +129,10 @@ def DeprecatedDST : SubtargetFeature<"", "DeprecatedDST", "true",
// Note: Future features to add when support is extended to more
// recent ISA levels:
//
-// CMPB p6, p6x, p7 cmpb
// DFP p6, p6x, p7 decimal floating-point instructions
// POPCNTB p5 through p7 popcntb and related instructions
//===----------------------------------------------------------------------===//
-// ABI Selection //
-//===----------------------------------------------------------------------===//
-
-def FeatureELFv1 : SubtargetFeature<"elfv1", "TargetABI", "PPC_ABI_ELFv1",
- "Use the ELFv1 ABI">;
-
-def FeatureELFv2 : SubtargetFeature<"elfv2", "TargetABI", "PPC_ABI_ELFv2",
- "Use the ELFv2 ABI">;
-
-//===----------------------------------------------------------------------===//
// Classes used for relation maps.
//===----------------------------------------------------------------------===//
// RecFormRel - Filter class used to relate non-record-form instructions with
@@ -201,12 +203,12 @@ include "PPCInstrInfo.td"
def : Processor<"generic", G3Itineraries, [Directive32]>;
def : ProcessorModel<"440", PPC440Model, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
- FeatureBookE, FeatureMSYNC,
- DeprecatedMFTB]>;
+ FeatureICBT, FeatureBookE,
+ FeatureMSYNC, DeprecatedMFTB]>;
def : ProcessorModel<"450", PPC440Model, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
- FeatureBookE, FeatureMSYNC,
- DeprecatedMFTB]>;
+ FeatureICBT, FeatureBookE,
+ FeatureMSYNC, DeprecatedMFTB]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
def : Processor<"602", G3Itineraries, [Directive602]>;
def : Processor<"603", G3Itineraries, [Directive603,
@@ -233,6 +235,34 @@ def : Processor<"7450", G4PlusItineraries, [Directive7400, FeatureAltivec,
FeatureFRES, FeatureFRSQRTE]>;
def : Processor<"g4+", G4PlusItineraries, [Directive7400, FeatureAltivec,
FeatureFRES, FeatureFRSQRTE]>;
+
+/* Since new processors generally contain a superset of the features of those
+ that came before them, the idea is to make implementations of new processors
+ less error-prone and easier to read.
+ Namely:
+ list<SubtargetFeature> Power8FeatureList = ...
+ list<SubtargetFeature> FutureProcessorSpecificFeatureList =
+ [ features that Power8 does not support ]
+ list<SubtargetFeature> FutureProcessorFeatureList =
+ !listconcat(Power8FeatureList, FutureProcessorSpecificFeatureList)
+
+ This makes it explicit and obvious what is new in FutureProcessor vs. Power8,
+ as well as providing a single point of definition if the feature set will be
+ used elsewhere.
+
+*/
+def ProcessorFeatures {
+ list<SubtargetFeature> Power8FeatureList =
+ [DirectivePwr8, FeatureAltivec, FeatureP8Altivec, FeatureVSX,
+ FeatureP8Vector, FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt,
+ FeatureFRE, FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
+ FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
+ FeatureFPRND, FeatureFPCVT, FeatureISEL,
+ FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX,
+ Feature64Bit /*, Feature64BitRegs */, FeatureICBT,
+ DeprecatedMFTB, DeprecatedDST];
+}
+
def : ProcessorModel<"970", G5Model,
[Directive970, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt,
@@ -246,27 +276,27 @@ def : ProcessorModel<"g5", G5Model,
DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"e500mc", PPCE500mcModel,
[DirectiveE500mc, FeatureMFOCRF,
- FeatureSTFIWX, FeatureBookE, FeatureISEL,
- DeprecatedMFTB]>;
+ FeatureSTFIWX, FeatureICBT, FeatureBookE,
+ FeatureISEL, DeprecatedMFTB]>;
def : ProcessorModel<"e5500", PPCE5500Model,
[DirectiveE5500, FeatureMFOCRF, Feature64Bit,
- FeatureSTFIWX, FeatureBookE, FeatureISEL,
- DeprecatedMFTB]>;
+ FeatureSTFIWX, FeatureICBT, FeatureBookE,
+ FeatureISEL, DeprecatedMFTB]>;
def : ProcessorModel<"a2", PPCA2Model,
- [DirectiveA2, FeatureBookE, FeatureMFOCRF,
+ [DirectiveA2, FeatureICBT, FeatureBookE, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureLDBRX, Feature64Bit
+ FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
/*, Feature64BitRegs */, DeprecatedMFTB]>;
def : ProcessorModel<"a2q", PPCA2Model,
- [DirectiveA2, FeatureBookE, FeatureMFOCRF,
+ [DirectiveA2, FeatureICBT, FeatureBookE, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureLDBRX, Feature64Bit
+ FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
/*, Feature64BitRegs */, FeatureQPX, DeprecatedMFTB]>;
def : ProcessorModel<"pwr3", G5Model,
[DirectivePwr3, FeatureAltivec,
@@ -292,45 +322,33 @@ def : ProcessorModel<"pwr6", G5Model,
[DirectivePwr6, FeatureAltivec,
FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
- FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
+ FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX, FeatureCMPB,
FeatureFPRND, Feature64Bit /*, Feature64BitRegs */,
DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr6x", G5Model,
[DirectivePwr5x, FeatureAltivec, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
- FeatureSTFIWX, FeatureLFIWAX,
+ FeatureSTFIWX, FeatureLFIWAX, FeatureCMPB,
FeatureFPRND, Feature64Bit,
DeprecatedMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr7", P7Model,
- [DirectivePwr7, FeatureAltivec,
+ [DirectivePwr7, FeatureAltivec, FeatureVSX,
FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureLDBRX,
- Feature64Bit /*, Feature64BitRegs */,
- DeprecatedMFTB, DeprecatedDST]>;
-def : ProcessorModel<"pwr8", P7Model /* FIXME: Update to P8Model when available */,
- [DirectivePwr8, FeatureAltivec,
- FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
- FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
- FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
- FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureLDBRX,
+ FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX,
Feature64Bit /*, Feature64BitRegs */,
DeprecatedMFTB, DeprecatedDST]>;
+def : ProcessorModel<"pwr8", P8Model, ProcessorFeatures.Power8FeatureList>;
def : Processor<"ppc", G3Itineraries, [Directive32]>;
def : ProcessorModel<"ppc64", G5Model,
[Directive64, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureFRES,
FeatureFRSQRTE, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
-def : ProcessorModel<"ppc64le", G5Model,
- [Directive64, FeatureAltivec,
- FeatureMFOCRF, FeatureFSqrt, FeatureFRES,
- FeatureFRSQRTE, FeatureSTFIWX,
- Feature64Bit /*, Feature64BitRegs */]>;
+def : ProcessorModel<"ppc64le", P8Model, ProcessorFeatures.Power8FeatureList>;
//===----------------------------------------------------------------------===//
// Calling Conventions
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 5648873..1327290 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -18,9 +18,9 @@
#include "PPC.h"
#include "InstPrinter/PPCInstPrinter.h"
-#include "PPCMachineFunctionInfo.h"
#include "MCTargetDesc/PPCMCExpr.h"
#include "MCTargetDesc/PPCPredicates.h"
+#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "PPCTargetStreamer.h"
@@ -34,6 +34,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
@@ -67,12 +68,13 @@ namespace {
class PPCAsmPrinter : public AsmPrinter {
protected:
MapVector<MCSymbol*, MCSymbol*> TOC;
- const PPCSubtarget &Subtarget;
+ const PPCSubtarget *Subtarget;
uint64_t TOCLabelID;
+ StackMaps SM;
public:
- explicit PPCAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer),
- Subtarget(TM.getSubtarget<PPCSubtarget>()), TOCLabelID(0) {}
+ explicit PPCAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), TOCLabelID(0), SM(*this) {}
const char *getPassName() const override {
return "PowerPC Assembly Printer";
@@ -90,13 +92,26 @@ namespace {
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) override;
+
+ void EmitEndOfAsmFile(Module &M) override;
+
+ void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+ void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+ void EmitTlsCall(const MachineInstr *MI, MCSymbolRefExpr::VariantKind VK);
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<PPCSubtarget>();
+ return AsmPrinter::runOnMachineFunction(MF);
+ }
};
/// PPCLinuxAsmPrinter - PowerPC assembly printer, customized for Linux
class PPCLinuxAsmPrinter : public PPCAsmPrinter {
public:
- explicit PPCLinuxAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : PPCAsmPrinter(TM, Streamer) {}
+ explicit PPCLinuxAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : PPCAsmPrinter(TM, std::move(Streamer)) {}
const char *getPassName() const override {
return "Linux PPC Assembly Printer";
@@ -115,8 +130,9 @@ namespace {
/// OS X
class PPCDarwinAsmPrinter : public PPCAsmPrinter {
public:
- explicit PPCDarwinAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : PPCAsmPrinter(TM, Streamer) {}
+ explicit PPCDarwinAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : PPCAsmPrinter(TM, std::move(Streamer)) {}
const char *getPassName() const override {
return "Darwin PPC Assembly Printer";
@@ -135,6 +151,7 @@ static const char *stripRegisterPrefix(const char *RegName) {
switch (RegName[0]) {
case 'r':
case 'f':
+ case 'q': // for QPX
case 'v':
if (RegName[1] == 's')
return RegName + 2;
@@ -147,7 +164,7 @@ static const char *stripRegisterPrefix(const char *RegName) {
void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
@@ -155,7 +172,8 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const char *RegName = PPCInstPrinter::getRegisterName(MO.getReg());
// Linux assembler (Others?) does not take register mnemonics.
// FIXME - What about special registers used in mfspr/mtspr?
- if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
+ if (!Subtarget->isDarwin())
+ RegName = stripRegisterPrefix(RegName);
O << RegName;
return;
}
@@ -270,7 +288,8 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
case 'y': // A memory reference for an X-form instruction
{
const char *RegName = "r0";
- if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
+ if (!Subtarget->isDarwin())
+ RegName = stripRegisterPrefix(RegName);
O << RegName << ", ";
printOperand(MI, OpNo, O);
return false;
@@ -302,7 +321,7 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
/// exists for it. If not, create one. Then return a symbol that references
/// the TOC entry.
MCSymbol *PPCAsmPrinter::lookUpOrCreateTOCEntry(MCSymbol *Sym) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
MCSymbol *&TOCEntry = TOC[Sym];
// To avoid name clash check if the name already exists.
@@ -316,13 +335,120 @@ MCSymbol *PPCAsmPrinter::lookUpOrCreateTOCEntry(MCSymbol *Sym) {
return TOCEntry;
}
+void PPCAsmPrinter::EmitEndOfAsmFile(Module &M) {
+ SM.serializeToStackMapSection();
+}
+
+void PPCAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ unsigned NumNOPBytes = MI.getOperand(1).getImm();
+
+ SM.recordStackMap(MI);
+ assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
+
+ // Scan ahead to trim the shadow.
+ const MachineBasicBlock &MBB = *MI.getParent();
+ MachineBasicBlock::const_iterator MII(MI);
+ ++MII;
+ while (NumNOPBytes > 0) {
+ if (MII == MBB.end() || MII->isCall() ||
+ MII->getOpcode() == PPC::DBG_VALUE ||
+ MII->getOpcode() == TargetOpcode::PATCHPOINT ||
+ MII->getOpcode() == TargetOpcode::STACKMAP)
+ break;
+ ++MII;
+ NumNOPBytes -= 4;
+ }
+
+ // Emit nops.
+ for (unsigned i = 0; i < NumNOPBytes; i += 4)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::NOP));
+}
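
The loop above trims the stackmap's nop shadow: every real instruction already sitting in the shadow (up to a call, another stackmap/patchpoint, or the block end) replaces 4 bytes of padding, since every PPC instruction is 4 bytes. A simplified standalone model of that accounting, collapsing the stop conditions into a plain count:

    #include <cassert>

    static unsigned nopsToEmit(unsigned NumNOPBytes, unsigned TrailingInsns) {
      while (NumNOPBytes > 0 && TrailingInsns > 0) {
        NumNOPBytes -= 4; // one existing 4-byte instruction covers 4 bytes
        --TrailingInsns;
      }
      return NumNOPBytes / 4;
    }

    int main() {
      assert(nopsToEmit(16, 0) == 4); // empty shadow: pad fully
      assert(nopsToEmit(16, 3) == 1); // three real insns cover 12 bytes
      return 0;
    }
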
+
+// Lower a patchpoint of the form:
+// [<def>], <id>, <numBytes>, <target>, <numArgs>
+void PPCAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ SM.recordPatchPoint(MI);
+ PatchPointOpers Opers(&MI);
+
+ int64_t CallTarget = Opers.getMetaOper(PatchPointOpers::TargetPos).getImm();
+ unsigned EncodedBytes = 0;
+ if (CallTarget) {
+ assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
+ "High 16 bits of call target should be zero.");
+ unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
+ EncodedBytes = 6*4;
+ // Materialize the jump address:
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LI8)
+ .addReg(ScratchReg)
+ .addImm((CallTarget >> 32) & 0xFFFF));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::RLDIC)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(32).addImm(16));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ORIS8)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm((CallTarget >> 16) & 0xFFFF));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ORI8)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(CallTarget & 0xFFFF));
+
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MTCTR8).addReg(ScratchReg));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BCTRL8));
+ }
+
+ // Emit padding.
+ unsigned NumBytes = Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
+ assert(NumBytes >= EncodedBytes &&
+ "Patchpoint can't request size less than the length of a call.");
+ assert((NumBytes - EncodedBytes) % 4 == 0 &&
+ "Invalid number of NOP bytes requested!");
+ for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::NOP));
+}
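
The li/rldic/oris/ori sequence above materializes a 48-bit call target 16 bits at a time (hence the assert that the high 16 bits are zero, and EncodedBytes = 6*4 covering those four instructions plus mtctr and bctrl). A standalone model of the register's final value:

    #include <cassert>
    #include <cstdint>

    static uint64_t materialize(uint64_t CallTarget) {
      uint64_t R = (CallTarget >> 32) & 0xFFFF;  // li   r, bits 47:32
      R <<= 32;                                  // rldic r, r, 32, 16
      R |= ((CallTarget >> 16) & 0xFFFF) << 16;  // oris r, r, bits 31:16
      R |= CallTarget & 0xFFFF;                  // ori  r, r, bits 15:0
      return R;
    }

    int main() {
      const uint64_t Target = 0x0000123456789ABCull; // high 16 bits zero
      assert(materialize(Target) == Target);
      return 0;
    }
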
+
+/// EmitTlsCall -- Given a GETtls[ld]ADDR[32] instruction, print a
+/// call to __tls_get_addr to the current output stream.
+void PPCAsmPrinter::EmitTlsCall(const MachineInstr *MI,
+ MCSymbolRefExpr::VariantKind VK) {
+ StringRef Name = "__tls_get_addr";
+ MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name);
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
+
+ assert(MI->getOperand(0).isReg() &&
+ ((Subtarget->isPPC64() && MI->getOperand(0).getReg() == PPC::X3) ||
+ (!Subtarget->isPPC64() && MI->getOperand(0).getReg() == PPC::R3)) &&
+ "GETtls[ld]ADDR[32] must define GPR3");
+ assert(MI->getOperand(1).isReg() &&
+ ((Subtarget->isPPC64() && MI->getOperand(1).getReg() == PPC::X3) ||
+ (!Subtarget->isPPC64() && MI->getOperand(1).getReg() == PPC::R3)) &&
+ "GETtls[ld]ADDR[32] must read GPR3");
+
+ if (!Subtarget->isPPC64() && !Subtarget->isDarwin() &&
+ TM.getRelocationModel() == Reloc::PIC_)
+ Kind = MCSymbolRefExpr::VK_PLT;
+ const MCSymbolRefExpr *TlsRef =
+ MCSymbolRefExpr::Create(TlsGetAddr, Kind, OutContext);
+ const MachineOperand &MO = MI->getOperand(2);
+ const GlobalValue *GValue = MO.getGlobal();
+ MCSymbol *MOSymbol = getSymbol(GValue);
+ const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, VK, OutContext);
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget->isPPC64() ?
+ PPC::BL8_NOP_TLS : PPC::BL_TLS)
+ .addExpr(TlsRef)
+ .addExpr(SymVar));
+}
/// EmitInstruction -- Print out a single PowerPC MI in Darwin syntax to
/// the current output stream.
///
void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
- bool isPPC64 = Subtarget.isPPC64();
+ bool isPPC64 = Subtarget->isPPC64();
bool isDarwin = Triple(TM.getTargetTriple()).isOSDarwin();
const Module *M = MF->getFunction()->getParent();
PICLevel::Level PL = M->getPICLevel();
@@ -332,6 +458,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
default: break;
case TargetOpcode::DBG_VALUE:
llvm_unreachable("Should be handled target independently");
+ case TargetOpcode::STACKMAP:
+ return LowerSTACKMAP(OutStreamer, SM, *MI);
+ case TargetOpcode::PATCHPOINT:
+ return LowerPATCHPOINT(OutStreamer, SM, *MI);
+
case PPC::MoveGOTtoLR: {
// Transform %LR = MoveGOTtoLR
// Into this: bl _GLOBAL_OFFSET_TABLE_@local-4
@@ -602,7 +733,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case PPC::ADDISgotTprelHA: {
// Transform: %Xd = ADDISgotTprelHA %X2, <ga:@sym>
// Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
+ assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
@@ -611,7 +742,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
- .addReg(PPC::X2)
+ .addReg(MI->getOperand(1).getReg())
.addExpr(SymGotTprel));
return;
}
@@ -681,7 +812,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case PPC::ADDIStlsgdHA: {
// Transform: %Xd = ADDIStlsgdHA %X2, <ga:@sym>
// Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
+ assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
@@ -690,7 +821,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
- .addReg(PPC::X2)
+ .addReg(MI->getOperand(1).getReg())
.addExpr(SymGotTlsGD));
return;
}
@@ -703,22 +834,30 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
- const MCExpr *SymGotTlsGD =
- MCSymbolRefExpr::Create(MOSymbol, Subtarget.isPPC64() ?
- MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO :
- MCSymbolRefExpr::VK_PPC_GOT_TLSGD,
- OutContext);
+ const MCExpr *SymGotTlsGD = MCSymbolRefExpr::Create(
+ MOSymbol, Subtarget->isPPC64() ? MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO
+ : MCSymbolRefExpr::VK_PPC_GOT_TLSGD,
+ OutContext);
EmitToStreamer(OutStreamer,
- MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ MCInstBuilder(Subtarget->isPPC64() ? PPC::ADDI8 : PPC::ADDI)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addExpr(SymGotTlsGD));
return;
}
+ case PPC::GETtlsADDR:
+ // Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
+ // Into: BL8_NOP_TLS __tls_get_addr(sym at tlsgd)
+ case PPC::GETtlsADDR32: {
+ // Transform: %R3 = GETtlsADDR32 %R3, <ga:@sym>
+ // Into: BL_TLS __tls_get_addr(sym at tlsgd)@PLT
+ EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSGD);
+ return;
+ }
case PPC::ADDIStlsldHA: {
// Transform: %Xd = ADDIStlsldHA %X2, <ga:@sym>
// Into: %Xd = ADDIS8 %X2, sym@got@tlsld@ha
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
+ assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
@@ -727,7 +866,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
- .addReg(PPC::X2)
+ .addReg(MI->getOperand(1).getReg())
.addExpr(SymGotTlsLD));
return;
}
@@ -740,16 +879,24 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
- const MCExpr *SymGotTlsLD =
- MCSymbolRefExpr::Create(MOSymbol, Subtarget.isPPC64() ?
- MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO :
- MCSymbolRefExpr::VK_PPC_GOT_TLSLD,
- OutContext);
+ const MCExpr *SymGotTlsLD = MCSymbolRefExpr::Create(
+ MOSymbol, Subtarget->isPPC64() ? MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO
+ : MCSymbolRefExpr::VK_PPC_GOT_TLSLD,
+ OutContext);
EmitToStreamer(OutStreamer,
- MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addExpr(SymGotTlsLD));
+ MCInstBuilder(Subtarget->isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addExpr(SymGotTlsLD));
+ return;
+ }
+ case PPC::GETtlsldADDR:
+ // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
+  // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld)
+ case PPC::GETtlsldADDR32: {
+ // Transform: %R3 = GETtlsldADDR32 %R3, <ga:@sym>
+  // Into: BL_TLS __tls_get_addr(sym@tlsld)@PLT
+ EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSLD);
return;
}
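// The four GETtls* cases above funnel into a shared EmitTlsCall helper that
// is defined earlier in this patch. A hedged sketch of its shape (not
// verbatim; names and operand positions follow the cases above):
//
//   void PPCAsmPrinter::EmitTlsCall(const MachineInstr *MI,
//                                   MCSymbolRefExpr::VariantKind VK) {
//     MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol("__tls_get_addr");
//     const MCExpr *TlsRef = MCSymbolRefExpr::Create(
//         TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext);
//     const MCExpr *SymVar = MCSymbolRefExpr::Create(
//         getSymbol(MI->getOperand(2).getGlobal()), VK, OutContext);
//     EmitToStreamer(OutStreamer,
//                    MCInstBuilder(Subtarget->isPPC64() ? PPC::BL8_NOP_TLS
//                                                       : PPC::BL_TLS)
//                        .addExpr(TlsRef)
//                        .addExpr(SymVar));
//   }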
case PPC::ADDISdtprelHA:
@@ -764,11 +911,12 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymDtprel =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA,
OutContext);
- EmitToStreamer(OutStreamer,
- MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDIS8 : PPC::ADDIS)
- .addReg(MI->getOperand(0).getReg())
- .addReg(Subtarget.isPPC64() ? PPC::X3 : PPC::R3)
- .addExpr(SymDtprel));
+ EmitToStreamer(
+ OutStreamer,
+ MCInstBuilder(Subtarget->isPPC64() ? PPC::ADDIS8 : PPC::ADDIS)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(Subtarget->isPPC64() ? PPC::X3 : PPC::R3)
+ .addExpr(SymDtprel));
return;
}
case PPC::ADDIdtprelL:
@@ -784,15 +932,15 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO,
OutContext);
EmitToStreamer(OutStreamer,
- MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addExpr(SymDtprel));
+ MCInstBuilder(Subtarget->isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addExpr(SymDtprel));
return;
}
case PPC::MFOCRF:
case PPC::MFOCRF8:
- if (!Subtarget.hasMFOCRF()) {
+ if (!Subtarget->hasMFOCRF()) {
// Transform: %R3 = MFOCRF %CR7
// Into: %R3 = MFCR ;; cr7
unsigned NewOpcode =
@@ -806,7 +954,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
break;
case PPC::MTOCRF:
case PPC::MTOCRF8:
- if (!Subtarget.hasMFOCRF()) {
+ if (!Subtarget->hasMFOCRF()) {
// Transform: %CR7 = MTOCRF %R3
// Into: MTCRF mask, %R3 ;; cr7
unsigned NewOpcode =
@@ -831,7 +979,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// suite shows a handful of test cases that fail this check for
// Darwin. Those need to be investigated before this sanity test
// can be enabled for those subtargets.
- if (!Subtarget.isDarwin()) {
+ if (!Subtarget->isDarwin()) {
unsigned OpNum = (MI->getOpcode() == PPC::STD) ? 2 : 1;
const MachineOperand &MO = MI->getOperand(OpNum);
if (MO.isGlobal() && MO.getGlobal()->getAlignment() < 4)
@@ -847,7 +995,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget.isELFv2ABI()) {
+ if (static_cast<const PPCTargetMachine &>(TM).isELFv2ABI()) {
PPCTargetStreamer *TS =
static_cast<PPCTargetStreamer *>(OutStreamer.getTargetStreamer());
@@ -855,15 +1003,15 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
TS->emitAbiVersion(2);
}
- if (Subtarget.isPPC64() || TM.getRelocationModel() != Reloc::PIC_)
+ if (static_cast<const PPCTargetMachine &>(TM).isPPC64() ||
+ TM.getRelocationModel() != Reloc::PIC_)
return AsmPrinter::EmitStartOfAsmFile(M);
if (M.getPICLevel() == PICLevel::Small)
return AsmPrinter::EmitStartOfAsmFile(M);
- OutStreamer.SwitchSection(OutContext.getELFSection(".got2",
- ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getReadOnly()));
+ OutStreamer.SwitchSection(OutContext.getELFSection(
+ ".got2", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC));
MCSymbol *TOCSym = OutContext.GetOrCreateSymbol(Twine(".LTOC"));
MCSymbol *CurrentPos = OutContext.CreateTempSymbol();
@@ -884,12 +1032,12 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
// linux/ppc32 - Normal entry label.
- if (!Subtarget.isPPC64() &&
+ if (!Subtarget->isPPC64() &&
(TM.getRelocationModel() != Reloc::PIC_ ||
MF->getFunction()->getParent()->getPICLevel() == PICLevel::Small))
return AsmPrinter::EmitFunctionEntryLabel();
- if (!Subtarget.isPPC64()) {
+ if (!Subtarget->isPPC64()) {
const PPCFunctionInfo *PPCFI = MF->getInfo<PPCFunctionInfo>();
if (PPCFI->usesPICBase()) {
MCSymbol *RelocSymbol = PPCFI->getPICOffsetSymbol();
@@ -910,14 +1058,13 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
}
// ELFv2 ABI - Normal entry label.
- if (Subtarget.isELFv2ABI())
+ if (Subtarget->isELFv2ABI())
return AsmPrinter::EmitFunctionEntryLabel();
// Emit an official procedure descriptor.
MCSectionSubPair Current = OutStreamer.getCurrentSection();
- const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".opd",
- ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getReadOnly());
+ const MCSectionELF *Section = OutStreamer.getContext().getELFSection(
+ ".opd", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
OutStreamer.SwitchSection(Section);
OutStreamer.EmitLabel(CurrentFnSym);
OutStreamer.EmitValueToAlignment(8);
@@ -944,7 +1091,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
bool isPPC64 = TD->getPointerSizeInBits() == 64;
@@ -955,13 +1102,11 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
const MCSectionELF *Section;
if (isPPC64)
- Section = OutStreamer.getContext().getELFSection(".toc",
- ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getReadOnly());
- else
- Section = OutStreamer.getContext().getELFSection(".got2",
- ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getReadOnly());
+ Section = OutStreamer.getContext().getELFSection(
+ ".toc", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ else
+ Section = OutStreamer.getContext().getELFSection(
+ ".got2", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
OutStreamer.SwitchSection(Section);
for (MapVector<MCSymbol*, MCSymbol*>::iterator I = TOC.begin(),
@@ -1015,7 +1160,7 @@ void PPCLinuxAsmPrinter::EmitFunctionBodyStart() {
//
// This ensures we have r2 set up correctly while executing the function
// body, no matter which entry point is called.
- if (Subtarget.isELFv2ABI()
+ if (Subtarget->isELFv2ABI()
// Only do all that if the function uses r2 in the first place.
&& !MF->getRegInfo().use_empty(PPC::X2)) {
@@ -1070,7 +1215,7 @@ void PPCLinuxAsmPrinter::EmitFunctionBodyEnd() {
// FIXME: We should fill in the eight-byte mandatory fields as described in
// the PPC64 ELF ABI (this is a low-priority item because GDB does not
// currently make use of these fields).
- if (Subtarget.isPPC64()) {
+ if (Subtarget->isPPC64()) {
OutStreamer.EmitIntValue(0, 4/*size*/);
OutStreamer.EmitIntValue(0, 8/*size*/);
}
@@ -1101,13 +1246,21 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
"ppc64le"
};
- unsigned Directive = Subtarget.getDarwinDirective();
- if (Subtarget.hasMFOCRF() && Directive < PPC::DIR_970)
- Directive = PPC::DIR_970;
- if (Subtarget.hasAltivec() && Directive < PPC::DIR_7400)
- Directive = PPC::DIR_7400;
- if (Subtarget.isPPC64() && Directive < PPC::DIR_64)
- Directive = PPC::DIR_64;
+ // Get the numerically largest directive.
+  // FIXME: How should we merge Darwin directives?
+ unsigned Directive = PPC::DIR_NONE;
+ for (const Function &F : M) {
+ const PPCSubtarget &STI = TM.getSubtarget<PPCSubtarget>(F);
+ unsigned FDir = STI.getDarwinDirective();
+    Directive = Directive > FDir ? Directive : FDir;
+ if (STI.hasMFOCRF() && Directive < PPC::DIR_970)
+ Directive = PPC::DIR_970;
+ if (STI.hasAltivec() && Directive < PPC::DIR_7400)
+ Directive = PPC::DIR_7400;
+ if (STI.isPPC64() && Directive < PPC::DIR_64)
+ Directive = PPC::DIR_64;
+ }
+
assert(Directive <= PPC::DIR_64 && "Directive out of range.");
assert(Directive < array_lengthof(CPUDirectives) &&
@@ -1150,10 +1303,18 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
void PPCDarwinAsmPrinter::
EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
- bool isPPC64 =
- TM.getSubtargetImpl()->getDataLayout()->getPointerSizeInBits() == 64;
- bool isDarwin = Subtarget.isDarwin();
-
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
+
+ // Construct a local MCSubtargetInfo and shadow EmitToStreamer here.
+  // This is because the MachineFunction no longer exists at this point
+  // (though it has not yet been freed), and since we're at the global level
+  // we can use the default-constructed subtarget.
+ std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
+ TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+ auto EmitToStreamer = [&STI] (MCStreamer &S, const MCInst &Inst) {
+ S.EmitInstruction(Inst, *STI);
+ };
+
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -1192,7 +1353,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// mflr r11
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R11));
// addis r11, r11, ha16(LazyPtr - AnonSymbol)
- const MCExpr *SubHa16 = PPCMCExpr::CreateHa(Sub, isDarwin, OutContext);
+ const MCExpr *SubHa16 = PPCMCExpr::CreateHa(Sub, true, OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS)
.addReg(PPC::R11)
.addReg(PPC::R11)
@@ -1202,7 +1363,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// ldu r12, lo16(LazyPtr - AnonSymbol)(r11)
// lwzu r12, lo16(LazyPtr - AnonSymbol)(r11)
- const MCExpr *SubLo16 = PPCMCExpr::CreateLo(Sub, isDarwin, OutContext);
+ const MCExpr *SubLo16 = PPCMCExpr::CreateLo(Sub, true, OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
.addExpr(SubLo16).addExpr(SubLo16)
@@ -1248,7 +1409,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// lis r11, ha16(LazyPtr)
const MCExpr *LazyPtrHa16 =
- PPCMCExpr::CreateHa(LazyPtrExpr, isDarwin, OutContext);
+ PPCMCExpr::CreateHa(LazyPtrExpr, true, OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LIS)
.addReg(PPC::R11)
.addExpr(LazyPtrHa16));
@@ -1256,7 +1417,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// ldu r12, lo16(LazyPtr)(r11)
// lwzu r12, lo16(LazyPtr)(r11)
const MCExpr *LazyPtrLo16 =
- PPCMCExpr::CreateLo(LazyPtrExpr, isDarwin, OutContext);
+ PPCMCExpr::CreateLo(LazyPtrExpr, true, OutContext);
EmitToStreamer(OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
.addExpr(LazyPtrLo16).addExpr(LazyPtrLo16)
@@ -1287,8 +1448,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
- bool isPPC64 =
- TM.getSubtargetImpl()->getDataLayout()->getPointerSizeInBits() == 64;
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
// Darwin/PPC always uses mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
@@ -1383,13 +1543,12 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
/// for a MachineFunction to the given output stream, in a format that the
/// Darwin assembler can deal with.
///
-static AsmPrinter *createPPCAsmPrinterPass(TargetMachine &tm,
- MCStreamer &Streamer) {
- const PPCSubtarget *Subtarget = &tm.getSubtarget<PPCSubtarget>();
-
- if (Subtarget->isDarwin())
- return new PPCDarwinAsmPrinter(tm, Streamer);
- return new PPCLinuxAsmPrinter(tm, Streamer);
+static AsmPrinter *
+createPPCAsmPrinterPass(TargetMachine &tm,
+ std::unique_ptr<MCStreamer> &&Streamer) {
+ if (Triple(tm.getTargetTriple()).isMacOSX())
+ return new PPCDarwinAsmPrinter(tm, std::move(Streamer));
+ return new PPCLinuxAsmPrinter(tm, std::move(Streamer));
}
// Force static initialization.
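For readers tracking the pervasive Subtarget. to Subtarget-> change in this
file: the asm printer now keeps a pointer that is re-derived for each
function, in line with the per-function subtarget work elsewhere in this
patch. A minimal sketch of the override (it appears earlier in the patch;
the body here is an approximation):

  bool PPCAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
    Subtarget = &MF.getSubtarget<PPCSubtarget>();
    return AsmPrinter::runOnMachineFunction(MF);
  }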
diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp
index 41594be..940d55a 100644
--- a/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -70,12 +70,37 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
Fn.RenumberBlocks();
BlockSizes.resize(Fn.getNumBlockIDs());
+ auto GetAlignmentAdjustment =
+ [TII](MachineBasicBlock &MBB, unsigned Offset) -> unsigned {
+ unsigned Align = MBB.getAlignment();
+ if (!Align)
+ return 0;
+
+ unsigned AlignAmt = 1 << Align;
+ unsigned ParentAlign = MBB.getParent()->getAlignment();
+
+ if (Align <= ParentAlign)
+ return OffsetToAlignment(Offset, AlignAmt);
+
+    // The alignment of this MBB is larger than the function's alignment, so
+    // we can't tell whether the assembler will need to insert nops here.
+    // Conservatively assume that it will.
+ return AlignAmt + OffsetToAlignment(Offset, AlignAmt);
+ };
+
// Measure each MBB and compute a size for the entire function.
unsigned FuncSize = 0;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
++MFI) {
MachineBasicBlock *MBB = MFI;
+ // The end of the previous block may have extra nops if this block has an
+ // alignment requirement.
+ if (MBB->getNumber() > 0) {
+ unsigned AlignExtra = GetAlignmentAdjustment(*MBB, FuncSize);
+ BlockSizes[MBB->getNumber()-1] += AlignExtra;
+ FuncSize += AlignExtra;
+ }
+
unsigned BlockSize = 0;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
MBBI != EE; ++MBBI)
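The arithmetic in GetAlignmentAdjustment is easy to sanity-check in
isolation. A self-contained mirror (the helper below re-implements
llvm::OffsetToAlignment locally so the numbers can be checked without the
tree):

  #include <cassert>
  #include <cstdint>

  // Stand-in for llvm::OffsetToAlignment: padding bytes needed to round
  // Offset up to a multiple of Align (Align must be a power of two).
  static uint64_t offsetToAlignment(uint64_t Offset, uint64_t Align) {
    return (Align - Offset % Align) % Align;
  }

  int main() {
    // A block with getAlignment() == 4 wants 1 << 4 == 16-byte alignment.
    assert(offsetToAlignment(100, 16) == 12); // offset 100 pads to 112
    assert(offsetToAlignment(112, 16) == 0);  // already aligned, no nops
    // When the block is aligned more strictly than its function, the lambda
    // assumes a full extra AlignAmt of nops on top of the normal padding:
    assert(16 + offsetToAlignment(100, 16) == 28);
    return 0;
  }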
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 5f3b176..5af8aab 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -30,6 +30,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
@@ -42,7 +43,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
@@ -94,8 +94,8 @@ namespace {
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
@@ -146,7 +146,7 @@ namespace {
INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
@@ -168,12 +168,13 @@ FunctionPass *llvm::createPPCCTRLoopsVerify() {
#endif // NDEBUG
bool PPCCTRLoops::runOnFunction(Function &F) {
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
DL = DLP ? &DLP->getDataLayout() : nullptr;
- LibInfo = getAnalysisIfAvailable<TargetLibraryInfo>();
+ auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
bool MadeChange = false;
@@ -194,6 +195,21 @@ static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
return false;
}
+// Determining the address of a TLS variable results in a function call in
+// certain TLS models.
+static bool memAddrUsesCTR(const PPCTargetMachine *TM,
+ const llvm::Value *MemAddr) {
+ const auto *GV = dyn_cast<GlobalValue>(MemAddr);
+ if (!GV)
+ return false;
+ if (!GV->isThreadLocal())
+ return false;
+ if (!TM)
+ return true;
+ TLSModel::Model Model = TM->getTLSModel(GV);
+ return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
+}
+
bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
J != JE; ++J) {
@@ -214,7 +230,8 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
if (!TM)
return true;
- const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
+ const TargetLowering *TLI =
+ TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
if (Function *F = CI->getCalledFunction()) {
// Most intrinsics don't become function calls, but some might.
@@ -384,11 +401,15 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
if (!TM)
return true;
- const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
+ const TargetLowering *TLI =
+ TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
return true;
}
+ for (Value *Operand : J->operands())
+ if (memAddrUsesCTR(TM, Operand))
+ return true;
}
return false;
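The point of memAddrUsesCTR is that a plain-looking memory access can hide a
call: under the general-dynamic and local-dynamic TLS models, computing a
thread-local address goes through __tls_get_addr, and any call clobbers CTR.
An illustration (not from the patch):

  // Under the general-dynamic TLS model the address of Histogram may be
  // computed by a call to __tls_get_addr, so a loop containing this access
  // must not be turned into a CTR-based hardware loop.
  extern thread_local int Histogram[256];

  void bump(const unsigned char *Buf, int N) {
    for (int I = 0; I != N; ++I)
      ++Histogram[Buf[I]];
  }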
diff --git a/lib/Target/PowerPC/PPCCallingConv.h b/lib/Target/PowerPC/PPCCallingConv.h
new file mode 100644
index 0000000..eb904a8
--- /dev/null
+++ b/lib/Target/PowerPC/PPCCallingConv.h
@@ -0,0 +1,35 @@
+//=== PPCCallingConv.h - PPC Custom Calling Convention Routines -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the custom routines for the PPC Calling Convention that
+// aren't done by tablegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_PPC_PPCCALLINGCONV_H
+#define LLVM_LIB_TARGET_PPC_PPCCALLINGCONV_H
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+
+inline bool CC_PPC_AnyReg_Error(unsigned &, MVT &, MVT &,
+ CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
+ CCState &) {
+  llvm_unreachable("The AnyReg calling convention is only supported by the "
+                   "stackmap and patchpoint intrinsics.");
+  // Gracefully fall back to the PPC C calling convention on Release builds.
+ return false;
+}
+
+} // End llvm namespace
+
+#endif
+
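For orientation, this routine is reached through the
CCCustom<"CC_PPC_AnyReg_Error"> entries added to PPCCallingConv.td below;
tablegen emits the glue into PPCGenCallingConv.inc. A hedged sketch of the
generated shape (illustrative only, not the actual generated code):

  // By convention a CCCustom routine returns true when it has assigned the
  // value; the generated wrapper then reports success (false). Returning
  // false instead leaves the value unhandled, so the delegating convention
  // falls back to the ordinary PPC rules.
  static bool RetCC_PPC64_AnyReg(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
    if (CC_PPC_AnyReg_Error(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false; // Assigned.
    return true;    // Unhandled; caller tries its next rule.
  }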
diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td
index cf8fee4..045fca3 100644
--- a/lib/Target/PowerPC/PPCCallingConv.td
+++ b/lib/Target/PowerPC/PPCCallingConv.td
@@ -28,8 +28,21 @@ class CCIfNotSubtarget<string F, CCAction A>
// Return Value Calling Convention
//===----------------------------------------------------------------------===//
+// PPC64 AnyReg return-value convention. No explicit register is specified for
+// the return value. The register allocator is allowed and expected to choose
+// any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fall back to the PPC C calling convention.
+def RetCC_PPC64_AnyReg : CallingConv<[
+ CCCustom<"CC_PPC_AnyReg_Error">
+]>;
+
// Return-value convention for PowerPC
def RetCC_PPC : CallingConv<[
+ CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
+
// On PPC64, integer return values are always promoted to i64
CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
@@ -42,15 +55,28 @@ def RetCC_PPC : CallingConv<[
// only the ELFv2 ABI fully utilizes all these registers.
CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
-
+
+ // QPX vectors are returned in QF1 and QF2.
+ CCIfType<[v4f64, v4f32, v4i1],
+ CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
+
// Vector types returned as "direct" go into V2 .. V9; note that only the
// ELFv2 ABI fully utilizes all these registers.
- CCIfType<[v16i8, v8i16, v4i32, v4f32],
- CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>,
- CCIfType<[v2f64, v2i64],
- CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>
+ CCIfType<[v16i8, v8i16, v4i32, v4f32], CCIfSubtarget<"hasAltivec()",
+ CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
+ CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
+ CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
]>;
+// No explicit register is specified for the AnyReg calling convention. The
+// register allocator may assign the arguments to any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fall back to the PPC C calling convention.
+def CC_PPC64_AnyReg : CallingConv<[
+ CCCustom<"CC_PPC_AnyReg_Error">
+]>;
// Note that we don't currently have calling conventions for 64-bit
// PowerPC, but handle all the complexities of the ABI in the lowering
@@ -61,6 +87,8 @@ def RetCC_PPC : CallingConv<[
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
def CC_PPC64_ELF_FIS : CallingConv<[
+ CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,
+
CCIfType<[i1], CCPromoteToType<i64>>,
CCIfType<[i8], CCPromoteToType<i64>>,
CCIfType<[i16], CCPromoteToType<i64>>,
@@ -74,6 +102,8 @@ def CC_PPC64_ELF_FIS : CallingConv<[
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
def RetCC_PPC64_ELF_FIS : CallingConv<[
+ CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
+
CCIfType<[i1], CCPromoteToType<i64>>,
CCIfType<[i8], CCPromoteToType<i64>>,
CCIfType<[i16], CCPromoteToType<i64>>,
@@ -82,10 +112,12 @@ def RetCC_PPC64_ELF_FIS : CallingConv<[
CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
- CCIfType<[v16i8, v8i16, v4i32, v4f32],
- CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>,
- CCIfType<[v2f64, v2i64],
- CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>
+ CCIfType<[v4f64, v4f32, v4i1],
+ CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
+ CCIfType<[v16i8, v8i16, v4i32, v4f32], CCIfSubtarget<"hasAltivec()",
+ CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
+ CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
+ CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
]>;
//===----------------------------------------------------------------------===//
@@ -118,6 +150,9 @@ def CC_PPC32_SVR4_Common : CallingConv<[
// alignment and size as doubles.
CCIfType<[f32,f64], CCAssignToStack<8, 8>>,
+ // QPX vectors that are stored in double precision need 32-byte alignment.
+ CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,
+
// Vectors get 16-byte stack slots that are 16-byte aligned.
CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
]>;
@@ -132,12 +167,17 @@ def CC_PPC32_SVR4_VarArg : CallingConv<[
// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
def CC_PPC32_SVR4 : CallingConv<[
+ // QPX vectors mirror the scalar FP convention.
+ CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
+ CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,
+
// The first 12 Vector arguments are passed in AltiVec registers.
- CCIfType<[v16i8, v8i16, v4i32, v4f32],
- CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13]>>,
- CCIfType<[v2f64, v2i64],
+ CCIfType<[v16i8, v8i16, v4i32, v4f32], CCIfSubtarget<"hasAltivec()",
+ CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9,
+ V10, V11, V12, V13]>>>,
+ CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9,
- VSH10, VSH11, VSH12, VSH13]>>,
+ VSH10, VSH11, VSH12, VSH13]>>>,
CCDelegateTo<CC_PPC32_SVR4_Common>
]>;
@@ -198,8 +238,23 @@ def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
F27, F28, F29, F30, F31, CR2, CR3, CR4
)>;
-
def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;
+def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;
+
+def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;
+
def CSR_NoRegs : CalleeSavedRegs<(add)>;
+def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
+ (sequence "X%u", 14, 31),
+ (sequence "F%u", 0, 31),
+ (sequence "CR%u", 0, 7))>;
+
+def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
+ (sequence "V%u", 0, 31))>;
+
+def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
+ (sequence "VSL%u", 0, 31),
+ (sequence "VSH%u", 0, 31))>;
+
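On the sequence operator used in the new CSR sets: it expands a printf-style
pattern over an inclusive range, so CSR_64_AllRegs genuinely covers almost
the whole register file, which is what AnyReg/patchpoint call sites need.
Conceptually (an abbreviated sketch of what lands in PPCGenRegisterInfo.inc):

  // (sequence "X%u", 14, 31) enumerates X14..X31; the generated,
  // null-terminated save list is roughly:
  static const uint16_t CSR_64_AllRegs_SaveList[] = {
      PPC::X0,  PPC::X3,  /* X4..X9 */  PPC::X10,
      PPC::X14, /* X15..X30 */          PPC::X31,
      PPC::F0,  /* F1..F30 */           PPC::F31,
      PPC::CR0, /* CR1..CR6 */          PPC::CR7, 0};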
diff --git a/lib/Target/PowerPC/PPCEarlyReturn.cpp b/lib/Target/PowerPC/PPCEarlyReturn.cpp
new file mode 100644
index 0000000..08673cc
--- /dev/null
+++ b/lib/Target/PowerPC/PPCEarlyReturn.cpp
@@ -0,0 +1,201 @@
+//===------------- PPCEarlyReturn.cpp - Form Early Returns ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A pass that forms early (predicated) returns. If-conversion handles some of
+// this, but this pass picks up some remaining cases.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCInstrInfo.h"
+#include "MCTargetDesc/PPCPredicates.h"
+#include "PPC.h"
+#include "PPCInstrBuilder.h"
+#include "PPCMachineFunctionInfo.h"
+#include "PPCTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-early-ret"
+STATISTIC(NumBCLR, "Number of early conditional returns");
+STATISTIC(NumBLR, "Number of early returns");
+
+namespace llvm {
+ void initializePPCEarlyReturnPass(PassRegistry&);
+}
+
+namespace {
+ // PPCEarlyReturn pass - For simple functions without epilogue code, move
+ // returns up, and create conditional returns, to avoid unnecessary
+ // branch-to-blr sequences.
+ struct PPCEarlyReturn : public MachineFunctionPass {
+ static char ID;
+ PPCEarlyReturn() : MachineFunctionPass(ID) {
+ initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry());
+ }
+
+ const TargetInstrInfo *TII;
+
+protected:
+ bool processBlock(MachineBasicBlock &ReturnMBB) {
+ bool Changed = false;
+
+ MachineBasicBlock::iterator I = ReturnMBB.begin();
+ I = ReturnMBB.SkipPHIsAndLabels(I);
+
+ // The block must be essentially empty except for the blr.
+ if (I == ReturnMBB.end() ||
+ (I->getOpcode() != PPC::BLR && I->getOpcode() != PPC::BLR8) ||
+ I != ReturnMBB.getLastNonDebugInstr())
+ return Changed;
+
+ SmallVector<MachineBasicBlock*, 8> PredToRemove;
+ for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(),
+ PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) {
+ bool OtherReference = false, BlockChanged = false;
+ for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) {
+ MachineInstrBuilder MIB;
+ if (J->getOpcode() == PPC::B) {
+ if (J->getOperand(0).getMBB() == &ReturnMBB) {
+ // This is an unconditional branch to the return. Replace the
+ // branch with a blr.
+ MIB =
+ BuildMI(**PI, J, J->getDebugLoc(), TII->get(I->getOpcode()));
+ MIB.copyImplicitOps(I);
+ MachineBasicBlock::iterator K = J--;
+ K->eraseFromParent();
+ BlockChanged = true;
+ ++NumBLR;
+ continue;
+ }
+ } else if (J->getOpcode() == PPC::BCC) {
+ if (J->getOperand(2).getMBB() == &ReturnMBB) {
+ // This is a conditional branch to the return. Replace the branch
+ // with a bclr.
+ MIB = BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCCLR))
+ .addImm(J->getOperand(0).getImm())
+ .addReg(J->getOperand(1).getReg());
+ MIB.copyImplicitOps(I);
+ MachineBasicBlock::iterator K = J--;
+ K->eraseFromParent();
+ BlockChanged = true;
+ ++NumBCLR;
+ continue;
+ }
+ } else if (J->getOpcode() == PPC::BC || J->getOpcode() == PPC::BCn) {
+ if (J->getOperand(1).getMBB() == &ReturnMBB) {
+ // This is a conditional branch to the return. Replace the branch
+ // with a bclr.
+ MIB = BuildMI(**PI, J, J->getDebugLoc(),
+ TII->get(J->getOpcode() == PPC::BC ?
+ PPC::BCLR : PPC::BCLRn))
+ .addReg(J->getOperand(0).getReg());
+ MIB.copyImplicitOps(I);
+ MachineBasicBlock::iterator K = J--;
+ K->eraseFromParent();
+ BlockChanged = true;
+ ++NumBCLR;
+ continue;
+ }
+ } else if (J->isBranch()) {
+ if (J->isIndirectBranch()) {
+ if (ReturnMBB.hasAddressTaken())
+ OtherReference = true;
+ } else
+ for (unsigned i = 0; i < J->getNumOperands(); ++i)
+ if (J->getOperand(i).isMBB() &&
+ J->getOperand(i).getMBB() == &ReturnMBB)
+ OtherReference = true;
+ } else if (!J->isTerminator() && !J->isDebugValue())
+ break;
+
+ if (J == (*PI)->begin())
+ break;
+
+ --J;
+ }
+
+ if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB))
+ OtherReference = true;
+
+ // Predecessors are stored in a vector and can't be removed here.
+ if (!OtherReference && BlockChanged) {
+ PredToRemove.push_back(*PI);
+ }
+
+ if (BlockChanged)
+ Changed = true;
+ }
+
+ for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i)
+ PredToRemove[i]->removeSuccessor(&ReturnMBB);
+
+ if (Changed && !ReturnMBB.hasAddressTaken()) {
+ // We now might be able to merge this blr-only block into its
+ // by-layout predecessor.
+ if (ReturnMBB.pred_size() == 1 &&
+ (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) {
+ // Move the blr into the preceding block.
+ MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin();
+ PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I);
+ PrevMBB.removeSuccessor(&ReturnMBB);
+ }
+
+ if (ReturnMBB.pred_empty())
+ ReturnMBB.eraseFromParent();
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TII = MF.getSubtarget().getInstrInfo();
+
+ bool Changed = false;
+
+ // If the function does not have at least two blocks, then there is
+ // nothing to do.
+ if (MF.size() < 2)
+ return Changed;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
+ "PowerPC Early-Return Creation", false, false)
+
+char PPCEarlyReturn::ID = 0;
+FunctionPass*
+llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }
+
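To make the new pass concrete: a hedged before/after view of the code it
rewrites, plus a sketch of the pipeline wiring (the wiring itself lives in
PPCTargetMachine.cpp; the method name below is assumed from the usual
backend layout, not quoted from this patch):

  // Before PPCEarlyReturn:             After:
  //     bc 12, 4*cr0+eq, .LBB0_2           bclr 12, 4*cr0+eq  ; early return
  //     ...                                ...
  //   .LBB0_2:                             ; blr-only block merged or erased
  //     blr

  void PPCPassConfig::addPreEmitPass() { // assumed hook; see PPCTargetMachine
    if (getOptLevel() != CodeGenOpt::None)
      addPass(createPPCEarlyReturnPass(), false);
  }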
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index 1149354..54532b5 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -15,7 +15,9 @@
#include "PPC.h"
#include "MCTargetDesc/PPCPredicates.h"
+#include "PPCCallingConv.h"
#include "PPCISelLowering.h"
+#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Optional.h"
@@ -84,18 +86,20 @@ typedef struct Address {
class PPCFastISel final : public FastISel {
const TargetMachine &TM;
+ const PPCSubtarget *PPCSubTarget;
+ PPCFunctionInfo *PPCFuncInfo;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const PPCSubtarget *PPCSubTarget;
LLVMContext *Context;
public:
explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo)
: FastISel(FuncInfo, LibInfo), TM(FuncInfo.MF->getTarget()),
- TII(*TM.getSubtargetImpl()->getInstrInfo()),
- TLI(*TM.getSubtargetImpl()->getTargetLowering()),
- PPCSubTarget(&TM.getSubtarget<PPCSubtarget>()),
+ PPCSubTarget(&FuncInfo.MF->getSubtarget<PPCSubtarget>()),
+ PPCFuncInfo(FuncInfo.MF->getInfo<PPCFunctionInfo>()),
+ TII(*PPCSubTarget->getInstrInfo()),
+ TLI(*PPCSubTarget->getTargetLowering()),
Context(&FuncInfo.Fn->getContext()) {}
// Backend specific FastISel code.
@@ -119,6 +123,8 @@ class PPCFastISel final : public FastISel {
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
+ bool fastLowerCall(CallLoweringInfo &CLI) override;
+
// Instruction selection routines.
private:
bool SelectLoad(const Instruction *I);
@@ -130,7 +136,6 @@ class PPCFastISel final : public FastISel {
bool SelectIToFP(const Instruction *I, bool IsSigned);
bool SelectFPToI(const Instruction *I, bool IsSigned);
bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
- bool SelectCall(const Instruction *I);
bool SelectRet(const Instruction *I);
bool SelectTrunc(const Instruction *I);
bool SelectIntExt(const Instruction *I);
@@ -139,6 +144,9 @@ class PPCFastISel final : public FastISel {
private:
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
+ bool isVSFRCRegister(unsigned Register) const {
+ return MRI.getRegClass(Register)->getID() == PPC::VSFRCRegClassID;
+ }
bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt, unsigned DestReg);
bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
@@ -171,9 +179,7 @@ class PPCFastISel final : public FastISel {
CallingConv::ID CC,
unsigned &NumBytes,
bool IsVarArg);
- void finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
- const Instruction *I, CallingConv::ID CC,
- unsigned &NumBytes, bool IsVarArg);
+ bool finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumBytes);
CCAssignFn *usePPC32CCs(unsigned Flag);
private:
@@ -482,6 +488,16 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// the indexed form. Also handle stack pointers with special needs.
unsigned IndexReg = 0;
PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+
+ // If this is a potential VSX load with an offset of 0, a VSX indexed load can
+ // be used.
+ bool IsVSFRC = (ResultReg != 0) && isVSFRCRegister(ResultReg);
+ if (IsVSFRC && (Opc == PPC::LFD) &&
+ (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
+ (Addr.Offset == 0)) {
+ UseOffset = false;
+ }
+
if (ResultReg == 0)
ResultReg = createResultReg(UseRC);
@@ -489,6 +505,8 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// in range, as otherwise PPCSimplifyAddress would have converted it
// into a RegBase.
if (Addr.BaseType == Address::FrameIndexBase) {
+ // VSX only provides an indexed load.
+ if (IsVSFRC && Opc == PPC::LFD) return false;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
@@ -501,6 +519,8 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// Base reg with offset in range.
} else if (UseOffset) {
+ // VSX only provides an indexed load.
+ if (IsVSFRC && Opc == PPC::LFD) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addReg(Addr.Base.Reg);
@@ -524,7 +544,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
case PPC::LWA_32: Opc = PPC::LWAX_32; break;
case PPC::LD: Opc = PPC::LDX; break;
case PPC::LFS: Opc = PPC::LFSX; break;
- case PPC::LFD: Opc = PPC::LFDX; break;
+ case PPC::LFD: Opc = IsVSFRC ? PPC::LXSDX : PPC::LFDX; break;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Addr.Base.Reg).addReg(IndexReg);
@@ -602,10 +622,22 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
unsigned IndexReg = 0;
PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+ // If this is a potential VSX store with an offset of 0, a VSX indexed store
+ // can be used.
+ bool IsVSFRC = isVSFRCRegister(SrcReg);
+ if (IsVSFRC && (Opc == PPC::STFD) &&
+ (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
+ (Addr.Offset == 0)) {
+ UseOffset = false;
+ }
+
// Note: If we still have a frame index here, we know the offset is
// in range, as otherwise PPCSimplifyAddress would have converted it
// into a RegBase.
if (Addr.BaseType == Address::FrameIndexBase) {
+ // VSX only provides an indexed store.
+ if (IsVSFRC && Opc == PPC::STFD) return false;
+
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(Addr.Base.FI, Addr.Offset),
@@ -619,12 +651,15 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
.addMemOperand(MMO);
// Base reg with offset in range.
- } else if (UseOffset)
+ } else if (UseOffset) {
+ // VSX only provides an indexed store.
+ if (IsVSFRC && Opc == PPC::STFD) return false;
+
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
// Indexed form.
- else {
+ } else {
// Get the RR opcode corresponding to the RI one. FIXME: It would be
// preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
// is hard to get at.
@@ -638,7 +673,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
case PPC::STW8: Opc = PPC::STWX8; break;
case PPC::STD: Opc = PPC::STDX; break;
case PPC::STFS: Opc = PPC::STFSX; break;
- case PPC::STFD: Opc = PPC::STFDX; break;
+ case PPC::STFD: Opc = IsVSFRC ? PPC::STXSDX : PPC::STFDX; break;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
@@ -1202,9 +1237,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, *Context);
// Reserve space for the linkage area on the stack.
- bool isELFv2ABI = PPCSubTarget->isELFv2ABI();
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
- isELFv2ABI);
+ unsigned LinkageSize = PPCSubTarget->getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, 8);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
@@ -1243,7 +1276,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// Prepare to assign register arguments. Every argument uses up a
// GPR protocol register even if it's passed in a floating-point
- // register.
+ // register (unless we're using the fast calling convention).
unsigned NextGPR = PPC::X3;
unsigned NextFPR = PPC::F1;
@@ -1293,7 +1326,8 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
unsigned ArgReg;
if (ArgVT == MVT::f32 || ArgVT == MVT::f64) {
ArgReg = NextFPR++;
- ++NextGPR;
+ if (CC != CallingConv::Fast)
+ ++NextGPR;
} else
ArgReg = NextGPR++;
@@ -1307,9 +1341,9 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// For a call that we've determined we can fast-select, finish the
// call sequence and generate a copy to obtain the return value (if any).
-void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
- const Instruction *I, CallingConv::ID CC,
- unsigned &NumBytes, bool IsVarArg) {
+bool PPCFastISel::finishCall(MVT RetVT, CallLoweringInfo &CLI,
+                             unsigned &NumBytes) {
+ CallingConv::ID CC = CLI.CallConv;
+
// Issue CallSEQ_END.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TII.getCallFrameDestroyOpcode()))
@@ -1320,7 +1354,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
// any real difficulties there.
if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, *Context);
+ CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
CCValAssign &VA = RVLocs[0];
assert(RVLocs.size() == 1 && "No support for multi-reg return values!");
@@ -1365,39 +1399,35 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
}
assert(ResultReg && "ResultReg unset!");
- UsedRegs.push_back(SourcePhysReg);
- updateValueMap(I, ResultReg);
+ CLI.InRegs.push_back(SourcePhysReg);
+ CLI.ResultReg = ResultReg;
+ CLI.NumResultRegs = 1;
}
+
+ return true;
}
-// Attempt to fast-select a call instruction.
-bool PPCFastISel::SelectCall(const Instruction *I) {
- const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
+bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
+ CallingConv::ID CC = CLI.CallConv;
+ bool IsTailCall = CLI.IsTailCall;
+ bool IsVarArg = CLI.IsVarArg;
+ const Value *Callee = CLI.Callee;
+ const char *SymName = CLI.SymName;
- // Can't handle inline asm.
- if (isa<InlineAsm>(Callee))
+ if (!Callee && !SymName)
return false;
// Allow SelectionDAG isel to handle tail calls.
- if (CI->isTailCall())
+ if (IsTailCall)
return false;
- // Obtain calling convention.
- ImmutableCallSite CS(CI);
- CallingConv::ID CC = CS.getCallingConv();
-
- PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- bool IsVarArg = FTy->isVarArg();
-
- // Not ready for varargs yet.
+ // Let SDISel handle vararg functions.
if (IsVarArg)
return false;
// Handle simple calls for now, with legal return types and
// those that can be extended.
- Type *RetTy = I->getType();
+ Type *RetTy = CLI.RetTy;
MVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
@@ -1418,7 +1448,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
// Bail early if more than 8 arguments, as we only currently
// handle arguments passed in registers.
- unsigned NumArgs = CS.arg_size();
+ unsigned NumArgs = CLI.OutVals.size();
if (NumArgs > 8)
return false;
@@ -1433,28 +1463,16 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
ArgVTs.reserve(NumArgs);
ArgFlags.reserve(NumArgs);
- for (ImmutableCallSite::arg_iterator II = CS.arg_begin(), IE = CS.arg_end();
- II != IE; ++II) {
- // FIXME: ARM does something for intrinsic calls here, check into that.
-
- unsigned AttrIdx = II - CS.arg_begin() + 1;
-
+ for (unsigned i = 0, ie = NumArgs; i != ie; ++i) {
// Only handle easy calls for now. It would be reasonably easy
// to handle <= 8-byte structures passed ByVal in registers, but we
// have to ensure they are right-justified in the register.
- if (CS.paramHasAttr(AttrIdx, Attribute::InReg) ||
- CS.paramHasAttr(AttrIdx, Attribute::StructRet) ||
- CS.paramHasAttr(AttrIdx, Attribute::Nest) ||
- CS.paramHasAttr(AttrIdx, Attribute::ByVal))
+ ISD::ArgFlagsTy Flags = CLI.OutFlags[i];
+ if (Flags.isInReg() || Flags.isSRet() || Flags.isNest() || Flags.isByVal())
return false;
- ISD::ArgFlagsTy Flags;
- if (CS.paramHasAttr(AttrIdx, Attribute::SExt))
- Flags.setSExt();
- if (CS.paramHasAttr(AttrIdx, Attribute::ZExt))
- Flags.setZExt();
-
- Type *ArgTy = (*II)->getType();
+ Value *ArgValue = CLI.OutVals[i];
+ Type *ArgTy = ArgValue->getType();
MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8)
return false;
@@ -1462,14 +1480,11 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
if (ArgVT.isVector())
return false;
- unsigned Arg = getRegForValue(*II);
+ unsigned Arg = getRegForValue(ArgValue);
if (Arg == 0)
return false;
- unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
- Flags.setOrigAlign(OriginalAlignment);
-
- Args.push_back(*II);
+ Args.push_back(ArgValue);
ArgRegs.push_back(Arg);
ArgVTs.push_back(ArgVT);
ArgFlags.push_back(Flags);
@@ -1483,39 +1498,46 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
RegArgs, CC, NumBytes, IsVarArg))
return false;
+ MachineInstrBuilder MIB;
// FIXME: No handling for function pointers yet. This requires
// implementing the function descriptor (OPD) setup.
const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
- if (!GV)
- return false;
-
- // Build direct call with NOP for TOC restore.
- // FIXME: We can and should optimize away the NOP for local calls.
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(PPC::BL8_NOP));
- // Add callee.
- MIB.addGlobalAddress(GV);
+ if (!GV) {
+    // Patchpoints are a special case; they always dispatch to a pointer value.
+ // However, we don't actually want to generate the indirect call sequence
+ // here (that will be generated, as necessary, during asm printing), and
+ // the call we generate here will be erased by FastISel::selectPatchpoint,
+ // so don't try very hard...
+ if (CLI.IsPatchPoint)
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::NOP));
+ else
+ return false;
+ } else {
+ // Build direct call with NOP for TOC restore.
+ // FIXME: We can and should optimize away the NOP for local calls.
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(PPC::BL8_NOP));
+ // Add callee.
+ MIB.addGlobalAddress(GV);
+ }
// Add implicit physical register uses to the call.
for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
MIB.addReg(RegArgs[II], RegState::Implicit);
- // Direct calls in the ELFv2 ABI need the TOC register live into the call.
- if (PPCSubTarget->isELFv2ABI())
- MIB.addReg(PPC::X2, RegState::Implicit);
+ // Direct calls, in both the ELF V1 and V2 ABIs, need the TOC register live
+ // into the call.
+ PPCFuncInfo->setUsesTOCBasePtr();
+ MIB.addReg(PPC::X2, RegState::Implicit);
// Add a register mask with the call-preserved registers. Proper
// defs for return values will be added by setPhysRegsDeadExcept().
MIB.addRegMask(TRI.getCallPreservedMask(CC));
- // Finish off the call including any return values.
- SmallVector<unsigned, 4> UsedRegs;
- finishCall(RetVT, UsedRegs, I, CC, NumBytes, IsVarArg);
-
- // Set all unused physregs defs as dead.
- static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+ CLI.Call = MIB;
- return true;
+ // Finish off the call including any return values.
+ return finishCall(RetVT, CLI, NumBytes);
}
// Attempt to fast-select a return instruction.
@@ -1626,7 +1648,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
}
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(PPC::BLR));
+ TII.get(PPC::BLR8));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
@@ -1805,9 +1827,7 @@ bool PPCFastISel::fastSelectInstruction(const Instruction *I) {
case Instruction::Sub:
return SelectBinaryIntOp(I, ISD::SUB);
case Instruction::Call:
- if (dyn_cast<IntrinsicInst>(I))
- return false;
- return SelectCall(I);
+ return selectCall(I);
case Instruction::Ret:
return SelectRet(I);
case Instruction::Trunc:
@@ -1846,6 +1866,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD;
unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT),
@@ -1895,6 +1916,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
if (GV->isThreadLocal())
return 0;
+ PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc),
@@ -2077,7 +2099,7 @@ unsigned PPCFastISel::fastMaterializeConstant(const Constant *C) {
else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
return PPCMaterializeGV(GV, VT);
else if (isa<ConstantInt>(C))
- return PPCMaterializeInt(C, VT);
+ return PPCMaterializeInt(C, VT, VT != MVT::i1);
return 0;
}
@@ -2280,13 +2302,10 @@ namespace llvm {
// Create the fast instruction selector for PowerPC64 ELF.
FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) {
- const TargetMachine &TM = FuncInfo.MF->getTarget();
-
// Only available on 64-bit ELF for now.
- const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
- if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
+ const PPCSubtarget &Subtarget = FuncInfo.MF->getSubtarget<PPCSubtarget>();
+ if (Subtarget.isPPC64() && Subtarget.isSVR4ABI())
return new PPCFastISel(FuncInfo, LibInfo);
-
return nullptr;
}
}
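The VSX special-casing threaded through PPCEmitLoad and PPCEmitStore above
comes down to one ISA fact: VSX scalar loads and stores exist only in
indexed (X-)form, so reg+imm addressing must either be rewritten with a zero
index or abandoned to SelectionDAG. A self-contained mirror of the load-side
decision:

  #include <cstdint>

  enum class LoadForm { DForm, XForm, Bail };

  // Mirror of the choice PPCEmitLoad makes for an f64 load whose result may
  // live in a VSX register (VSFRC). LFD is D-form (reg + imm); LXSDX is the
  // X-form (reg + reg) equivalent that VSX registers require.
  LoadForm chooseLFDForm(bool IsVSFRC, bool IsFrameIndex, bool UseOffset,
                         int64_t Offset) {
    if (!IsVSFRC)
      return UseOffset ? LoadForm::DForm : LoadForm::XForm; // LFD vs. LFDX
    if (IsFrameIndex)
      return LoadForm::Bail;            // Frame indices need D-form; give up.
    if (UseOffset && Offset != 0)
      return LoadForm::Bail;            // Nonzero offsets likewise.
    return LoadForm::XForm;             // LXSDX, offset 0 folded into an index.
  }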
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index dc87a6c..f997fea 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -16,6 +16,7 @@
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
+#include "PPCTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -36,10 +37,58 @@ static const uint16_t VRRegNo[] = {
PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
};
+static unsigned computeReturnSaveOffset(const PPCSubtarget &STI) {
+ if (STI.isDarwinABI())
+ return STI.isPPC64() ? 16 : 8;
+ // SVR4 ABI:
+ return STI.isPPC64() ? 16 : 4;
+}
+
+static unsigned computeTOCSaveOffset(const PPCSubtarget &STI) {
+ return STI.isELFv2ABI() ? 24 : 40;
+}
+
+static unsigned computeFramePointerSaveOffset(const PPCSubtarget &STI) {
+ // For the Darwin ABI:
+ // We cannot use the TOC save slot (offset +20) in the PowerPC linkage area
+ // for saving the frame pointer (if needed.) While the published ABI has
+ // not used this slot since at least MacOSX 10.2, there is older code
+ // around that does use it, and that needs to continue to work.
+ if (STI.isDarwinABI())
+ return STI.isPPC64() ? -8U : -4U;
+
+ // SVR4 ABI: First slot in the general register save area.
+ return STI.isPPC64() ? -8U : -4U;
+}
+
+static unsigned computeLinkageSize(const PPCSubtarget &STI) {
+ if (STI.isDarwinABI() || STI.isPPC64())
+ return (STI.isELFv2ABI() ? 4 : 6) * (STI.isPPC64() ? 8 : 4);
+
+ // SVR4 ABI:
+ return 8;
+}
+
+static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
+ if (STI.isDarwinABI())
+ return STI.isPPC64() ? -16U : -8U;
+
+ // SVR4 ABI: First slot in the general register save area.
+ return STI.isPPC64()
+ ? -16U
+ : (STI.getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ ? -12U
+ : -8U;
+}
+
PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
: TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
- (STI.hasQPX() || STI.isBGQ()) ? 32 : 16, 0),
- Subtarget(STI) {}
+ STI.getPlatformStackAlignment(), 0),
+ Subtarget(STI), ReturnSaveOffset(computeReturnSaveOffset(Subtarget)),
+ TOCSaveOffset(computeTOCSaveOffset(Subtarget)),
+ FramePointerSaveOffset(computeFramePointerSaveOffset(Subtarget)),
+ LinkageSize(computeLinkageSize(Subtarget)),
+ BasePointerSaveOffset(computeBasePointerSaveOffset(STI)) {}
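// Spot-checking the helpers above against the ABI documents (illustrative):
//   ELFv2, 64-bit: computeLinkageSize == 4 * 8 == 32, TOC save at +24,
//                  LR save at +16
//   ELFv1, 64-bit: computeLinkageSize == 6 * 8 == 48, TOC save at +40,
//                  LR save at +16
//   32-bit SVR4:   computeLinkageSize == 8, LR save at +4,
//                  FP save at -4 (first slot of the GPR save area)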
// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
@@ -355,6 +404,20 @@ static bool hasNonRISpills(const MachineFunction &MF) {
return FuncInfo->hasNonRISpills();
}
+/// MustSaveLR - Return true if this function requires that we save the LR
+/// register onto the stack in the prolog and restore it in the epilog of the
+/// function.
+static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
+ const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
+
+ // We need a save/restore of LR if there is any def of LR (which is
+ // defined by calls, including the PIC setup sequence), or if there is
+ // some use of the LR stack slot (e.g. for builtin_return_address).
+ // (LR comes in 32 and 64 bit versions.)
+ MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
+  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
+}
+
/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
@@ -372,15 +435,15 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
unsigned AlignMask = std::max(MaxAlign, TargetAlign) - 1;
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
// If we are a leaf function, and use up to 224 bytes of stack space,
// don't have a frame pointer, calls, or dynamic alloca then we do not need
// to adjust the stack pointer (we fit in the Red Zone).
// The 32-bit SVR4 ABI has no Red Zone. However, it can still generate
// stackless code if all local vars are reg-allocated.
- bool DisableRedZone = MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::NoRedZone);
+ bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
+ unsigned LR = RegInfo->getRARegister();
if (!DisableRedZone &&
(Subtarget.isPPC64() || // 32-bit SVR4, no stack-
!Subtarget.isSVR4ABI() || // allocated locals.
@@ -388,6 +451,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
FrameSize <= 224 && // Fits in red zone.
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
+ !MustSaveLR(MF, LR) &&
!RegInfo->hasBasePointer(MF)) { // No special alignment.
// No need for frame
if (UpdateMF)
@@ -399,9 +463,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
// Maximum call frame needs to be at least big enough for linkage area.
- unsigned minCallFrameSize = getLinkageSize(Subtarget.isPPC64(),
- Subtarget.isDarwinABI(),
- Subtarget.isELFv2ABI());
+ unsigned minCallFrameSize = getLinkageSize();
maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
// If we have dynamic alloca then maxCallFrameSize needs to be aligned so
@@ -444,12 +506,12 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::Naked))
+ if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects() ||
+ MFI->hasStackMap() || MFI->hasPatchPoint() ||
(MF.getTarget().Options.GuaranteedTailCallOpt &&
MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}
@@ -460,7 +522,7 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
bool HasBP = RegInfo->hasBasePointer(MF);
unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FPReg;
@@ -498,24 +560,22 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const PPCInstrInfo *>(Subtarget.getInstrInfo());
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
DebugLoc dl;
- bool needsFrameMoves = MMI.hasDebugInfo() ||
+ bool needsCFI = MMI.hasDebugInfo() ||
MF.getFunction()->needsUnwindTableEntry();
- bool isPIC = MF.getTarget().getRelocationModel() == Reloc::PIC_;
// Get processor type.
bool isPPC64 = Subtarget.isPPC64();
// Get the ABI.
- bool isDarwinABI = Subtarget.isDarwinABI();
bool isSVR4ABI = Subtarget.isSVR4ABI();
bool isELFv2ABI = Subtarget.isELFv2ABI();
- assert((isDarwinABI || isSVR4ABI) &&
+ assert((Subtarget.isDarwinABI() || isSVR4ABI) &&
"Currently only Darwin and SVR4 ABIs are supported for PowerPC.");
// Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
@@ -581,7 +641,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) &&
"FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4.");
- int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
+ int LROffset = getReturnSaveOffset();
int FPOffset = 0;
if (HasFP) {
@@ -591,8 +651,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
assert(FPIndex && "No Frame Pointer Save Slot!");
FPOffset = FFI->getObjectOffset(FPIndex);
} else {
- FPOffset =
- PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ FPOffset = getFramePointerSaveOffset();
}
}
@@ -604,13 +663,18 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
assert(BPIndex && "No Base Pointer Save Slot!");
BPOffset = FFI->getObjectOffset(BPIndex);
} else {
- BPOffset =
- PPCFrameLowering::getBasePointerSaveOffset(isPPC64,
- isDarwinABI,
- isPIC);
+ BPOffset = getBasePointerSaveOffset();
}
}
+ int PBPOffset = 0;
+ if (FI->usesPICBase()) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int PBPIndex = FI->getPICBasePointerSaveIndex();
+ assert(PBPIndex && "No PIC Base Pointer Save Slot!");
+ PBPOffset = FFI->getObjectOffset(PBPIndex);
+ }
+
// Get stack alignments.
unsigned MaxAlign = MFI->getMaxAlignment();
if (HasBP && MaxAlign > 1)
@@ -644,6 +708,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addImm(FPOffset)
.addReg(SPReg);
+ if (FI->usesPICBase())
+ // FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
+ BuildMI(MBB, MBBI, dl, StoreInst)
+ .addReg(PPC::R30)
+ .addImm(PBPOffset)
+ .addReg(SPReg);
+
if (HasBP)
// FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
BuildMI(MBB, MBBI, dl, StoreInst)
@@ -726,17 +797,28 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(ScratchReg);
}
- // Add the "machine moves" for the instructions we generated above, but in
- // reverse order.
- if (needsFrameMoves) {
- // Show update of SP.
- assert(NegFrameSize);
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
+ // Add Call Frame Information for the instructions we generated above.
+ if (needsCFI) {
+ unsigned CFIIndex;
+
+ if (HasBP) {
+ // Define CFA in terms of BP. Do this in preference to using FP/SP,
+ // because if the stack needed aligning then CFA won't be at a fixed
+ // offset from FP/SP.
+ unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
+ } else {
+ // Adjust the definition of CFA to account for the change in SP.
+ assert(NegFrameSize);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
+ }
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
if (HasFP) {
+ // Describe where FP was saved, at a fixed offset from CFA.
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
@@ -744,7 +826,17 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addCFIIndex(CFIIndex);
}
+ if (FI->usesPICBase()) {
+ // Describe where the PIC base pointer (R30) was saved, at a fixed offset from CFA.
+ unsigned Reg = MRI->getDwarfRegNum(PPC::R30, true);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, PBPOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
if (HasBP) {
+ // Describe where BP was saved, at a fixed offset from CFA.
unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
@@ -753,6 +845,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
}
if (MustSaveLR) {
+ // Describe where LR was saved, at a fixed offset from CFA.
unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
@@ -767,8 +860,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(SPReg)
.addReg(SPReg);
- if (needsFrameMoves) {
- // Mark effective beginning of when frame pointer is ready.
+ if (!HasBP && needsCFI) {
+ // Change the definition of CFA from SP+offset to FP+offset, because SP
+ // will change at every alloca.
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
@@ -778,8 +872,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
}
}
- if (needsFrameMoves) {
- // Add callee saved registers to move list.
+ if (needsCFI) {
+ // Describe where callee saved registers were saved, at fixed offsets from
+ // CFA.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
unsigned Reg = CSI[I].getReg();
@@ -824,14 +919,15 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI != MBB.end() && "Returning block has no terminator");
const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ *static_cast<const PPCInstrInfo *>(Subtarget.getInstrInfo());
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl;
assert((RetOpcode == PPC::BLR ||
+ RetOpcode == PPC::BLR8 ||
RetOpcode == PPC::TCRETURNri ||
RetOpcode == PPC::TCRETURNdi ||
RetOpcode == PPC::TCRETURNai ||
@@ -849,9 +945,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
// Get processor type.
bool isPPC64 = Subtarget.isPPC64();
// Get the ABI.
- bool isDarwinABI = Subtarget.isDarwinABI();
bool isSVR4ABI = Subtarget.isSVR4ABI();
- bool isPIC = MF.getTarget().getRelocationModel() == Reloc::PIC_;
// Check if the link register (LR) has been saved.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
@@ -879,7 +973,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
const MCInstrDesc& AddInst = TII.get( isPPC64 ? PPC::ADD8
: PPC::ADD4 );
- int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
+ int LROffset = getReturnSaveOffset();
int FPOffset = 0;
if (HasFP) {
@@ -889,8 +983,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
assert(FPIndex && "No Frame Pointer Save Slot!");
FPOffset = FFI->getObjectOffset(FPIndex);
} else {
- FPOffset =
- PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ FPOffset = getFramePointerSaveOffset();
}
}
@@ -902,13 +995,18 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
assert(BPIndex && "No Base Pointer Save Slot!");
BPOffset = FFI->getObjectOffset(BPIndex);
} else {
- BPOffset =
- PPCFrameLowering::getBasePointerSaveOffset(isPPC64,
- isDarwinABI,
- isPIC);
+ BPOffset = getBasePointerSaveOffset();
}
}
+ int PBPOffset = 0;
+ if (FI->usesPICBase()) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int PBPIndex = FI->getPICBasePointerSaveIndex();
+ assert(PBPIndex && "No PIC Base Pointer Save Slot!");
+ PBPOffset = FFI->getObjectOffset(PBPIndex);
+ }
+
bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
RetOpcode == PPC::TCRETURNdi ||
RetOpcode == PPC::TCRETURNai ||
@@ -988,6 +1086,13 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
.addImm(FPOffset)
.addReg(SPReg);
+ if (FI->usesPICBase())
+ // Restore the PIC base pointer (R30) from its save slot.
+ BuildMI(MBB, MBBI, dl, LoadInst)
+ .addReg(PPC::R30)
+ .addImm(PBPOffset)
+ .addReg(SPReg);
+
if (HasBP)
BuildMI(MBB, MBBI, dl, LoadInst, BPReg)
.addImm(BPOffset)
@@ -1003,7 +1108,8 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
// Callee pop calling convention. Pop parameter/linkage area. Used for tail
// call optimization
- if (MF.getTarget().Options.GuaranteedTailCallOpt && RetOpcode == PPC::BLR &&
+ if (MF.getTarget().Options.GuaranteedTailCallOpt &&
+ (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
MF.getFunction()->getCallingConv() == CallingConv::Fast) {
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
unsigned CallerAllocatedAmt = FI->getMinReservedArea();
@@ -1051,25 +1157,11 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
}
-/// MustSaveLR - Return true if this function requires that we save the LR
-/// register onto the stack in the prolog and restore it in the epilog of the
-/// function.
-static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
- const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
-
- // We need a save/restore of LR if there is any def of LR (which is
- // defined by calls, including the PIC setup sequence), or if there is
- // some use of the LR stack slot (e.g. for builtin_return_address).
- // (LR comes in 32 and 64 bit versions.)
- MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
- return RI !=MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
-}
-
void
PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *) const {
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
// Save and clear the LR state.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
@@ -1082,13 +1174,12 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
int FPSI = FI->getFramePointerSaveIndex();
bool isPPC64 = Subtarget.isPPC64();
bool isDarwinABI = Subtarget.isDarwinABI();
- bool isPIC = MF.getTarget().getRelocationModel() == Reloc::PIC_;
MachineFrameInfo *MFI = MF.getFrameInfo();
// If the frame pointer save index hasn't been defined yet.
if (!FPSI && needsFP(MF)) {
 // Find out the fixed offset of the frame pointer save area.
- int FPOffset = getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ int FPOffset = getFramePointerSaveOffset();
// Allocate the frame index for frame pointer save area.
FPSI = MFI->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
@@ -1097,13 +1188,21 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
int BPSI = FI->getBasePointerSaveIndex();
if (!BPSI && RegInfo->hasBasePointer(MF)) {
- int BPOffset = getBasePointerSaveOffset(isPPC64, isDarwinABI, isPIC);
+ int BPOffset = getBasePointerSaveOffset();
// Allocate the frame index for the base pointer save area.
BPSI = MFI->CreateFixedObject(isPPC64? 8 : 4, BPOffset, true);
// Save the result.
FI->setBasePointerSaveIndex(BPSI);
}
+ // Reserve stack space for the PIC Base register (R30).
+ // Only used in SVR4 32-bit.
+ if (FI->usesPICBase()) {
+ int PBPSI = FI->getPICBasePointerSaveIndex();
+ PBPSI = MFI->CreateFixedObject(4, -8, true);
+ FI->setPICBasePointerSaveIndex(PBPSI);
+ }
+
// Reserve stack space to move the linkage area to in case of a tail call.
int TCSPDelta = 0;
if (MF.getTarget().Options.GuaranteedTailCallOpt &&
@@ -1201,7 +1300,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
}
PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
int64_t LowerBound = 0;
@@ -1235,8 +1334,17 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
}
+ if (PFI->usesPICBase()) {
+ HasGPSaveArea = true;
+
+ int FI = PFI->getPICBasePointerSaveIndex();
+ assert(FI && "No PIC Base Pointer Save Slot!");
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
const PPCRegisterInfo *RegInfo =
- static_cast<const PPCRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const PPCRegisterInfo *>(Subtarget.getRegisterInfo());
if (RegInfo->hasBasePointer(MF)) {
HasGPSaveArea = true;
@@ -1384,7 +1492,7 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineFunction *MF = MBB.getParent();
const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF->getSubtarget().getInstrInfo());
+ *static_cast<const PPCInstrInfo *>(Subtarget.getInstrInfo());
DebugLoc DL;
bool CRSpilled = false;
MachineInstrBuilder CRMIB;
@@ -1445,8 +1553,7 @@ restoreCRs(bool isPPC64, bool is31,
const std::vector<CalleeSavedInfo> &CSI, unsigned CSIIndex) {
MachineFunction *MF = MBB.getParent();
- const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF->getSubtarget().getInstrInfo());
+ const PPCInstrInfo &TII = *MF->getSubtarget<PPCSubtarget>().getInstrInfo();
DebugLoc DL;
unsigned RestoreOp, MoveReg;
@@ -1478,8 +1585,7 @@ restoreCRs(bool isPPC64, bool is31,
void PPCFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
if (MF.getTarget().Options.GuaranteedTailCallOpt &&
I->getOpcode() == PPC::ADJCALLSTACKUP) {
// Add (actually subtract) back the amount the callee popped on return.
@@ -1529,7 +1635,7 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineFunction *MF = MBB.getParent();
const PPCInstrInfo &TII =
- *static_cast<const PPCInstrInfo *>(MF->getSubtarget().getInstrInfo());
+ *static_cast<const PPCInstrInfo *>(Subtarget.getInstrInfo());
bool CR2Spilled = false;
bool CR3Spilled = false;
bool CR4Spilled = false;
diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h
index c482588..dddabb8 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/lib/Target/PowerPC/PPCFrameLowering.h
@@ -23,6 +23,11 @@ class PPCSubtarget;
class PPCFrameLowering: public TargetFrameLowering {
const PPCSubtarget &Subtarget;
+ const unsigned ReturnSaveOffset;
+ const unsigned TOCSaveOffset;
+ const unsigned FramePointerSaveOffset;
+ const unsigned LinkageSize;
+ const unsigned BasePointerSaveOffset;
public:
PPCFrameLowering(const PPCSubtarget &STI);
@@ -67,56 +72,23 @@ public:
/// getReturnSaveOffset - Return the previous frame offset to save the
/// return address.
- static unsigned getReturnSaveOffset(bool isPPC64, bool isDarwinABI) {
- if (isDarwinABI)
- return isPPC64 ? 16 : 8;
- // SVR4 ABI:
- return isPPC64 ? 16 : 4;
- }
+ unsigned getReturnSaveOffset() const { return ReturnSaveOffset; }
/// getTOCSaveOffset - Return the previous frame offset to save the
/// TOC register -- 64-bit SVR4 ABI only.
- static unsigned getTOCSaveOffset(bool isELFv2ABI) {
- return isELFv2ABI ? 24 : 40;
- }
+ unsigned getTOCSaveOffset() const { return TOCSaveOffset; }
/// getFramePointerSaveOffset - Return the previous frame offset to save the
/// frame pointer.
- static unsigned getFramePointerSaveOffset(bool isPPC64, bool isDarwinABI) {
- // For the Darwin ABI:
- // We cannot use the TOC save slot (offset +20) in the PowerPC linkage area
- // for saving the frame pointer (if needed.) While the published ABI has
- // not used this slot since at least MacOSX 10.2, there is older code
- // around that does use it, and that needs to continue to work.
- if (isDarwinABI)
- return isPPC64 ? -8U : -4U;
-
- // SVR4 ABI: First slot in the general register save area.
- return isPPC64 ? -8U : -4U;
- }
+ unsigned getFramePointerSaveOffset() const { return FramePointerSaveOffset; }
/// getBasePointerSaveOffset - Return the previous frame offset to save the
/// base pointer.
- static unsigned getBasePointerSaveOffset(bool isPPC64,
- bool isDarwinABI,
- bool isPIC) {
- if (isDarwinABI)
- return isPPC64 ? -16U : -8U;
-
- // SVR4 ABI: First slot in the general register save area.
- return isPPC64 ? -16U : isPIC ? -12U : -8U;
- }
+ unsigned getBasePointerSaveOffset() const { return BasePointerSaveOffset; }
/// getLinkageSize - Return the size of the PowerPC ABI linkage area.
///
- static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI,
- bool isELFv2ABI) {
- if (isDarwinABI || isPPC64)
- return (isELFv2ABI ? 4 : 6) * (isPPC64 ? 8 : 4);
-
- // SVR4 ABI:
- return 8;
- }
+ unsigned getLinkageSize() const { return LinkageSize; }
const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
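
As an editorial aside to make the header refactor concrete: the five new const members cache, once per subtarget, the values the deleted static helpers recomputed on every query. The sketch below (plain C++, not LLVM code; the struct and function names are hypothetical) restates the ABI logic visible in the removed helpers:

    #include <cstdio>

    // Hypothetical restatement of the deleted static offset helpers; the
    // PPCFrameLowering constructor (not shown in this diff) presumably
    // performs the equivalent computation once per subtarget.
    struct ABIInfo { bool IsPPC64, IsDarwin, IsELFv2, IsPIC; };

    static unsigned returnSaveOffset(const ABIInfo &A) {
      return A.IsPPC64 ? 16 : (A.IsDarwin ? 8 : 4);
    }

    static unsigned linkageSize(const ABIInfo &A) {
      if (A.IsDarwin || A.IsPPC64)
        return (A.IsELFv2 ? 4 : 6) * (A.IsPPC64 ? 8 : 4);
      return 8; // 32-bit SVR4
    }

    static int basePointerSaveOffset(const ABIInfo &A) {
      if (A.IsDarwin)
        return A.IsPPC64 ? -16 : -8;
      return A.IsPPC64 ? -16 : (A.IsPIC ? -12 : -8);
    }

    int main() {
      const ABIInfo ELFv2 = {true, false, true, false};
      std::printf("ELFv2: LR save at %u, linkage area %u bytes\n",
                  returnSaveOffset(ELFv2), linkageSize(ELFv2));      // 16, 32
      std::printf("32-bit SVR4 PIC: BP save at %d\n",
                  basePointerSaveOffset({false, false, false, true})); // -12
      return 0;
    }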
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index d9b242c..7234e30 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -160,7 +160,7 @@ unsigned PPCDispatchGroupSBHazardRecognizer::PreEmitNoops(SUnit *SU) {
// new group.
if (isLoadAfterStore(SU) && CurSlots < 6) {
unsigned Directive =
- DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
// If we're using a special group-terminating nop, then we need only one.
if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
Directive == PPC::DIR_PWR8 )
@@ -220,7 +220,7 @@ void PPCDispatchGroupSBHazardRecognizer::Reset() {
void PPCDispatchGroupSBHazardRecognizer::EmitNoop() {
unsigned Directive =
- DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
// If the group has now filled all of its slots, or if we're using a special
// group-terminating nop, the group is complete.
if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 49ba58b..b10e854 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -42,6 +42,16 @@ using namespace llvm;
cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden);
+static cl::opt<bool>
+ UseBitPermRewriter("ppc-use-bit-perm-rewriter", cl::init(true),
+ cl::desc("use aggressive ppc isel for bit permutations"),
+ cl::Hidden);
+static cl::opt<bool> BPermRewriterNoMasking(
+ "ppc-bit-perm-rewriter-stress-rotates",
+ cl::desc("stress rotate selection in aggressive ppc isel for "
+ "bit permutations"),
+ cl::Hidden);
+
namespace llvm {
void initializePPCDAGToDAGISelPass(PassRegistry&);
}
@@ -53,22 +63,20 @@ namespace {
///
class PPCDAGToDAGISel : public SelectionDAGISel {
const PPCTargetMachine &TM;
- const PPCTargetLowering *PPCLowering;
const PPCSubtarget *PPCSubTarget;
+ const PPCTargetLowering *PPCLowering;
unsigned GlobalBaseReg;
public:
explicit PPCDAGToDAGISel(PPCTargetMachine &tm)
- : SelectionDAGISel(tm), TM(tm),
- PPCLowering(TM.getSubtargetImpl()->getTargetLowering()),
- PPCSubTarget(TM.getSubtargetImpl()) {
+ : SelectionDAGISel(tm), TM(tm) {
initializePPCDAGToDAGISelPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override {
// Make sure we re-emit a set of the global base reg if necessary
GlobalBaseReg = 0;
- PPCLowering = TM.getSubtargetImpl()->getTargetLowering();
- PPCSubTarget = TM.getSubtargetImpl();
+ PPCSubTarget = &MF.getSubtarget<PPCSubtarget>();
+ PPCLowering = PPCSubTarget->getTargetLowering();
SelectionDAGISel::runOnMachineFunction(MF);
if (!PPCSubTarget->isSVR4ABI())
@@ -77,6 +85,7 @@ namespace {
return true;
}
+ void PreprocessISelDAG() override;
void PostprocessISelDAG() override;
/// getI32Imm - Return a target constant with the specified value, of type
@@ -112,11 +121,14 @@ namespace {
/// base register. Return the virtual register that holds this value.
SDNode *getGlobalBaseReg();
+ SDNode *getFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);
+
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *Select(SDNode *N) override;
SDNode *SelectBitfieldInsert(SDNode *N);
+ SDNode *SelectBitPermutation(SDNode *N);
/// SelectCC - Select a comparison of the specified values with the
/// specified condition code, returning the CR# of the expression.
@@ -173,10 +185,20 @@ namespace {
/// a register. The case of adding a (possibly relocatable) constant to a
/// register can be improved, but it is wrong to substitute Reg+Reg for
/// Reg in an asm, because the load or store opcode would have to change.
- bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps) override {
- OutOps.push_back(Op);
+ // We need to make sure that this one operand does not end up in r0
+ // (because we might end up lowering this as 0(%op)).
+ const TargetRegisterInfo *TRI = PPCSubTarget->getRegisterInfo();
+ const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
+ SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
+ SDValue NewOp =
+ SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
+ SDLoc(Op), Op.getValueType(),
+ Op, RC), 0);
+
+ OutOps.push_back(NewOp);
return false;
}
@@ -193,10 +215,16 @@ private:
SDNode *SelectSETCC(SDNode *N);
void PeepholePPC64();
+ void PeepholePPC64ZExt();
void PeepholeCROps();
+ SDValue combineToCMPB(SDNode *N);
+ void foldBoolExts(SDValue &Res, SDNode *&N);
+
bool AllUsersSelectZero(SDNode *N);
void SwapAllSelectUsers(SDNode *N);
+
+ SDNode *transferMemOperands(SDNode *N, SDNode *Result);
};
}
@@ -234,7 +262,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
- const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
MachineBasicBlock &EntryBB = *Fn.begin();
DebugLoc dl;
// Emit the following code into the entry block:
@@ -270,7 +298,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
///
SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
if (!GlobalBaseReg) {
- const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
// Insert the set of GlobalBaseReg into the first MBB of the function
MachineBasicBlock &FirstMBB = MF->front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
@@ -283,12 +311,13 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
if (M->getPICLevel() == PICLevel::Small) {
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
+ MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
} else {
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
unsigned TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(FirstMBB, MBBI, dl,
- TII.get(PPC::UpdateGBR)).addReg(GlobalBaseReg)
+ TII.get(PPC::UpdateGBR), GlobalBaseReg)
.addReg(TempReg, RegState::Define).addReg(GlobalBaseReg);
MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
}
@@ -363,6 +392,18 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
&& isInt32Immediate(N->getOperand(1).getNode(), Imm);
}
+SDNode *PPCDAGToDAGISel::getFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
+ SDLoc dl(SN);
+ int FI = cast<FrameIndexSDNode>(N)->getIndex();
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
+ unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
+ if (SN->hasOneUse())
+ return CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
+ getSmallIPtrImm(Offset));
+ return CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
+ getSmallIPtrImm(Offset));
+}
+
bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
if (!Val)
return false;
@@ -507,6 +548,1401 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
return nullptr;
}
+// Predict the number of instructions that would be generated by calling
+// SelectInt64Direct (below) on the same immediate.
+static unsigned SelectInt64CountDirect(int64_t Imm) {
+ // Assume no remaining bits.
+ unsigned Remainder = 0;
+ // Assume no shift required.
+ unsigned Shift = 0;
+
+ // If it can't be represented as a 32 bit value.
+ if (!isInt<32>(Imm)) {
+ Shift = countTrailingZeros<uint64_t>(Imm);
+ int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
+
+ // If the shifted value fits 32 bits.
+ if (isInt<32>(ImmSh)) {
+ // Go with the shifted value.
+ Imm = ImmSh;
+ } else {
+ // Still stuck with a 64 bit value.
+ Remainder = Imm;
+ Shift = 32;
+ Imm >>= 32;
+ }
+ }
+
+ // Intermediate operand.
+ unsigned Result = 0;
+
+ // Handle first 32 bits.
+ unsigned Lo = Imm & 0xFFFF;
+ unsigned Hi = (Imm >> 16) & 0xFFFF;
+
+ // Simple value.
+ if (isInt<16>(Imm)) {
+ // Just the Lo bits.
+ ++Result;
+ } else if (Lo) {
+ // Handle the Hi bits and Lo bits.
+ Result += 2;
+ } else {
+ // Just the Hi bits.
+ ++Result;
+ }
+
+ // If no shift, we're done.
+ if (!Shift) return Result;
+
+ // Shift for next step if the upper 32-bits were not zero.
+ if (Imm)
+ ++Result;
+
+ // Add in the last bits as required.
+ if ((Hi = (Remainder >> 16) & 0xFFFF))
+ ++Result;
+ if ((Lo = Remainder & 0xFFFF))
+ ++Result;
+
+ return Result;
+}
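
A small standalone check of the decomposition step above (not part of the patch; the example is hand-worked): a constant with many trailing zeros is shifted down, materialized as a 32-bit quantity, and shifted back, for a two-instruction sequence.

    #include <cassert>
    #include <cstdint>

    // Precondition: V != 0. Standalone stand-in for countTrailingZeros.
    static unsigned countTrailingZeros64(uint64_t V) {
      unsigned N = 0;
      while (!(V & 1)) { V >>= 1; ++N; }
      return N;
    }

    int main() {
      const int64_t Imm = 0xF0000000LL; // too big for a signed 32-bit value
      unsigned Shift = countTrailingZeros64((uint64_t)Imm);
      int64_t ImmSh = (int64_t)((uint64_t)Imm >> Shift);
      assert(Shift == 28 && ImmSh == 0xF);
      // ImmSh fits in 16 bits, so: li 15; then one shift -- two instructions.
      return 0;
    }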
+
+static uint64_t Rot64(uint64_t Imm, unsigned R) {
+ return (Imm << R) | (Imm >> (64 - R));
+}
+
+static unsigned SelectInt64Count(int64_t Imm) {
+ unsigned Count = SelectInt64CountDirect(Imm);
+ if (Count == 1)
+ return Count;
+
+ for (unsigned r = 1; r < 63; ++r) {
+ uint64_t RImm = Rot64(Imm, r);
+ unsigned RCount = SelectInt64CountDirect(RImm) + 1;
+ Count = std::min(Count, RCount);
+
+ // See comments in SelectInt64 for an explanation of the logic below.
+ unsigned LS = findLastSet(RImm);
+ if (LS != r-1)
+ continue;
+
+ uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
+ uint64_t RImmWithOnes = RImm | OnesMask;
+
+ RCount = SelectInt64CountDirect(RImmWithOnes) + 1;
+ Count = std::min(Count, RCount);
+ }
+
+ return Count;
+}
+
+// Select a 64-bit constant. For cost-modeling purposes, SelectInt64Count
+// (above) needs to be kept in sync with this function.
+static SDNode *SelectInt64Direct(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
+ // Assume no remaining bits.
+ unsigned Remainder = 0;
+ // Assume no shift required.
+ unsigned Shift = 0;
+
+ // If it can't be represented as a 32 bit value.
+ if (!isInt<32>(Imm)) {
+ Shift = countTrailingZeros<uint64_t>(Imm);
+ int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
+
+ // If the shifted value fits 32 bits.
+ if (isInt<32>(ImmSh)) {
+ // Go with the shifted value.
+ Imm = ImmSh;
+ } else {
+ // Still stuck with a 64 bit value.
+ Remainder = Imm;
+ Shift = 32;
+ Imm >>= 32;
+ }
+ }
+
+ // Intermediate operand.
+ SDNode *Result;
+
+ // Handle first 32 bits.
+ unsigned Lo = Imm & 0xFFFF;
+ unsigned Hi = (Imm >> 16) & 0xFFFF;
+
+ auto getI32Imm = [CurDAG](unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+ };
+
+ // Simple value.
+ if (isInt<16>(Imm)) {
+ // Just the Lo bits.
+ Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
+ } else if (Lo) {
+ // Handle the Hi bits.
+ unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
+ Result = CurDAG->getMachineNode(OpC, dl, MVT::i64, getI32Imm(Hi));
+ // And Lo bits.
+ Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
+ SDValue(Result, 0), getI32Imm(Lo));
+ } else {
+ // Just the Hi bits.
+ Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
+ }
+
+ // If no shift, we're done.
+ if (!Shift) return Result;
+
+ // Shift for next step if the upper 32-bits were not zero.
+ if (Imm) {
+ Result = CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64,
+ SDValue(Result, 0),
+ getI32Imm(Shift),
+ getI32Imm(63 - Shift));
+ }
+
+ // Add in the last bits as required.
+ if ((Hi = (Remainder >> 16) & 0xFFFF)) {
+ Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
+ SDValue(Result, 0), getI32Imm(Hi));
+ }
+ if ((Lo = Remainder & 0xFFFF)) {
+ Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
+ SDValue(Result, 0), getI32Imm(Lo));
+ }
+
+ return Result;
+}
+
+static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
+ unsigned Count = SelectInt64CountDirect(Imm);
+ if (Count == 1)
+ return SelectInt64Direct(CurDAG, dl, Imm);
+
+ unsigned RMin = 0;
+
+ int64_t MatImm;
+ unsigned MaskEnd;
+
+ for (unsigned r = 1; r < 63; ++r) {
+ uint64_t RImm = Rot64(Imm, r);
+ unsigned RCount = SelectInt64CountDirect(RImm) + 1;
+ if (RCount < Count) {
+ Count = RCount;
+ RMin = r;
+ MatImm = RImm;
+ MaskEnd = 63;
+ }
+
+ // If the immediate to generate has many trailing zeros, it might be
+ // worthwhile to generate a rotated value with too many leading ones
+ // (because that's free with li/lis's sign-extension semantics), and then
+ // mask them off after rotation.
+
+ unsigned LS = findLastSet(RImm);
+ // We're adding (63-LS) higher-order ones, and we expect to mask them off
+ // after performing the inverse rotation by (64-r). So we need that:
+ // 63-LS == 64-r => LS == r-1
+ if (LS != r-1)
+ continue;
+
+ uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
+ uint64_t RImmWithOnes = RImm | OnesMask;
+
+ RCount = SelectInt64CountDirect(RImmWithOnes) + 1;
+ if (RCount < Count) {
+ Count = RCount;
+ RMin = r;
+ MatImm = RImmWithOnes;
+ MaskEnd = LS;
+ }
+ }
+
+ if (!RMin)
+ return SelectInt64Direct(CurDAG, dl, Imm);
+
+ auto getI32Imm = [CurDAG](unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+ };
+
+ SDValue Val = SDValue(SelectInt64Direct(CurDAG, dl, MatImm), 0);
+ return CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Val,
+ getI32Imm(64 - RMin), getI32Imm(MaskEnd));
+}
+
+// Select a 64-bit constant.
+static SDNode *SelectInt64(SelectionDAG *CurDAG, SDNode *N) {
+ SDLoc dl(N);
+
+ // Get 64 bit value.
+ int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ return SelectInt64(CurDAG, dl, Imm);
+}
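
The rotation trick is easy to verify outside of isel. A standalone sketch (plain C++, not LLVM code): rotating 0x8000000000000001 left by one yields 3, which a single li materializes, and one rldicr (rotate left by 63) undoes the rotation -- two instructions where the direct sequence needs three.

    #include <cassert>
    #include <cstdint>

    // Mirrors the Rot64 helper above (valid for R in [1, 63]).
    static uint64_t Rot64(uint64_t Imm, unsigned R) {
      return (Imm << R) | (Imm >> (64 - R));
    }

    int main() {
      const uint64_t Imm = UINT64_C(0x8000000000000001); // 3 insts directly
      const unsigned R = 1;
      uint64_t RImm = Rot64(Imm, R);
      assert(RImm == 3);                  // a single li materializes this
      // rldicr with rotate amount 64 - R performs the inverse rotation.
      assert(Rot64(RImm, 64 - R) == Imm); // li 3; rldicr ...,63,63 -> 2 insts
      return 0;
    }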
+
+namespace {
+class BitPermutationSelector {
+ struct ValueBit {
+ SDValue V;
+
+ // The bit number in the value, using a convention where bit 0 is the
+ // lowest-order bit.
+ unsigned Idx;
+
+ enum Kind {
+ ConstZero,
+ Variable
+ } K;
+
+ ValueBit(SDValue V, unsigned I, Kind K = Variable)
+ : V(V), Idx(I), K(K) {}
+ ValueBit(Kind K = Variable)
+ : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}
+
+ bool isZero() const {
+ return K == ConstZero;
+ }
+
+ bool hasValue() const {
+ return K == Variable;
+ }
+
+ SDValue getValue() const {
+ assert(hasValue() && "Cannot get the value of a constant bit");
+ return V;
+ }
+
+ unsigned getValueBitIndex() const {
+ assert(hasValue() && "Cannot get the value bit index of a constant bit");
+ return Idx;
+ }
+ };
+
+ // A bit group has the same underlying value and the same rotate factor.
+ struct BitGroup {
+ SDValue V;
+ unsigned RLAmt;
+ unsigned StartIdx, EndIdx;
+
+ // This rotation amount assumes that the lower 32 bits of the quantity are
+ // replicated in the high 32 bits by the rotation operator (which is done
+ // by rlwinm and friends in 64-bit mode).
+ bool Repl32;
+ // Did converting to Repl32 == true change the rotation factor? If it did,
+ // it decreased it by 32.
+ bool Repl32CR;
+ // Was this group coalesced after setting Repl32 to true?
+ bool Repl32Coalesced;
+
+ BitGroup(SDValue V, unsigned R, unsigned S, unsigned E)
+ : V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false),
+ Repl32Coalesced(false) {
+ DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R <<
+ " [" << S << ", " << E << "]\n");
+ }
+ };
+
+ // Information on each (Value, RLAmt) pair (like the number of groups
+ // associated with each) used to choose the lowering method.
+ struct ValueRotInfo {
+ SDValue V;
+ unsigned RLAmt;
+ unsigned NumGroups;
+ unsigned FirstGroupStartIdx;
+ bool Repl32;
+
+ ValueRotInfo()
+ : RLAmt(UINT32_MAX), NumGroups(0), FirstGroupStartIdx(UINT32_MAX),
+ Repl32(false) {}
+
+ // For sorting (in reverse order) by NumGroups, and then by
+ // FirstGroupStartIdx.
+ bool operator < (const ValueRotInfo &Other) const {
+ // We need to sort so that the non-Repl32 come first because, when we're
+ // doing masking, the Repl32 bit groups might be subsumed into the 64-bit
+ // masking operation.
+ if (Repl32 < Other.Repl32)
+ return true;
+ else if (Repl32 > Other.Repl32)
+ return false;
+ else if (NumGroups > Other.NumGroups)
+ return true;
+ else if (NumGroups < Other.NumGroups)
+ return false;
+ else if (FirstGroupStartIdx < Other.FirstGroupStartIdx)
+ return true;
+ return false;
+ }
+ };
+
+ // Return true if something interesting was deduced; return false if we're
+ // providing only a generic representation of V (or something else likewise
+ // uninteresting for instruction selection).
+ bool getValueBits(SDValue V, SmallVector<ValueBit, 64> &Bits) {
+ switch (V.getOpcode()) {
+ default: break;
+ case ISD::ROTL:
+ if (isa<ConstantSDNode>(V.getOperand(1))) {
+ unsigned RotAmt = V.getConstantOperandVal(1);
+
+ SmallVector<ValueBit, 64> LHSBits(Bits.size());
+ getValueBits(V.getOperand(0), LHSBits);
+
+ for (unsigned i = 0; i < Bits.size(); ++i)
+ Bits[i] = LHSBits[i < RotAmt ? i + (Bits.size() - RotAmt) : i - RotAmt];
+
+ return true;
+ }
+ break;
+ case ISD::SHL:
+ if (isa<ConstantSDNode>(V.getOperand(1))) {
+ unsigned ShiftAmt = V.getConstantOperandVal(1);
+
+ SmallVector<ValueBit, 64> LHSBits(Bits.size());
+ getValueBits(V.getOperand(0), LHSBits);
+
+ for (unsigned i = ShiftAmt; i < Bits.size(); ++i)
+ Bits[i] = LHSBits[i - ShiftAmt];
+
+ for (unsigned i = 0; i < ShiftAmt; ++i)
+ Bits[i] = ValueBit(ValueBit::ConstZero);
+
+ return true;
+ }
+ break;
+ case ISD::SRL:
+ if (isa<ConstantSDNode>(V.getOperand(1))) {
+ unsigned ShiftAmt = V.getConstantOperandVal(1);
+
+ SmallVector<ValueBit, 64> LHSBits(Bits.size());
+ getValueBits(V.getOperand(0), LHSBits);
+
+ for (unsigned i = 0; i < Bits.size() - ShiftAmt; ++i)
+ Bits[i] = LHSBits[i + ShiftAmt];
+
+ for (unsigned i = Bits.size() - ShiftAmt; i < Bits.size(); ++i)
+ Bits[i] = ValueBit(ValueBit::ConstZero);
+
+ return true;
+ }
+ break;
+ case ISD::AND:
+ if (isa<ConstantSDNode>(V.getOperand(1))) {
+ uint64_t Mask = V.getConstantOperandVal(1);
+
+ SmallVector<ValueBit, 64> LHSBits(Bits.size());
+ bool LHSTrivial = getValueBits(V.getOperand(0), LHSBits);
+
+ for (unsigned i = 0; i < Bits.size(); ++i)
+ if (((Mask >> i) & 1) == 1)
+ Bits[i] = LHSBits[i];
+ else
+ Bits[i] = ValueBit(ValueBit::ConstZero);
+
+ // Mark this as interesting only if the LHS was also interesting. This
+ // prevents the overall procedure from matching a single immediate 'and'
+ // (which is non-optimal because such an and might be folded with other
+ // things if we don't select it here).
+ return LHSTrivial;
+ }
+ break;
+ case ISD::OR: {
+ SmallVector<ValueBit, 64> LHSBits(Bits.size()), RHSBits(Bits.size());
+ getValueBits(V.getOperand(0), LHSBits);
+ getValueBits(V.getOperand(1), RHSBits);
+
+ bool AllDisjoint = true;
+ for (unsigned i = 0; i < Bits.size(); ++i)
+ if (LHSBits[i].isZero())
+ Bits[i] = RHSBits[i];
+ else if (RHSBits[i].isZero())
+ Bits[i] = LHSBits[i];
+ else {
+ AllDisjoint = false;
+ break;
+ }
+
+ if (!AllDisjoint)
+ break;
+
+ return true;
+ }
+ }
+
+ for (unsigned i = 0; i < Bits.size(); ++i)
+ Bits[i] = ValueBit(V, i);
+
+ return false;
+ }
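
For intuition about what getValueBits feeds the rest of the pass, consider (a << 16) | (b >> 16) in 32 bits: every result bit traces to one input with a rotate amount of 16 on both sides, so the permutation can be emitted as one rlwinm plus one rlwimi. A standalone sketch (not LLVM code) verifying the composition:

    #include <cassert>
    #include <cstdint>

    static uint32_t rotl32(uint32_t V, unsigned R) {
      return (V << R) | (V >> (32 - R));
    }

    int main() {
      uint32_t a = 0x12345678, b = 0x9ABCDEF0;
      uint32_t Expected = (a << 16) | (b >> 16);
      // rlwinm Res, a, 16, 0, 15  -- rotate a left 16, keep the high half
      uint32_t Res = rotl32(a, 16) & 0xFFFF0000;
      // rlwimi Res, b, 16, 16, 31 -- insert rotated b into the low half
      Res |= rotl32(b, 16) & 0x0000FFFF;
      assert(Res == Expected);
      return 0;
    }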
+
+ // For each value (except the constant ones), compute the left-rotate amount
+ // to get it from its original to final position.
+ void computeRotationAmounts() {
+ HasZeros = false;
+ RLAmt.resize(Bits.size());
+ for (unsigned i = 0; i < Bits.size(); ++i)
+ if (Bits[i].hasValue()) {
+ unsigned VBI = Bits[i].getValueBitIndex();
+ if (i >= VBI)
+ RLAmt[i] = i - VBI;
+ else
+ RLAmt[i] = Bits.size() - (VBI - i);
+ } else if (Bits[i].isZero()) {
+ HasZeros = true;
+ RLAmt[i] = UINT32_MAX;
+ } else {
+ llvm_unreachable("Unknown value bit type");
+ }
+ }
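
The rotation-amount rule above is just modular arithmetic: to move value bit VBI to final position i under a left rotate, the amount is (i - VBI) mod width. A minimal standalone check (function name hypothetical):

    #include <cassert>

    // Left-rotate amount that moves value bit VBI to final position i.
    static unsigned rotAmt(unsigned i, unsigned VBI, unsigned Width) {
      return i >= VBI ? i - VBI : Width - (VBI - i);
    }

    int main() {
      assert(rotAmt(0, 24, 32) == 8);  // rotl by 8 moves bit 24 to bit 0
      assert(rotAmt(10, 3, 32) == 7);  // rotl by 7 moves bit 3 to bit 10
      return 0;
    }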
+
+ // Collect groups of consecutive bits with the same underlying value and
+ // rotation factor. If we're doing late masking, we ignore zeros, otherwise
+ // they break up groups.
+ void collectBitGroups(bool LateMask) {
+ BitGroups.clear();
+
+ unsigned LastRLAmt = RLAmt[0];
+ SDValue LastValue = Bits[0].hasValue() ? Bits[0].getValue() : SDValue();
+ unsigned LastGroupStartIdx = 0;
+ for (unsigned i = 1; i < Bits.size(); ++i) {
+ unsigned ThisRLAmt = RLAmt[i];
+ SDValue ThisValue = Bits[i].hasValue() ? Bits[i].getValue() : SDValue();
+ if (LateMask && !ThisValue) {
+ ThisValue = LastValue;
+ ThisRLAmt = LastRLAmt;
+ // If we're doing late masking, then the first bit group always starts
+ // at zero (even if the first bits were zero).
+ if (BitGroups.empty())
+ LastGroupStartIdx = 0;
+ }
+
+ // If this bit has the same underlying value and the same rotate factor as
+ // the last one, then they're part of the same group.
+ if (ThisRLAmt == LastRLAmt && ThisValue == LastValue)
+ continue;
+
+ if (LastValue.getNode())
+ BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
+ i-1));
+ LastRLAmt = ThisRLAmt;
+ LastValue = ThisValue;
+ LastGroupStartIdx = i;
+ }
+ if (LastValue.getNode())
+ BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
+ Bits.size()-1));
+
+ if (BitGroups.empty())
+ return;
+
+ // We might be able to combine the first and last groups.
+ if (BitGroups.size() > 1) {
+ // If the first and last groups are the same, then remove the first group
+ // in favor of the last group, making the ending index of the last group
+ // equal to the ending index of the to-be-removed first group.
+ if (BitGroups[0].StartIdx == 0 &&
+ BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
+ BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
+ BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
+ DEBUG(dbgs() << "\tcombining final bit group with inital one\n");
+ BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
+ BitGroups.erase(BitGroups.begin());
+ }
+ }
+ }
+
+ // Take all (SDValue, RLAmt) pairs and sort them by the number of groups
+ // associated with each. If there is a degeneracy, pick the one that occurs
+ // first (in the final value).
+ void collectValueRotInfo() {
+ ValueRots.clear();
+
+ for (auto &BG : BitGroups) {
+ unsigned RLAmtKey = BG.RLAmt + (BG.Repl32 ? 64 : 0);
+ ValueRotInfo &VRI = ValueRots[std::make_pair(BG.V, RLAmtKey)];
+ VRI.V = BG.V;
+ VRI.RLAmt = BG.RLAmt;
+ VRI.Repl32 = BG.Repl32;
+ VRI.NumGroups += 1;
+ VRI.FirstGroupStartIdx = std::min(VRI.FirstGroupStartIdx, BG.StartIdx);
+ }
+
+ // Now that we've collected the various ValueRotInfo instances, we need to
+ // sort them.
+ ValueRotsVec.clear();
+ for (auto &I : ValueRots) {
+ ValueRotsVec.push_back(I.second);
+ }
+ std::sort(ValueRotsVec.begin(), ValueRotsVec.end());
+ }
+
+ // In 64-bit mode, rlwinm and friends have a rotation operator that
+ // replicates the low-order 32 bits into the high-order 32 bits. The mask
+ // indices of these instructions can only be in the lower 32 bits, so they
+ // can only represent some 64-bit bit groups. However, when applicable, the
+ // 32-bit replication lets otherwise separate bit groups be represented as
+ // a single group. We'll convert to replicated-32-bit bit groups when
+ // possible.
+ void assignRepl32BitGroups() {
+ // If we have bits like this:
+ //
+ // Indices: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ // V bits: ... 7 6 5 4 3 2 1 0 31 30 29 28 27 26 25 24
+ // Groups: | RLAmt = 8 | RLAmt = 40 |
+ //
+ // But, making use of a 32-bit operation that replicates the low-order 32
+ // bits into the high-order 32 bits, this can be one bit group with a RLAmt
+ // of 8.
+
+ auto IsAllLow32 = [this](BitGroup & BG) {
+ if (BG.StartIdx <= BG.EndIdx) {
+ for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) {
+ if (!Bits[i].hasValue())
+ continue;
+ if (Bits[i].getValueBitIndex() >= 32)
+ return false;
+ }
+ } else {
+ for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) {
+ if (!Bits[i].hasValue())
+ continue;
+ if (Bits[i].getValueBitIndex() >= 32)
+ return false;
+ }
+ for (unsigned i = 0; i <= BG.EndIdx; ++i) {
+ if (!Bits[i].hasValue())
+ continue;
+ if (Bits[i].getValueBitIndex() >= 32)
+ return false;
+ }
+ }
+
+ return true;
+ };
+
+ for (auto &BG : BitGroups) {
+ if (BG.StartIdx < 32 && BG.EndIdx < 32) {
+ if (IsAllLow32(BG)) {
+ if (BG.RLAmt >= 32) {
+ BG.RLAmt -= 32;
+ BG.Repl32CR = true;
+ }
+
+ BG.Repl32 = true;
+
+ DEBUG(dbgs() << "\t32-bit replicated bit group for " <<
+ BG.V.getNode() << " RLAmt = " << BG.RLAmt <<
+ " [" << BG.StartIdx << ", " << BG.EndIdx << "]\n");
+ }
+ }
+ }
+
+ // Now walk through the bit groups, consolidating where possible.
+ for (auto I = BitGroups.begin(); I != BitGroups.end();) {
+ // We might want to remove this bit group by merging it with the previous
+ // group (which might be the ending group).
+ auto IP = (I == BitGroups.begin()) ?
+ std::prev(BitGroups.end()) : std::prev(I);
+ if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt &&
+ I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) {
+
+ DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for " <<
+ I->V.getNode() << " RLAmt = " << I->RLAmt <<
+ " [" << I->StartIdx << ", " << I->EndIdx <<
+ "] with group with range [" <<
+ IP->StartIdx << ", " << IP->EndIdx << "]\n");
+
+ IP->EndIdx = I->EndIdx;
+ IP->Repl32CR = IP->Repl32CR || I->Repl32CR;
+ IP->Repl32Coalesced = true;
+ I = BitGroups.erase(I);
+ continue;
+ } else {
+ // There is a special case worth handling: If there is a single group
+ // covering the entire upper 32 bits, and it can be merged with both
+ // the next and previous groups (which might be the same group), then
+ // do so. If it is the same group (so there will be only one group in
+ // total), then we need to reverse the order of the range so that it
+ // covers the entire 64 bits.
+ if (I->StartIdx == 32 && I->EndIdx == 63) {
+ assert(std::next(I) == BitGroups.end() &&
+ "bit group ends at index 63 but there is another?");
+ auto IN = BitGroups.begin();
+
+ if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V &&
+ (I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt &&
+ IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP &&
+ IsAllLow32(*I)) {
+
+ DEBUG(dbgs() << "\tcombining bit group for " <<
+ I->V.getNode() << " RLAmt = " << I->RLAmt <<
+ " [" << I->StartIdx << ", " << I->EndIdx <<
+ "] with 32-bit replicated groups with ranges [" <<
+ IP->StartIdx << ", " << IP->EndIdx << "] and [" <<
+ IN->StartIdx << ", " << IN->EndIdx << "]\n");
+
+ if (IP == IN) {
+ // There is only one other group; change it to cover the whole
+ // range (backward, so that it can still be Repl32 but cover the
+ // whole 64-bit range).
+ IP->StartIdx = 31;
+ IP->EndIdx = 30;
+ IP->Repl32CR = IP->Repl32CR || I->RLAmt >= 32;
+ IP->Repl32Coalesced = true;
+ I = BitGroups.erase(I);
+ } else {
+ // There are two separate groups, one before this group and one
+ // after us (at the beginning). We're going to remove this group,
+ // but also the group at the very beginning.
+ IP->EndIdx = IN->EndIdx;
+ IP->Repl32CR = IP->Repl32CR || IN->Repl32CR || I->RLAmt >= 32;
+ IP->Repl32Coalesced = true;
+ I = BitGroups.erase(I);
+ BitGroups.erase(BitGroups.begin());
+ }
+
+ // This must be the last group in the vector (and we might have
+ // just invalidated the iterator above), so break here.
+ break;
+ }
+ }
+ }
+
+ ++I;
+ }
+ }
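
The Repl32 idea can be sanity-checked in isolation: a 64-bit value whose halves are identical is 32-periodic under rotation, so rotate amounts of R and R+32 describe the same bit group. A standalone sketch (not LLVM code):

    #include <cassert>
    #include <cstdint>

    static uint64_t rotl64(uint64_t V, unsigned R) {
      return R ? (V << R) | (V >> (64 - R)) : V;
    }

    int main() {
      uint64_t V = UINT64_C(0x0000000012345678);
      uint64_t Repl = (V << 32) | (V & 0xFFFFFFFFULL); // what rlwinm rotates
      // The two seemingly different rotate amounts (8 and 40) coincide on the
      // replicated value, so the split groups coalesce into one.
      assert(rotl64(Repl, 8) == rotl64(Repl, 40));
      return 0;
    }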
+
+ SDValue getI32Imm(unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+ }
+
+ uint64_t getZerosMask() {
+ uint64_t Mask = 0;
+ for (unsigned i = 0; i < Bits.size(); ++i) {
+ if (Bits[i].hasValue())
+ continue;
+ Mask |= (UINT64_C(1) << i);
+ }
+
+ return ~Mask;
+ }
+
+ // Depending on the number of groups for a particular value, it might be
+ // better to rotate, mask explicitly (using andi/andis), and then or the
+ // result. Select this part of the result first.
+ void SelectAndParts32(SDLoc dl, SDValue &Res, unsigned *InstCnt) {
+ if (BPermRewriterNoMasking)
+ return;
+
+ for (ValueRotInfo &VRI : ValueRotsVec) {
+ unsigned Mask = 0;
+ for (unsigned i = 0; i < Bits.size(); ++i) {
+ if (!Bits[i].hasValue() || Bits[i].getValue() != VRI.V)
+ continue;
+ if (RLAmt[i] != VRI.RLAmt)
+ continue;
+ Mask |= (1u << i);
+ }
+
+ // Compute the masks for andi/andis that would be necessary.
+ unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16;
+ assert((ANDIMask != 0 || ANDISMask != 0) &&
+ "No set bits in mask for value bit groups");
+ bool NeedsRotate = VRI.RLAmt != 0;
+
+ // We're trying to minimize the number of instructions. If we have one
+ // group, using one of andi/andis can break even. If we have three
+ // groups, we can use both andi and andis and break even (to use both
+ // andi and andis we also need to or the results together). We need four
+ // groups if we also need to rotate. To use andi/andis we need to do more
+ // than break even because rotate-and-mask instructions tend to be easier
+ // to schedule.
+
+ // FIXME: We've biased here against using andi/andis, which is right for
+ // POWER cores, but not optimal everywhere. For example, on the A2,
+ // andi/andis have single-cycle latency whereas the rotate-and-mask
+ // instructions take two cycles, and it would be better to bias toward
+ // andi/andis in break-even cases.
+
+ unsigned NumAndInsts = (unsigned) NeedsRotate +
+ (unsigned) (ANDIMask != 0) +
+ (unsigned) (ANDISMask != 0) +
+ (unsigned) (ANDIMask != 0 && ANDISMask != 0) +
+ (unsigned) (bool) Res;
+
+ DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
+ " RL: " << VRI.RLAmt << ":" <<
+ "\n\t\t\tisel using masking: " << NumAndInsts <<
+ " using rotates: " << VRI.NumGroups << "\n");
+
+ if (NumAndInsts >= VRI.NumGroups)
+ continue;
+
+ DEBUG(dbgs() << "\t\t\t\tusing masking\n");
+
+ if (InstCnt) *InstCnt += NumAndInsts;
+
+ SDValue VRot;
+ if (VRI.RLAmt) {
+ SDValue Ops[] =
+ { VRI.V, getI32Imm(VRI.RLAmt), getI32Imm(0), getI32Imm(31) };
+ VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
+ Ops), 0);
+ } else {
+ VRot = VRI.V;
+ }
+
+ SDValue ANDIVal, ANDISVal;
+ if (ANDIMask != 0)
+ ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
+ VRot, getI32Imm(ANDIMask)), 0);
+ if (ANDISMask != 0)
+ ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
+ VRot, getI32Imm(ANDISMask)), 0);
+
+ SDValue TotalVal;
+ if (!ANDIVal)
+ TotalVal = ANDISVal;
+ else if (!ANDISVal)
+ TotalVal = ANDIVal;
+ else
+ TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
+ ANDIVal, ANDISVal), 0);
+
+ if (!Res)
+ Res = TotalVal;
+ else
+ Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
+ Res, TotalVal), 0);
+
+ // Now, remove all groups with this underlying value and rotation
+ // factor.
+ for (auto I = BitGroups.begin(); I != BitGroups.end();) {
+ if (I->V == VRI.V && I->RLAmt == VRI.RLAmt)
+ I = BitGroups.erase(I);
+ else
+ ++I;
+ }
+ }
+ }
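
The andi/andis split above relies on a simple identity: a 32-bit mask is applied as its low half (the andi. operand) or'd with its high half shifted up (the andis. operand). A standalone sketch (not LLVM code; the record-form side effects of andi./andis. are ignored here):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Mask = 0x00FF0FF0, V = 0xDEADBEEF;
      uint32_t ANDIMask = Mask & 0xFFFF;  // andi. immediate (low 16 bits)
      uint32_t ANDISMask = Mask >> 16;    // andis. immediate (high 16 bits)
      uint32_t Res = (V & ANDIMask) | (V & (ANDISMask << 16));
      assert(Res == (V & Mask));
      return 0;
    }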
+
+ // Instruction selection for the 32-bit case.
+ SDNode *Select32(SDNode *N, bool LateMask, unsigned *InstCnt) {
+ SDLoc dl(N);
+ SDValue Res;
+
+ if (InstCnt) *InstCnt = 0;
+
+ // Take care of cases that should use andi/andis first.
+ SelectAndParts32(dl, Res, InstCnt);
+
+ // If we've not yet selected a 'starting' instruction, and we have no zeros
+ // to fill in, select the (Value, RLAmt) with the highest priority (largest
+ // number of groups), and start with this rotated value.
+ if ((!HasZeros || LateMask) && !Res) {
+ ValueRotInfo &VRI = ValueRotsVec[0];
+ if (VRI.RLAmt) {
+ if (InstCnt) *InstCnt += 1;
+ SDValue Ops[] =
+ { VRI.V, getI32Imm(VRI.RLAmt), getI32Imm(0), getI32Imm(31) };
+ Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
+ } else {
+ Res = VRI.V;
+ }
+
+ // Now, remove all groups with this underlying value and rotation factor.
+ for (auto I = BitGroups.begin(); I != BitGroups.end();) {
+ if (I->V == VRI.V && I->RLAmt == VRI.RLAmt)
+ I = BitGroups.erase(I);
+ else
+ ++I;
+ }
+ }
+
+ if (InstCnt) *InstCnt += BitGroups.size();
+
+ // Insert the other groups (one at a time).
+ for (auto &BG : BitGroups) {
+ if (!Res) {
+ SDValue Ops[] =
+ { BG.V, getI32Imm(BG.RLAmt), getI32Imm(Bits.size() - BG.EndIdx - 1),
+ getI32Imm(Bits.size() - BG.StartIdx - 1) };
+ Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
+ } else {
+ SDValue Ops[] =
+ { Res, BG.V, getI32Imm(BG.RLAmt), getI32Imm(Bits.size() - BG.EndIdx - 1),
+ getI32Imm(Bits.size() - BG.StartIdx - 1) };
+ Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0);
+ }
+ }
+
+ if (LateMask) {
+ unsigned Mask = (unsigned) getZerosMask();
+
+ unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16;
+ assert((ANDIMask != 0 || ANDISMask != 0) &&
+ "No set bits in zeros mask?");
+
+ if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) +
+ (unsigned) (ANDISMask != 0) +
+ (unsigned) (ANDIMask != 0 && ANDISMask != 0);
+
+ SDValue ANDIVal, ANDISVal;
+ if (ANDIMask != 0)
+ ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
+ Res, getI32Imm(ANDIMask)), 0);
+ if (ANDISMask != 0)
+ ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
+ Res, getI32Imm(ANDISMask)), 0);
+
+ if (!ANDIVal)
+ Res = ANDISVal;
+ else if (!ANDISVal)
+ Res = ANDIVal;
+ else
+ Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
+ ANDIVal, ANDISVal), 0);
+ }
+
+ return Res.getNode();
+ }
+
+ unsigned SelectRotMask64Count(unsigned RLAmt, bool Repl32,
+ unsigned MaskStart, unsigned MaskEnd,
+ bool IsIns) {
+ // In the notation used by the instructions, 'start' and 'end' are reversed
+ // because bits are counted from high to low order.
+ unsigned InstMaskStart = 64 - MaskEnd - 1,
+ InstMaskEnd = 64 - MaskStart - 1;
+
+ if (Repl32)
+ return 1;
+
+ if ((!IsIns && (InstMaskEnd == 63 || InstMaskStart == 0)) ||
+ InstMaskEnd == 63 - RLAmt)
+ return 1;
+
+ return 2;
+ }
+
+ // For 64-bit values, not all combinations of rotates and masks are
+ // available. Produce one if it is available.
+ SDValue SelectRotMask64(SDValue V, SDLoc dl, unsigned RLAmt, bool Repl32,
+ unsigned MaskStart, unsigned MaskEnd,
+ unsigned *InstCnt = nullptr) {
+ // In the notation used by the instructions, 'start' and 'end' are reversed
+ // because bits are counted from high to low order.
+ unsigned InstMaskStart = 64 - MaskEnd - 1,
+ InstMaskEnd = 64 - MaskStart - 1;
+
+ if (InstCnt) *InstCnt += 1;
+
+ if (Repl32) {
+ // This rotation amount assumes that the lower 32 bits of the quantity
+ // are replicated in the high 32 bits by the rotation operator (which is
+ // done by rlwinm and friends).
+ assert(InstMaskStart >= 32 && "Mask cannot start out of range");
+ assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
+ SDValue Ops[] =
+ { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart - 32),
+ getI32Imm(InstMaskEnd - 32) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64,
+ Ops), 0);
+ }
+
+ if (InstMaskEnd == 63) {
+ SDValue Ops[] =
+ { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0);
+ }
+
+ if (InstMaskStart == 0) {
+ SDValue Ops[] =
+ { V, getI32Imm(RLAmt), getI32Imm(InstMaskEnd) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0);
+ }
+
+ if (InstMaskEnd == 63 - RLAmt) {
+ SDValue Ops[] =
+ { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0);
+ }
+
+ // We cannot do this with a single instruction, so we'll use two. The
+ // problem is that we're not free to choose both a rotation amount and mask
+ // start and end independently. We can choose an arbitrary mask start and
+ // end, but then the rotation amount is fixed. Rotation, however, can be
+ // inverted, and so by applying an "inverse" rotation first, we can get the
+ // desired result.
+ if (InstCnt) *InstCnt += 1;
+
+ // The rotation amount for the second instruction must be MaskStart.
+ unsigned RLAmt2 = MaskStart;
+ // The first instruction must rotate V so that the overall rotation amount
+ // is RLAmt.
+ unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
+ if (RLAmt1)
+ V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63);
+ return SelectRotMask64(V, dl, RLAmt2, false, MaskStart, MaskEnd);
+ }
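
The two-instruction fallback works because rotations compose additively modulo 64: fixing the second rotate amount to MaskStart and pre-rotating by the difference reproduces the requested overall rotation. A standalone check (not LLVM code):

    #include <cassert>
    #include <cstdint>

    static uint64_t rotl64(uint64_t V, unsigned R) {
      return R ? (V << R) | (V >> (64 - R)) : V;
    }

    int main() {
      const unsigned RLAmt = 17, MaskStart = 4;
      const unsigned RLAmt2 = MaskStart;              // forced by the mask
      const unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
      const uint64_t V = UINT64_C(0x0123456789ABCDEF);
      assert(rotl64(rotl64(V, RLAmt1), RLAmt2) == rotl64(V, RLAmt));
      return 0;
    }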
+
+ // For 64-bit values, not all combinations of rotates and masks are
+ // available. Produce a rotate-mask-and-insert if one is available.
+ SDValue SelectRotMaskIns64(SDValue Base, SDValue V, SDLoc dl, unsigned RLAmt,
+ bool Repl32, unsigned MaskStart,
+ unsigned MaskEnd, unsigned *InstCnt = nullptr) {
+ // In the notation used by the instructions, 'start' and 'end' are reversed
+ // because bits are counted from high to low order.
+ unsigned InstMaskStart = 64 - MaskEnd - 1,
+ InstMaskEnd = 64 - MaskStart - 1;
+
+ if (InstCnt) *InstCnt += 1;
+
+ if (Repl32) {
+ // This rotation amount assumes that the lower 32 bits of the quantity
+ // are replicated in the high 32 bits by the rotation operator (which is
+ // done by rlwinm and friends).
+ assert(InstMaskStart >= 32 && "Mask cannot start out of range");
+ assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
+ SDValue Ops[] =
+ { Base, V, getI32Imm(RLAmt), getI32Imm(InstMaskStart - 32),
+ getI32Imm(InstMaskEnd - 32) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64,
+ Ops), 0);
+ }
+
+ if (InstMaskEnd == 63 - RLAmt) {
+ SDValue Ops[] =
+ { Base, V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0);
+ }
+
+ // We cannot do this with a single instruction, so we'll use two. The
+ // problem is that we're not free to choose both a rotation amount and mask
+ // start and end independently. We can choose an arbitrary mask start and
+ // end, but then the rotation amount is fixed. Rotation, however, can be
+ // inverted, and so by applying an "inverse" rotation first, we can get the
+ // desired result.
+ if (InstCnt) *InstCnt += 1;
+
+ // The rotation amount for the second instruction must be MaskStart.
+ unsigned RLAmt2 = MaskStart;
+ // The first instruction must rotate V so that the overall rotation amount
+ // is RLAmt.
+ unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
+ if (RLAmt1)
+ V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63);
+ return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd);
+ }
+
+ void SelectAndParts64(SDLoc dl, SDValue &Res, unsigned *InstCnt) {
+ if (BPermRewriterNoMasking)
+ return;
+
+ // The idea here is the same as in the 32-bit version, but with additional
+ // complications from the fact that Repl32 might be true. Because we
+ // aggressively convert bit groups to Repl32 form (which, for small
+ // rotation factors, involves no other change), and then coalesce, it might
+ // be the case that a single 64-bit masking operation could handle both
+ // some Repl32 groups and some non-Repl32 groups. If converting to Repl32
+ // form allowed coalescing, then we must use a 32-bit rotation in order to
+ // completely capture the new combined bit group.
+
+ for (ValueRotInfo &VRI : ValueRotsVec) {
+ uint64_t Mask = 0;
+
+ // We need to add to the mask all bits from the associated bit groups.
+ // If Repl32 is false, we need to add bits from bit groups that have
+ // Repl32 true, but are trivially convertible to Repl32 false. Such a
+ // group is trivially convertible if it overlaps only with the lower 32
+ // bits, and the group has not been coalesced.
+ auto MatchingBG = [VRI](BitGroup &BG) {
+ if (VRI.V != BG.V)
+ return false;
+
+ unsigned EffRLAmt = BG.RLAmt;
+ if (!VRI.Repl32 && BG.Repl32) {
+ if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx &&
+ !BG.Repl32Coalesced) {
+ if (BG.Repl32CR)
+ EffRLAmt += 32;
+ } else {
+ return false;
+ }
+ } else if (VRI.Repl32 != BG.Repl32) {
+ return false;
+ }
+
+ if (VRI.RLAmt != EffRLAmt)
+ return false;
+
+ return true;
+ };
+
+ for (auto &BG : BitGroups) {
+ if (!MatchingBG(BG))
+ continue;
+
+ if (BG.StartIdx <= BG.EndIdx) {
+ for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i)
+ Mask |= (UINT64_C(1) << i);
+ } else {
+ for (unsigned i = BG.StartIdx; i < Bits.size(); ++i)
+ Mask |= (UINT64_C(1) << i);
+ for (unsigned i = 0; i <= BG.EndIdx; ++i)
+ Mask |= (UINT64_C(1) << i);
+ }
+ }
+
+ // We can use the 32-bit andi/andis technique if the mask does not
+ // require any higher-order bits. This can save an instruction compared
+ // to always using the general 64-bit technique.
+ bool Use32BitInsts = isUInt<32>(Mask);
+ // Compute the masks for andi/andis that would be necessary.
+ unsigned ANDIMask = (Mask & UINT16_MAX),
+ ANDISMask = (Mask >> 16) & UINT16_MAX;
+
+ bool NeedsRotate = VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask));
+
+ unsigned NumAndInsts = (unsigned) NeedsRotate +
+ (unsigned) (bool) Res;
+ if (Use32BitInsts)
+ NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) +
+ (unsigned) (ANDIMask != 0 && ANDISMask != 0);
+ else
+ NumAndInsts += SelectInt64Count(Mask) + /* and */ 1;
+
+ unsigned NumRLInsts = 0;
+ bool FirstBG = true;
+ for (auto &BG : BitGroups) {
+ if (!MatchingBG(BG))
+ continue;
+ NumRLInsts +=
+ SelectRotMask64Count(BG.RLAmt, BG.Repl32, BG.StartIdx, BG.EndIdx,
+ !FirstBG);
+ FirstBG = false;
+ }
+
+ DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
+ " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":") <<
+ "\n\t\t\tisel using masking: " << NumAndInsts <<
+ " using rotates: " << NumRLInsts << "\n");
+
+ // When we'd use andi/andis, we bias toward using the rotates (andi only
+ // has a record form, and is cracked on POWER cores). However, when using
+ // general 64-bit constant formation, bias toward the constant form,
+ // because that exposes more opportunities for CSE.
+ if (NumAndInsts > NumRLInsts)
+ continue;
+ if (Use32BitInsts && NumAndInsts == NumRLInsts)
+ continue;
+
+ DEBUG(dbgs() << "\t\t\t\tusing masking\n");
+
+ if (InstCnt) *InstCnt += NumAndInsts;
+
+ SDValue VRot;
+ // We actually need to generate a rotation if we have a non-zero rotation
+ // factor or, in the Repl32 case, if we care about any of the
+      // higher-order replicated bits. In the latter case, we generate a
+      // wrapped mask (MB > ME) so that it actually covers the entire 64 bits.
+ if (VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask)))
+ VRot = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
+ VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63);
+ else
+ VRot = VRI.V;
+
+ SDValue TotalVal;
+ if (Use32BitInsts) {
+ assert((ANDIMask != 0 || ANDISMask != 0) &&
+ "No set bits in mask when using 32-bit ands for 64-bit value");
+
+ SDValue ANDIVal, ANDISVal;
+ if (ANDIMask != 0)
+ ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
+ VRot, getI32Imm(ANDIMask)), 0);
+ if (ANDISMask != 0)
+ ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
+ VRot, getI32Imm(ANDISMask)), 0);
+
+ if (!ANDIVal)
+ TotalVal = ANDISVal;
+ else if (!ANDISVal)
+ TotalVal = ANDIVal;
+ else
+ TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
+ ANDIVal, ANDISVal), 0);
+ } else {
+ TotalVal = SDValue(SelectInt64(CurDAG, dl, Mask), 0);
+ TotalVal =
+ SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
+ VRot, TotalVal), 0);
+ }
+
+ if (!Res)
+ Res = TotalVal;
+ else
+ Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
+ Res, TotalVal), 0);
+
+ // Now, remove all groups with this underlying value and rotation
+ // factor.
+ for (auto I = BitGroups.begin(); I != BitGroups.end();) {
+ if (MatchingBG(*I))
+ I = BitGroups.erase(I);
+ else
+ ++I;
+ }
+ }
+ }
+
+ // Instruction selection for the 64-bit case.
+ SDNode *Select64(SDNode *N, bool LateMask, unsigned *InstCnt) {
+ SDLoc dl(N);
+ SDValue Res;
+
+ if (InstCnt) *InstCnt = 0;
+
+ // Take care of cases that should use andi/andis first.
+ SelectAndParts64(dl, Res, InstCnt);
+
+ // If we've not yet selected a 'starting' instruction, and we have no zeros
+ // to fill in, select the (Value, RLAmt) with the highest priority (largest
+ // number of groups), and start with this rotated value.
+ if ((!HasZeros || LateMask) && !Res) {
+ // If we have both Repl32 groups and non-Repl32 groups, the non-Repl32
+ // groups will come first, and so the VRI representing the largest number
+      // of groups might not be first (it might be the first Repl32 group).
+ unsigned MaxGroupsIdx = 0;
+ if (!ValueRotsVec[0].Repl32) {
+ for (unsigned i = 0, ie = ValueRotsVec.size(); i < ie; ++i)
+ if (ValueRotsVec[i].Repl32) {
+ if (ValueRotsVec[i].NumGroups > ValueRotsVec[0].NumGroups)
+ MaxGroupsIdx = i;
+ break;
+ }
+ }
+
+ ValueRotInfo &VRI = ValueRotsVec[MaxGroupsIdx];
+ bool NeedsRotate = false;
+ if (VRI.RLAmt) {
+ NeedsRotate = true;
+ } else if (VRI.Repl32) {
+ for (auto &BG : BitGroups) {
+ if (BG.V != VRI.V || BG.RLAmt != VRI.RLAmt ||
+ BG.Repl32 != VRI.Repl32)
+ continue;
+
+ // We don't need a rotate if the bit group is confined to the lower
+ // 32 bits.
+ if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx < BG.EndIdx)
+ continue;
+
+ NeedsRotate = true;
+ break;
+ }
+ }
+
+ if (NeedsRotate)
+ Res = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
+ VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63,
+ InstCnt);
+ else
+ Res = VRI.V;
+
+ // Now, remove all groups with this underlying value and rotation factor.
+ if (Res)
+ for (auto I = BitGroups.begin(); I != BitGroups.end();) {
+ if (I->V == VRI.V && I->RLAmt == VRI.RLAmt && I->Repl32 == VRI.Repl32)
+ I = BitGroups.erase(I);
+ else
+ ++I;
+ }
+ }
+
+ // Because 64-bit rotates are more flexible than inserts, we might have a
+ // preference regarding which one we do first (to save one instruction).
+ if (!Res)
+ for (auto I = BitGroups.begin(), IE = BitGroups.end(); I != IE; ++I) {
+ if (SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx,
+ false) <
+ SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx,
+ true)) {
+ if (I != BitGroups.begin()) {
+ BitGroup BG = *I;
+ BitGroups.erase(I);
+ BitGroups.insert(BitGroups.begin(), BG);
+ }
+
+ break;
+ }
+ }
+
+ // Insert the other groups (one at a time).
+ for (auto &BG : BitGroups) {
+ if (!Res)
+ Res = SelectRotMask64(BG.V, dl, BG.RLAmt, BG.Repl32, BG.StartIdx,
+ BG.EndIdx, InstCnt);
+ else
+ Res = SelectRotMaskIns64(Res, BG.V, dl, BG.RLAmt, BG.Repl32,
+ BG.StartIdx, BG.EndIdx, InstCnt);
+ }
+
+ if (LateMask) {
+ uint64_t Mask = getZerosMask();
+
+ // We can use the 32-bit andi/andis technique if the mask does not
+ // require any higher-order bits. This can save an instruction compared
+ // to always using the general 64-bit technique.
+ bool Use32BitInsts = isUInt<32>(Mask);
+ // Compute the masks for andi/andis that would be necessary.
+ unsigned ANDIMask = (Mask & UINT16_MAX),
+ ANDISMask = (Mask >> 16) & UINT16_MAX;
+
+ if (Use32BitInsts) {
+ assert((ANDIMask != 0 || ANDISMask != 0) &&
+ "No set bits in mask when using 32-bit ands for 64-bit value");
+
+ if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) +
+ (unsigned) (ANDISMask != 0) +
+ (unsigned) (ANDIMask != 0 && ANDISMask != 0);
+
+ SDValue ANDIVal, ANDISVal;
+ if (ANDIMask != 0)
+ ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
+ Res, getI32Imm(ANDIMask)), 0);
+ if (ANDISMask != 0)
+ ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
+ Res, getI32Imm(ANDISMask)), 0);
+
+ if (!ANDIVal)
+ Res = ANDISVal;
+ else if (!ANDISVal)
+ Res = ANDIVal;
+ else
+ Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
+ ANDIVal, ANDISVal), 0);
+ } else {
+ if (InstCnt) *InstCnt += SelectInt64Count(Mask) + /* and */ 1;
+
+ SDValue MaskVal = SDValue(SelectInt64(CurDAG, dl, Mask), 0);
+ Res =
+ SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
+ Res, MaskVal), 0);
+ }
+ }
+
+ return Res.getNode();
+ }
+
+ SDNode *Select(SDNode *N, bool LateMask, unsigned *InstCnt = nullptr) {
+ // Fill in BitGroups.
+ collectBitGroups(LateMask);
+ if (BitGroups.empty())
+ return nullptr;
+
+ // For 64-bit values, figure out when we can use 32-bit instructions.
+ if (Bits.size() == 64)
+ assignRepl32BitGroups();
+
+ // Fill in ValueRotsVec.
+ collectValueRotInfo();
+
+ if (Bits.size() == 32) {
+ return Select32(N, LateMask, InstCnt);
+ } else {
+ assert(Bits.size() == 64 && "Not 64 bits here?");
+ return Select64(N, LateMask, InstCnt);
+ }
+
+ return nullptr;
+ }
+
+ SmallVector<ValueBit, 64> Bits;
+
+ bool HasZeros;
+ SmallVector<unsigned, 64> RLAmt;
+
+ SmallVector<BitGroup, 16> BitGroups;
+
+ DenseMap<std::pair<SDValue, unsigned>, ValueRotInfo> ValueRots;
+ SmallVector<ValueRotInfo, 16> ValueRotsVec;
+
+ SelectionDAG *CurDAG;
+
+public:
+ BitPermutationSelector(SelectionDAG *DAG)
+ : CurDAG(DAG) {}
+
+ // Here we try to match complex bit permutations into a set of
+ // rotate-and-shift/shift/and/or instructions, using a set of heuristics
+  // known to produce optimal code for common cases (like i32 byte swapping).
+ SDNode *Select(SDNode *N) {
+ Bits.resize(N->getValueType(0).getSizeInBits());
+ if (!getValueBits(SDValue(N, 0), Bits))
+ return nullptr;
+
+ DEBUG(dbgs() << "Considering bit-permutation-based instruction"
+ " selection for: ");
+ DEBUG(N->dump(CurDAG));
+
+    // Fill in RLAmt and set HasZeros.
+ computeRotationAmounts();
+
+ if (!HasZeros)
+ return Select(N, false);
+
+ // We currently have two techniques for handling results with zeros: early
+ // masking (the default) and late masking. Late masking is sometimes more
+ // efficient, but because the structure of the bit groups is different, it
+ // is hard to tell without generating both and comparing the results. With
+ // late masking, we ignore zeros in the resulting value when inserting each
+ // set of bit groups, and then mask in the zeros at the end. With early
+ // masking, we only insert the non-zero parts of the result at every step.
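+    // For example, for a byte swap in which some result bytes are known
+    // zero, early masking shapes each rotate-and-mask around the zeros as it
+    // inserts, while late masking inserts whole rotated values and applies a
+    // single AND at the end; which is cheaper depends on the group structure.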
+
+ unsigned InstCnt, InstCntLateMask;
+ DEBUG(dbgs() << "\tEarly masking:\n");
+ SDNode *RN = Select(N, false, &InstCnt);
+ DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n");
+
+ DEBUG(dbgs() << "\tLate masking:\n");
+ SDNode *RNLM = Select(N, true, &InstCntLateMask);
+ DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask <<
+ " instructions\n");
+
+ if (InstCnt <= InstCntLateMask) {
+ DEBUG(dbgs() << "\tUsing early-masking for isel\n");
+ return RN;
+ }
+
+ DEBUG(dbgs() << "\tUsing late-masking for isel\n");
+ return RNLM;
+ }
+};
+} // anonymous namespace
+
+SDNode *PPCDAGToDAGISel::SelectBitPermutation(SDNode *N) {
+ if (N->getValueType(0) != MVT::i32 &&
+ N->getValueType(0) != MVT::i64)
+ return nullptr;
+
+ if (!UseBitPermRewriter)
+ return nullptr;
+
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ROTL:
+ case ISD::SHL:
+ case ISD::SRL:
+ case ISD::AND:
+ case ISD::OR: {
+ BitPermutationSelector BPS(CurDAG);
+ return BPS.Select(N);
+ }
+ }
+
+ return nullptr;
+}
+
/// SelectCC - Select a comparison of the specified values with the specified
/// condition code, returning the CR# of the expression.
SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
@@ -859,6 +2295,9 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// Altivec Vector compare instructions do not set any CR register by default and
// vector compare operations return the same type as the operands.
if (LHS.getValueType().isVector()) {
+ if (PPCSubTarget->hasQPX())
+ return nullptr;
+
EVT VecVT = LHS.getValueType();
bool Swap, Negate;
unsigned int VCmpInst = getVCmpInst(VecVT.getSimpleVT(), CC,
@@ -905,6 +2344,14 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
}
+SDNode *PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ return Result;
+}
+
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
@@ -922,81 +2369,16 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
N->getOperand(1).getOpcode() == ISD::TargetConstant)
llvm_unreachable("Invalid ADD with TargetConstant operand");
+ // Try matching complex bit permutations before doing anything else.
+ if (SDNode *NN = SelectBitPermutation(N))
+ return NN;
+
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- if (N->getValueType(0) == MVT::i64) {
- // Get 64 bit value.
- int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
- // Assume no remaining bits.
- unsigned Remainder = 0;
- // Assume no shift required.
- unsigned Shift = 0;
-
- // If it can't be represented as a 32 bit value.
- if (!isInt<32>(Imm)) {
- Shift = countTrailingZeros<uint64_t>(Imm);
- int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
-
- // If the shifted value fits 32 bits.
- if (isInt<32>(ImmSh)) {
- // Go with the shifted value.
- Imm = ImmSh;
- } else {
- // Still stuck with a 64 bit value.
- Remainder = Imm;
- Shift = 32;
- Imm >>= 32;
- }
- }
-
- // Intermediate operand.
- SDNode *Result;
-
- // Handle first 32 bits.
- unsigned Lo = Imm & 0xFFFF;
- unsigned Hi = (Imm >> 16) & 0xFFFF;
-
- // Simple value.
- if (isInt<16>(Imm)) {
- // Just the Lo bits.
- Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
- } else if (Lo) {
- // Handle the Hi bits.
- unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
- Result = CurDAG->getMachineNode(OpC, dl, MVT::i64, getI32Imm(Hi));
- // And Lo bits.
- Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
- SDValue(Result, 0), getI32Imm(Lo));
- } else {
- // Just the Hi bits.
- Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
- }
-
- // If no shift, we're done.
- if (!Shift) return Result;
-
- // Shift for next step if the upper 32-bits were not zero.
- if (Imm) {
- Result = CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64,
- SDValue(Result, 0),
- getI32Imm(Shift),
- getI32Imm(63 - Shift));
- }
-
- // Add in the last bits as required.
- if ((Hi = (Remainder >> 16) & 0xFFFF)) {
- Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
- SDValue(Result, 0), getI32Imm(Hi));
- }
- if ((Lo = Remainder & 0xFFFF)) {
- Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
- SDValue(Result, 0), getI32Imm(Lo));
- }
-
- return Result;
- }
+ if (N->getValueType(0) == MVT::i64)
+ return SelectInt64(CurDAG, N);
break;
}
@@ -1009,16 +2391,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case PPCISD::GlobalBaseReg:
return getGlobalBaseReg();
- case ISD::FrameIndex: {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
- unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
- if (N->hasOneUse())
- return CurDAG->SelectNodeTo(N, Opc, N->getValueType(0), TFI,
- getSmallIPtrImm(0));
- return CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
- getSmallIPtrImm(0));
- }
+ case ISD::FrameIndex:
+ return getFrameIndex(N, N);
case PPCISD::MFOCRF: {
SDValue InFlag = N->getOperand(1);
@@ -1026,35 +2400,31 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
N->getOperand(0), InFlag);
}
- case ISD::SDIV: {
- // FIXME: since this depends on the setting of the carry flag from the srawi
- // we should really be making notes about that for the scheduler.
- // FIXME: It sure would be nice if we could cheaply recognize the
- // srl/add/sra pattern the dag combiner will generate for this as
- // sra/addze rather than having to handle sdiv ourselves. oh well.
- unsigned Imm;
- if (isInt32Immediate(N->getOperand(1), Imm)) {
- SDValue N0 = N->getOperand(0);
- if ((signed)Imm > 0 && isPowerOf2_32(Imm)) {
- SDNode *Op =
- CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
- N0, getI32Imm(Log2_32(Imm)));
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
- SDValue(Op, 0), SDValue(Op, 1));
- } else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) {
- SDNode *Op =
- CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
- N0, getI32Imm(Log2_32(-Imm)));
- SDValue PT =
- SDValue(CurDAG->getMachineNode(PPC::ADDZE, dl, MVT::i32,
- SDValue(Op, 0), SDValue(Op, 1)),
- 0);
- return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT);
- }
- }
+ case PPCISD::READ_TIME_BASE: {
+ return CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32,
+ MVT::Other, N->getOperand(0));
+ }
- // Other cases are autogenerated.
- break;
+ case PPCISD::SRA_ADDZE: {
+ SDValue N0 = N->getOperand(0);
+ SDValue ShiftAmt =
+ CurDAG->getTargetConstant(*cast<ConstantSDNode>(N->getOperand(1))->
+ getConstantIntValue(), N->getValueType(0));
+ if (N->getValueType(0) == MVT::i64) {
+ SDNode *Op =
+ CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue,
+ N0, ShiftAmt);
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64,
+ SDValue(Op, 0), SDValue(Op, 1));
+ } else {
+ assert(N->getValueType(0) == MVT::i32 &&
+ "Expecting i64 or i32 in PPCISD::SRA_ADDZE");
+ SDNode *Op =
+ CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
+ N0, ShiftAmt);
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ SDValue(Op, 0), SDValue(Op, 1));
+ }
}
case ISD::LOAD: {
@@ -1100,9 +2470,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
- return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
- PPCLowering->getPointerTy(),
- MVT::Other, Ops);
+ return transferMemOperands(N, CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ PPCLowering->getPointerTy(),
+ MVT::Other, Ops));
} else {
unsigned Opcode;
bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
@@ -1111,6 +2482,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
switch (LoadedVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid PPC load type!");
+ case MVT::v4f64: Opcode = PPC::QVLFDUX; break; // QPX
+ case MVT::v4f32: Opcode = PPC::QVLFSUX; break; // QPX
case MVT::f64: Opcode = PPC::LFDUX; break;
case MVT::f32: Opcode = PPC::LFSUX; break;
case MVT::i32: Opcode = PPC::LWZUX; break;
@@ -1135,9 +2508,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Base, Offset, Chain };
- return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
- PPCLowering->getPointerTy(),
- MVT::Other, Ops);
+ return transferMemOperands(N, CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ PPCLowering->getPointerTy(),
+ MVT::Other, Ops));
}
}
@@ -1166,7 +2540,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
isMask_64(Imm64)) {
SDValue Val = N->getOperand(0);
- MB = 64 - CountTrailingOnes_64(Imm64);
+ MB = 64 - countTrailingOnes(Imm64);
SH = 0;
// If the operand is a logical right shift, we can fold it into this
@@ -1207,13 +2581,34 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
}
- case ISD::OR:
+ case ISD::OR: {
if (N->getValueType(0) == MVT::i32)
if (SDNode *I = SelectBitfieldInsert(N))
return I;
+ short Imm;
+ if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
+ isIntS16Immediate(N->getOperand(1), Imm)) {
+ APInt LHSKnownZero, LHSKnownOne;
+ CurDAG->computeKnownBits(N->getOperand(0), LHSKnownZero, LHSKnownOne);
+
+ // If this is equivalent to an add, then we can fold it with the
+ // FrameIndex calculation.
+ if ((LHSKnownZero.getZExtValue()|~(uint64_t)Imm) == ~0ULL)
+ return getFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+ }
+
// Other cases are autogenerated.
break;
+ }
+ case ISD::ADD: {
+ short Imm;
+ if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
+ isIntS16Immediate(N->getOperand(1), Imm))
+ return getFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+
+ break;
+ }
case ISD::SHL: {
unsigned Imm, SH, MB, ME;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
@@ -1333,6 +2728,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SelectCCOp = PPC::SELECT_CC_VSFRC;
else
SelectCCOp = PPC::SELECT_CC_F8;
+ else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4f64)
+ SelectCCOp = PPC::SELECT_CC_QFRC;
+ else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4f32)
+ SelectCCOp = PPC::SELECT_CC_QSRC;
+ else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4i1)
+ SelectCCOp = PPC::SELECT_CC_QBRC;
else if (N->getValueType(0) == MVT::v2f64 ||
N->getValueType(0) == MVT::v2i64)
SelectCCOp = PPC::SELECT_CC_VSRC;
@@ -1365,6 +2766,15 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
else
DM[i] = 1;
+ // For little endian, we must swap the input operands and adjust
+ // the mask elements (reverse and invert them).
+ if (PPCSubTarget->isLittleEndian()) {
+ std::swap(Op1, Op2);
+ unsigned tmp = DM[0];
+ DM[0] = 1 - DM[1];
+ DM[1] = 1 - tmp;
+ }
+
SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), MVT::i32);
if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 &&
@@ -1453,8 +2863,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
"Only supported for 64-bit ABI and 32-bit SVR4");
if (PPCSubTarget->isSVR4ABI() && !PPCSubTarget->isPPC64()) {
SDValue GA = N->getOperand(0);
- return CurDAG->getMachineNode(PPC::LWZtoc, dl, MVT::i32, GA,
- N->getOperand(1));
+ return transferMemOperands(N, CurDAG->getMachineNode(PPC::LWZtoc, dl,
+ MVT::i32, GA, N->getOperand(1)));
}
// For medium and large code model, we generate two instructions as
@@ -1474,12 +2884,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue GA = N->getOperand(0);
SDValue TOCbase = N->getOperand(1);
SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64,
- TOCbase, GA);
+ TOCbase, GA);
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA) ||
CModel == CodeModel::Large)
- return CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
- SDValue(Tmp, 0));
+ return transferMemOperands(N, CurDAG->getMachineNode(PPC::LDtocL, dl,
+ MVT::i64, GA, SDValue(Tmp, 0)));
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
const GlobalValue *GValue = G->getGlobal();
@@ -1487,8 +2897,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
(GValue->isDeclaration() || GValue->isWeakForLinker())) ||
GValue->isDeclaration() || GValue->hasCommonLinkage() ||
GValue->hasAvailableExternallyLinkage())
- return CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
- SDValue(Tmp, 0));
+ return transferMemOperands(N, CurDAG->getMachineNode(PPC::LDtocL, dl,
+ MVT::i64, GA, SDValue(Tmp, 0)));
}
return CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64,
@@ -1576,6 +2986,324 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return SelectCode(N);
}
+// If the target supports the cmpb instruction, do the idiom recognition here.
+// We don't do this as a DAG combine because we don't want to do it as nodes
+// are being combined (because we might miss part of the eventual idiom). We
+// don't want to do it during instruction selection because we want the
+// masking operations it produces to be lowered by the logic already part of
+// the instruction selector.
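+//
+// The cmpb instruction compares its two inputs byte by byte: each byte of
+// the result is 0xff if the corresponding input bytes are equal and 0x00
+// otherwise. So, for i32 values a and b, an expression like
+//   (((a & 0xff) == (b & 0xff)) ? 0xff : 0) |
+//   (((a & 0xff00) == (b & 0xff00)) ? 0xff00 : 0) | ...
+// is equivalent to cmpb(a, b) over the covered bytes.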
+SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
+ SDLoc dl(N);
+
+ assert(N->getOpcode() == ISD::OR &&
+ "Only OR nodes are supported for CMPB");
+
+ SDValue Res;
+ if (!PPCSubTarget->hasCMPB())
+ return Res;
+
+ if (N->getValueType(0) != MVT::i32 &&
+ N->getValueType(0) != MVT::i64)
+ return Res;
+
+ EVT VT = N->getValueType(0);
+
+ SDValue RHS, LHS;
+ bool BytesFound[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ uint64_t Mask = 0, Alt = 0;
+
+ auto IsByteSelectCC = [this](SDValue O, unsigned &b,
+ uint64_t &Mask, uint64_t &Alt,
+ SDValue &LHS, SDValue &RHS) {
+ if (O.getOpcode() != ISD::SELECT_CC)
+ return false;
+ ISD::CondCode CC = cast<CondCodeSDNode>(O.getOperand(4))->get();
+
+ if (!isa<ConstantSDNode>(O.getOperand(2)) ||
+ !isa<ConstantSDNode>(O.getOperand(3)))
+ return false;
+
+ uint64_t PM = O.getConstantOperandVal(2);
+ uint64_t PAlt = O.getConstantOperandVal(3);
+ for (b = 0; b < 8; ++b) {
+ uint64_t Mask = UINT64_C(0xFF) << (8*b);
+ if (PM && (PM & Mask) == PM && (PAlt & Mask) == PAlt)
+ break;
+ }
+
+ if (b == 8)
+ return false;
+ Mask |= PM;
+ Alt |= PAlt;
+
+ if (!isa<ConstantSDNode>(O.getOperand(1)) ||
+ O.getConstantOperandVal(1) != 0) {
+ SDValue Op0 = O.getOperand(0), Op1 = O.getOperand(1);
+ if (Op0.getOpcode() == ISD::TRUNCATE)
+ Op0 = Op0.getOperand(0);
+ if (Op1.getOpcode() == ISD::TRUNCATE)
+ Op1 = Op1.getOperand(0);
+
+ if (Op0.getOpcode() == ISD::SRL && Op1.getOpcode() == ISD::SRL &&
+ Op0.getOperand(1) == Op1.getOperand(1) && CC == ISD::SETEQ &&
+ isa<ConstantSDNode>(Op0.getOperand(1))) {
+
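+        // Shifting both operands right by Bits-8 isolates the
+        // highest-order byte, so this form can only match b == Bits/8-1.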
+ unsigned Bits = Op0.getValueType().getSizeInBits();
+ if (b != Bits/8-1)
+ return false;
+ if (Op0.getConstantOperandVal(1) != Bits-8)
+ return false;
+
+ LHS = Op0.getOperand(0);
+ RHS = Op1.getOperand(0);
+ return true;
+ }
+
+ // When we have small integers (i16 to be specific), the form present
+ // post-legalization uses SETULT in the SELECT_CC for the
+ // higher-order byte, depending on the fact that the
+ // even-higher-order bytes are known to all be zero, for example:
+ // select_cc (xor $lhs, $rhs), 256, 65280, 0, setult
+ // (so when the second byte is the same, because all higher-order
+ // bits from bytes 3 and 4 are known to be zero, the result of the
+ // xor can be at most 255)
+ if (Op0.getOpcode() == ISD::XOR && CC == ISD::SETULT &&
+ isa<ConstantSDNode>(O.getOperand(1))) {
+
+ uint64_t ULim = O.getConstantOperandVal(1);
+ if (ULim != (UINT64_C(1) << b*8))
+ return false;
+
+ // Now we need to make sure that the upper bytes are known to be
+ // zero.
+ unsigned Bits = Op0.getValueType().getSizeInBits();
+ if (!CurDAG->MaskedValueIsZero(Op0,
+ APInt::getHighBitsSet(Bits, Bits - (b+1)*8)))
+ return false;
+
+ LHS = Op0.getOperand(0);
+ RHS = Op0.getOperand(1);
+ return true;
+ }
+
+ return false;
+ }
+
+ if (CC != ISD::SETEQ)
+ return false;
+
+ SDValue Op = O.getOperand(0);
+ if (Op.getOpcode() == ISD::AND) {
+ if (!isa<ConstantSDNode>(Op.getOperand(1)))
+ return false;
+ if (Op.getConstantOperandVal(1) != (UINT64_C(0xFF) << (8*b)))
+ return false;
+
+ SDValue XOR = Op.getOperand(0);
+ if (XOR.getOpcode() == ISD::TRUNCATE)
+ XOR = XOR.getOperand(0);
+ if (XOR.getOpcode() != ISD::XOR)
+ return false;
+
+ LHS = XOR.getOperand(0);
+ RHS = XOR.getOperand(1);
+ return true;
+ } else if (Op.getOpcode() == ISD::SRL) {
+ if (!isa<ConstantSDNode>(Op.getOperand(1)))
+ return false;
+ unsigned Bits = Op.getValueType().getSizeInBits();
+ if (b != Bits/8-1)
+ return false;
+ if (Op.getConstantOperandVal(1) != Bits-8)
+ return false;
+
+ SDValue XOR = Op.getOperand(0);
+ if (XOR.getOpcode() == ISD::TRUNCATE)
+ XOR = XOR.getOperand(0);
+ if (XOR.getOpcode() != ISD::XOR)
+ return false;
+
+ LHS = XOR.getOperand(0);
+ RHS = XOR.getOperand(1);
+ return true;
+ }
+
+ return false;
+ };
+
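+  // Walk the tree of ORs, collecting one byte-wise select per byte lane;
+  // every select must compare the same pair of values (in either order).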
+ SmallVector<SDValue, 8> Queue(1, SDValue(N, 0));
+ while (!Queue.empty()) {
+ SDValue V = Queue.pop_back_val();
+
+ for (const SDValue &O : V.getNode()->ops()) {
+ unsigned b;
+ uint64_t M = 0, A = 0;
+ SDValue OLHS, ORHS;
+ if (O.getOpcode() == ISD::OR) {
+ Queue.push_back(O);
+ } else if (IsByteSelectCC(O, b, M, A, OLHS, ORHS)) {
+ if (!LHS) {
+ LHS = OLHS;
+ RHS = ORHS;
+ BytesFound[b] = true;
+ Mask |= M;
+ Alt |= A;
+ } else if ((LHS == ORHS && RHS == OLHS) ||
+ (RHS == ORHS && LHS == OLHS)) {
+ BytesFound[b] = true;
+ Mask |= M;
+ Alt |= A;
+ } else {
+ return Res;
+ }
+ } else {
+ return Res;
+ }
+ }
+ }
+
+ unsigned LastB = 0, BCnt = 0;
+ for (unsigned i = 0; i < 8; ++i)
+    if (BytesFound[i]) {
+ ++BCnt;
+ LastB = i;
+ }
+
+ if (!LastB || BCnt < 2)
+ return Res;
+
+  // Because we'll be zero-extending the output anyway if we don't have a
+  // specific value for each input byte (via the Mask), we can 'anyext' the
+  // inputs.
+ if (LHS.getValueType() != VT) {
+ LHS = CurDAG->getAnyExtOrTrunc(LHS, dl, VT);
+ RHS = CurDAG->getAnyExtOrTrunc(RHS, dl, VT);
+ }
+
+ Res = CurDAG->getNode(PPCISD::CMPB, dl, VT, LHS, RHS);
+
+ bool NonTrivialMask = ((int64_t) Mask) != INT64_C(-1);
+ if (NonTrivialMask && !Alt) {
+ // Res = Mask & CMPB
+ Res = CurDAG->getNode(ISD::AND, dl, VT, Res, CurDAG->getConstant(Mask, VT));
+ } else if (Alt) {
+ // Res = (CMPB & Mask) | (~CMPB & Alt)
+ // Which, as suggested here:
+ // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
+ // can be written as:
+ // Res = Alt ^ ((Alt ^ Mask) & CMPB)
+ // useful because the (Alt ^ Mask) can be pre-computed.
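+    // (This works here because each byte of CMPB is all ones or all zeros:
+    // where a byte of CMPB is 0xff the merge selects the byte of Mask, and
+    // where it is 0x00 it selects the byte of Alt.)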
+ Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
+ CurDAG->getConstant(Mask ^ Alt, VT));
+ Res = CurDAG->getNode(ISD::XOR, dl, VT, Res, CurDAG->getConstant(Alt, VT));
+ }
+
+ return Res;
+}
+
+// When CR bit registers are enabled, an extension of an i1 variable to an i32
+// or i64 value is lowered in terms of a SELECT_I[48] operation, and thus
+// involves constant materialization of a 0 or a 1 or both. If the result of
+// the extension is then operated upon by some operator that can be constant
+// folded with a constant 0 or 1, and the folded constants can themselves be
+// materialized using only one instruction each (like a zero or a one), then
+// we should fold those operations in with the select.
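+//
+// For example, (add (zext i1 %c), 1) can become (select %c, 2, 1), and if
+// that select feeds another such foldable operator, the process repeats.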
+void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
+ if (!PPCSubTarget->useCRBits())
+ return;
+
+ if (N->getOpcode() != ISD::ZERO_EXTEND &&
+ N->getOpcode() != ISD::SIGN_EXTEND &&
+ N->getOpcode() != ISD::ANY_EXTEND)
+ return;
+
+ if (N->getOperand(0).getValueType() != MVT::i1)
+ return;
+
+ if (!N->hasOneUse())
+ return;
+
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ SDValue Cond = N->getOperand(0);
+ SDValue ConstTrue =
+ CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, VT);
+ SDValue ConstFalse = CurDAG->getConstant(0, VT);
+
+ do {
+ SDNode *User = *N->use_begin();
+ if (User->getNumOperands() != 2)
+ break;
+
+ auto TryFold = [this, N, User](SDValue Val) {
+ SDValue UserO0 = User->getOperand(0), UserO1 = User->getOperand(1);
+ SDValue O0 = UserO0.getNode() == N ? Val : UserO0;
+ SDValue O1 = UserO1.getNode() == N ? Val : UserO1;
+
+ return CurDAG->FoldConstantArithmetic(User->getOpcode(),
+ User->getValueType(0),
+ O0.getNode(), O1.getNode());
+ };
+
+ SDValue TrueRes = TryFold(ConstTrue);
+ if (!TrueRes)
+ break;
+ SDValue FalseRes = TryFold(ConstFalse);
+ if (!FalseRes)
+ break;
+
+ // For us to materialize these using one instruction, we must be able to
+ // represent them as signed 16-bit integers.
+ uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(),
+ False = cast<ConstantSDNode>(FalseRes)->getZExtValue();
+ if (!isInt<16>(True) || !isInt<16>(False))
+ break;
+
+ // We can replace User with a new SELECT node, and try again to see if we
+ // can fold the select with its user.
+ Res = CurDAG->getSelect(dl, User->getValueType(0), Cond, TrueRes, FalseRes);
+ N = User;
+ ConstTrue = TrueRes;
+ ConstFalse = FalseRes;
+ } while (N->hasOneUse());
+}
+
+void PPCDAGToDAGISel::PreprocessISelDAG() {
+ SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
+ ++Position;
+
+ bool MadeChange = false;
+ while (Position != CurDAG->allnodes_begin()) {
+ SDNode *N = --Position;
+ if (N->use_empty())
+ continue;
+
+ SDValue Res;
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::OR:
+ Res = combineToCMPB(N);
+ break;
+ }
+
+ if (!Res)
+ foldBoolExts(Res, N);
+
+ if (Res) {
+ DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: ");
+ DEBUG(N->dump(CurDAG));
+ DEBUG(dbgs() << "\nNew: ");
+ DEBUG(Res.getNode()->dump(CurDAG));
+ DEBUG(dbgs() << "\n");
+
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
+ MadeChange = true;
+ }
+ }
+
+ if (MadeChange)
+ CurDAG->RemoveDeadNodes();
+}
+
/// PostprocessISelDAG - Perform some late peephole optimizations
/// on the DAG representation.
void PPCDAGToDAGISel::PostprocessISelDAG() {
@@ -1586,6 +3314,7 @@ void PPCDAGToDAGISel::PostprocessISelDAG() {
PeepholePPC64();
PeepholeCROps();
+ PeepholePPC64ZExt();
}
// Check if all users of this node will become isel where the second operand
@@ -1700,6 +3429,9 @@ void PPCDAGToDAGISel::PeepholeCROps() {
case PPC::SELECT_I8:
case PPC::SELECT_F4:
case PPC::SELECT_F8:
+ case PPC::SELECT_QFRC:
+ case PPC::SELECT_QSRC:
+ case PPC::SELECT_QBRC:
case PPC::SELECT_VRRC:
case PPC::SELECT_VSFRC:
case PPC::SELECT_VSRC: {
@@ -2007,6 +3739,9 @@ void PPCDAGToDAGISel::PeepholeCROps() {
case PPC::SELECT_I8:
case PPC::SELECT_F4:
case PPC::SELECT_F8:
+ case PPC::SELECT_QFRC:
+ case PPC::SELECT_QSRC:
+ case PPC::SELECT_QBRC:
case PPC::SELECT_VRRC:
case PPC::SELECT_VSFRC:
case PPC::SELECT_VSRC:
@@ -2059,6 +3794,315 @@ void PPCDAGToDAGISel::PeepholeCROps() {
} while (IsModified);
}
+// Gather the set of 32-bit operations that are known to have their
+// higher-order 32 bits zero, where ToPromote contains all such operations.
+static bool PeepholePPC64ZExtGather(SDValue Op32,
+ SmallPtrSetImpl<SDNode *> &ToPromote) {
+ if (!Op32.isMachineOpcode())
+ return false;
+
+ // First, check for the "frontier" instructions (those that will clear the
+  // higher-order 32 bits).
+
+ // For RLWINM and RLWNM, we need to make sure that the mask does not wrap
+ // around. If it does not, then these instructions will clear the
+ // higher-order bits.
+ if ((Op32.getMachineOpcode() == PPC::RLWINM ||
+ Op32.getMachineOpcode() == PPC::RLWNM) &&
+ Op32.getConstantOperandVal(2) <= Op32.getConstantOperandVal(3)) {
+ ToPromote.insert(Op32.getNode());
+ return true;
+ }
+
+ // SLW and SRW always clear the higher-order bits.
+ if (Op32.getMachineOpcode() == PPC::SLW ||
+ Op32.getMachineOpcode() == PPC::SRW) {
+ ToPromote.insert(Op32.getNode());
+ return true;
+ }
+
+ // For LI and LIS, we need the immediate to be positive (so that it is not
+ // sign extended).
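+  // (LI and LIS sign-extend their 16-bit immediate fields, so bit 15 must
+  // be clear; hence the isUInt<15> check rather than isUInt<16>.)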
+ if (Op32.getMachineOpcode() == PPC::LI ||
+ Op32.getMachineOpcode() == PPC::LIS) {
+ if (!isUInt<15>(Op32.getConstantOperandVal(0)))
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+ return true;
+ }
+
+ // LHBRX and LWBRX always clear the higher-order bits.
+ if (Op32.getMachineOpcode() == PPC::LHBRX ||
+ Op32.getMachineOpcode() == PPC::LWBRX) {
+ ToPromote.insert(Op32.getNode());
+ return true;
+ }
+
+ // CNTLZW always produces a 64-bit value in [0,32], and so is zero extended.
+ if (Op32.getMachineOpcode() == PPC::CNTLZW) {
+ ToPromote.insert(Op32.getNode());
+ return true;
+ }
+
+ // Next, check for those instructions we can look through.
+
+ // Assuming the mask does not wrap around, then the higher-order bits are
+ // taken directly from the first operand.
+ if (Op32.getMachineOpcode() == PPC::RLWIMI &&
+ Op32.getConstantOperandVal(3) <= Op32.getConstantOperandVal(4)) {
+ SmallPtrSet<SDNode *, 16> ToPromote1;
+ if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1))
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+ ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
+ return true;
+ }
+
+ // For OR, the higher-order bits are zero if that is true for both operands.
+ // For SELECT_I4, the same is true (but the relevant operand numbers are
+ // shifted by 1).
+ if (Op32.getMachineOpcode() == PPC::OR ||
+ Op32.getMachineOpcode() == PPC::SELECT_I4) {
+ unsigned B = Op32.getMachineOpcode() == PPC::SELECT_I4 ? 1 : 0;
+ SmallPtrSet<SDNode *, 16> ToPromote1;
+ if (!PeepholePPC64ZExtGather(Op32.getOperand(B+0), ToPromote1))
+ return false;
+ if (!PeepholePPC64ZExtGather(Op32.getOperand(B+1), ToPromote1))
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+ ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
+ return true;
+ }
+
+ // For ORI and ORIS, we need the higher-order bits of the first operand to be
+ // zero, and also for the constant to be positive (so that it is not sign
+ // extended).
+ if (Op32.getMachineOpcode() == PPC::ORI ||
+ Op32.getMachineOpcode() == PPC::ORIS) {
+ SmallPtrSet<SDNode *, 16> ToPromote1;
+ if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1))
+ return false;
+ if (!isUInt<15>(Op32.getConstantOperandVal(1)))
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+ ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
+ return true;
+ }
+
+ // The higher-order bits of AND are zero if that is true for at least one of
+ // the operands.
+ if (Op32.getMachineOpcode() == PPC::AND) {
+ SmallPtrSet<SDNode *, 16> ToPromote1, ToPromote2;
+ bool Op0OK =
+ PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1);
+ bool Op1OK =
+ PeepholePPC64ZExtGather(Op32.getOperand(1), ToPromote2);
+ if (!Op0OK && !Op1OK)
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+
+ if (Op0OK)
+ ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
+
+ if (Op1OK)
+ ToPromote.insert(ToPromote2.begin(), ToPromote2.end());
+
+ return true;
+ }
+
+ // For ANDI and ANDIS, the higher-order bits are zero if either that is true
+ // of the first operand, or if the second operand is positive (so that it is
+ // not sign extended).
+ if (Op32.getMachineOpcode() == PPC::ANDIo ||
+ Op32.getMachineOpcode() == PPC::ANDISo) {
+ SmallPtrSet<SDNode *, 16> ToPromote1;
+ bool Op0OK =
+ PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1);
+ bool Op1OK = isUInt<15>(Op32.getConstantOperandVal(1));
+ if (!Op0OK && !Op1OK)
+ return false;
+
+ ToPromote.insert(Op32.getNode());
+
+ if (Op0OK)
+ ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
+
+ return true;
+ }
+
+ return false;
+}
+
+void PPCDAGToDAGISel::PeepholePPC64ZExt() {
+ if (!PPCSubTarget->isPPC64())
+ return;
+
+ // When we zero-extend from i32 to i64, we use a pattern like this:
+ // def : Pat<(i64 (zext i32:$in)),
+ // (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32),
+ // 0, 32)>;
+ // There are several 32-bit shift/rotate instructions, however, that will
+ // clear the higher-order bits of their output, rendering the RLDICL
+ // unnecessary. When that happens, we remove it here, and redefine the
+ // relevant 32-bit operation to be a 64-bit operation.
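+  //
+  // For example, (RLDICL (INSERT_SUBREG (IMPLICIT_DEF), (SLW ...), sub_32),
+  // 0, 32) becomes just (SLW8 ...), with any remaining i32 operands wrapped
+  // in INSERT_SUBREGs.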
+
+ SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
+ ++Position;
+
+ bool MadeChange = false;
+ while (Position != CurDAG->allnodes_begin()) {
+ SDNode *N = --Position;
+ // Skip dead nodes and any non-machine opcodes.
+ if (N->use_empty() || !N->isMachineOpcode())
+ continue;
+
+ if (N->getMachineOpcode() != PPC::RLDICL)
+ continue;
+
+ if (N->getConstantOperandVal(1) != 0 ||
+ N->getConstantOperandVal(2) != 32)
+ continue;
+
+ SDValue ISR = N->getOperand(0);
+ if (!ISR.isMachineOpcode() ||
+ ISR.getMachineOpcode() != TargetOpcode::INSERT_SUBREG)
+ continue;
+
+ if (!ISR.hasOneUse())
+ continue;
+
+ if (ISR.getConstantOperandVal(2) != PPC::sub_32)
+ continue;
+
+ SDValue IDef = ISR.getOperand(0);
+ if (!IDef.isMachineOpcode() ||
+ IDef.getMachineOpcode() != TargetOpcode::IMPLICIT_DEF)
+ continue;
+
+ // We now know that we're looking at a canonical i32 -> i64 zext. See if we
+ // can get rid of it.
+
+ SDValue Op32 = ISR->getOperand(1);
+ if (!Op32.isMachineOpcode())
+ continue;
+
+ // There are some 32-bit instructions that always clear the high-order 32
+    // bits; there are also some instructions (like AND) that we can look
+ // through.
+ SmallPtrSet<SDNode *, 16> ToPromote;
+ if (!PeepholePPC64ZExtGather(Op32, ToPromote))
+ continue;
+
+ // If the ToPromote set contains nodes that have uses outside of the set
+ // (except for the original INSERT_SUBREG), then abort the transformation.
+ bool OutsideUse = false;
+ for (SDNode *PN : ToPromote) {
+ for (SDNode *UN : PN->uses()) {
+ if (!ToPromote.count(UN) && UN != ISR.getNode()) {
+ OutsideUse = true;
+ break;
+ }
+ }
+
+ if (OutsideUse)
+ break;
+ }
+ if (OutsideUse)
+ continue;
+
+ MadeChange = true;
+
+    // We now know that this zero extension can be removed by promoting the
+    // nodes in ToPromote to 64-bit operations, where for operations in the
+ // frontier of the set, we need to insert INSERT_SUBREGs for their
+ // operands.
+ for (SDNode *PN : ToPromote) {
+ unsigned NewOpcode;
+ switch (PN->getMachineOpcode()) {
+ default:
+ llvm_unreachable("Don't know the 64-bit variant of this instruction");
+ case PPC::RLWINM: NewOpcode = PPC::RLWINM8; break;
+ case PPC::RLWNM: NewOpcode = PPC::RLWNM8; break;
+ case PPC::SLW: NewOpcode = PPC::SLW8; break;
+ case PPC::SRW: NewOpcode = PPC::SRW8; break;
+ case PPC::LI: NewOpcode = PPC::LI8; break;
+ case PPC::LIS: NewOpcode = PPC::LIS8; break;
+ case PPC::LHBRX: NewOpcode = PPC::LHBRX8; break;
+ case PPC::LWBRX: NewOpcode = PPC::LWBRX8; break;
+ case PPC::CNTLZW: NewOpcode = PPC::CNTLZW8; break;
+ case PPC::RLWIMI: NewOpcode = PPC::RLWIMI8; break;
+ case PPC::OR: NewOpcode = PPC::OR8; break;
+ case PPC::SELECT_I4: NewOpcode = PPC::SELECT_I8; break;
+ case PPC::ORI: NewOpcode = PPC::ORI8; break;
+ case PPC::ORIS: NewOpcode = PPC::ORIS8; break;
+ case PPC::AND: NewOpcode = PPC::AND8; break;
+ case PPC::ANDIo: NewOpcode = PPC::ANDIo8; break;
+ case PPC::ANDISo: NewOpcode = PPC::ANDISo8; break;
+ }
+
+ // Note: During the replacement process, the nodes will be in an
+ // inconsistent state (some instructions will have operands with values
+ // of the wrong type). Once done, however, everything should be right
+ // again.
+
+ SmallVector<SDValue, 4> Ops;
+ for (const SDValue &V : PN->ops()) {
+ if (!ToPromote.count(V.getNode()) && V.getValueType() == MVT::i32 &&
+ !isa<ConstantSDNode>(V)) {
+ SDValue ReplOpOps[] = { ISR.getOperand(0), V, ISR.getOperand(2) };
+ SDNode *ReplOp =
+ CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, SDLoc(V),
+ ISR.getNode()->getVTList(), ReplOpOps);
+ Ops.push_back(SDValue(ReplOp, 0));
+ } else {
+ Ops.push_back(V);
+ }
+ }
+
+ // Because all to-be-promoted nodes only have users that are other
+ // promoted nodes (or the original INSERT_SUBREG), we can safely replace
+ // the i32 result value type with i64.
+
+ SmallVector<EVT, 2> NewVTs;
+ SDVTList VTs = PN->getVTList();
+ for (unsigned i = 0, ie = VTs.NumVTs; i != ie; ++i)
+ if (VTs.VTs[i] == MVT::i32)
+ NewVTs.push_back(MVT::i64);
+ else
+ NewVTs.push_back(VTs.VTs[i]);
+
+ DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: ");
+ DEBUG(PN->dump(CurDAG));
+
+ CurDAG->SelectNodeTo(PN, NewOpcode, CurDAG->getVTList(NewVTs), Ops);
+
+ DEBUG(dbgs() << "\nNew: ");
+ DEBUG(PN->dump(CurDAG));
+ DEBUG(dbgs() << "\n");
+ }
+
+ // Now we replace the original zero extend and its associated INSERT_SUBREG
+ // with the value feeding the INSERT_SUBREG (which has now been promoted to
+ // return an i64).
+
+ DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: ");
+ DEBUG(N->dump(CurDAG));
+ DEBUG(dbgs() << "\nNew: ");
+ DEBUG(Op32.getNode()->dump(CurDAG));
+ DEBUG(dbgs() << "\n");
+
+ ReplaceUses(N, Op32.getNode());
+ }
+
+ if (MadeChange)
+ CurDAG->RemoveDeadNodes();
+}
+
void PPCDAGToDAGISel::PeepholePPC64() {
// These optimizations are currently supported only for 64-bit SVR4.
if (PPCSubTarget->isDarwin() || !PPCSubTarget->isPPC64())
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index e93bdaf..147e94b 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -13,6 +13,7 @@
#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
+#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
@@ -24,6 +25,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
@@ -55,11 +57,9 @@ cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;
-PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
- : TargetLowering(TM),
- Subtarget(*TM.getSubtargetImpl()) {
- setPow2SDivIsCheap();
-
+PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
+ const PPCSubtarget &STI)
+ : TargetLowering(TM), Subtarget(STI) {
// Use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(true);
@@ -75,8 +75,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
+ }
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -86,11 +88,15 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
if (Subtarget.useCRBits()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -115,12 +121,11 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
if (ANDIGlueBug)
setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setTruncStoreAction(MVT::i64, MVT::i1, Expand);
- setTruncStoreAction(MVT::i32, MVT::i1, Expand);
- setTruncStoreAction(MVT::i16, MVT::i1, Expand);
- setTruncStoreAction(MVT::i8, MVT::i1, Expand);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
}
@@ -171,13 +176,13 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
// If we're enabling GP optimizations, use hardware square root
if (!Subtarget.hasFSQRT() &&
- !(TM.Options.UnsafeFPMath &&
- Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
+ !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
+ Subtarget.hasFRE()))
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
if (!Subtarget.hasFSQRT() &&
- !(TM.Options.UnsafeFPMath &&
- Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
+ !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
+ Subtarget.hasFRES()))
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
if (Subtarget.hasFCPSGN()) {
@@ -395,14 +400,21 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
if (Subtarget.hasAltivec()) {
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
-
+ for (MVT VT : MVT::vector_valuetypes()) {
// add/sub are legal for all supported vector VT's.
setOperationAction(ISD::ADD , VT, Legal);
setOperationAction(ISD::SUB , VT, Legal);
+ // Vector instructions introduced in P8
+ if (Subtarget.hasP8Altivec()) {
+ setOperationAction(ISD::CTPOP, VT, Legal);
+ setOperationAction(ISD::CTLZ, VT, Legal);
+      } else {
+ setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ }
+
// We promote all shuffles to v16i8.
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
@@ -457,22 +469,18 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
- setOperationAction(ISD::CTPOP, VT, Expand);
- setOperationAction(ISD::CTLZ, VT, Expand);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
- for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
- MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
+ for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
}
- setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
@@ -597,12 +605,171 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
}
+
+ if (Subtarget.hasP8Altivec())
+ addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
+ }
+
+ if (Subtarget.hasQPX()) {
+ setOperationAction(ISD::FADD, MVT::v4f64, Legal);
+ setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
+ setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
+ setOperationAction(ISD::FREM, MVT::v4f64, Expand);
+
+ setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
+ setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
+
+ setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
+ setOperationAction(ISD::STORE , MVT::v4f64, Custom);
+
+ setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
+
+ if (!Subtarget.useCRBits())
+ setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
+
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
+ setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
+
+ setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
+ setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
+
+ setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
+ setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
+ setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
+
+ setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
+ setOperationAction(ISD::FABS , MVT::v4f64, Legal);
+ setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
+ setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
+ setOperationAction(ISD::FPOWI , MVT::v4f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
+ setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
+ setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
+ setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
+ setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
+ setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
+
+ setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
+
+ setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
+
+ addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
+
+ setOperationAction(ISD::FADD, MVT::v4f32, Legal);
+ setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
+ setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
+ setOperationAction(ISD::FREM, MVT::v4f32, Expand);
+
+ setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
+ setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
+
+ setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
+ setOperationAction(ISD::STORE , MVT::v4f32, Custom);
+
+ if (!Subtarget.useCRBits())
+ setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
+
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
+ setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
+
+ setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
+ setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
+
+ setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
+ setOperationAction(ISD::FABS , MVT::v4f32, Legal);
+ setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
+ setOperationAction(ISD::FPOWI , MVT::v4f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
+ setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
+ setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
+
+ setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
+
+ setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
+
+ addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
+
+ setOperationAction(ISD::AND , MVT::v4i1, Legal);
+ setOperationAction(ISD::OR , MVT::v4i1, Legal);
+ setOperationAction(ISD::XOR , MVT::v4i1, Legal);
+
+ if (!Subtarget.useCRBits())
+ setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
+
+ setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
+ setOperationAction(ISD::STORE , MVT::v4i1, Custom);
+
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
+ setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
+
+ setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
+
+ addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
+
+ setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
+ setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
+
+ setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
+
+ setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
+
+ // These need to set FE_INEXACT, and so cannot be vectorized here.
+ setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
+ setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
+
+ if (TM.Options.UnsafeFPMath) {
+ setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
+
+ setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
+ } else {
+ setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
+ setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
+
+ setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
+ }
}
- if (Subtarget.has64BitSupport()) {
+ if (Subtarget.has64BitSupport())
setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
- setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
- }
+
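+  // (On 32-bit targets the 64-bit time base must be read as two halves with
+  // a retry loop, so READCYCLECOUNTER is custom-lowered to
+  // PPCISD::READ_TIME_BASE there.)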
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
if (!isPPC64) {
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
@@ -610,8 +777,11 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
}
setBooleanContents(ZeroOrOneBooleanContent);
- // Altivec instructions set fields to all zeros or all ones.
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
+ if (Subtarget.hasAltivec()) {
+ // Altivec instructions set fields to all zeros or all ones.
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+ }
if (!isPPC64) {
// These libcalls are not available in 32-bit.
@@ -632,6 +802,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::SINT_TO_FP);
+ if (Subtarget.hasFPCVT())
+ setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::BR_CC);
@@ -639,6 +811,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
setTargetDAGCombine(ISD::BRCOND);
setTargetDAGCombine(ISD::BSWAP);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+ setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
+ setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::ZERO_EXTEND);
@@ -672,13 +846,33 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
// With 32 condition bits, we don't need to sink (and duplicate) compares
// aggressively in CodeGenPrep.
- if (Subtarget.useCRBits())
+ if (Subtarget.useCRBits()) {
setHasMultipleConditionRegisters();
+ setJumpIsExpensive();
+ }
setMinFunctionAlignment(2);
if (Subtarget.isDarwin())
setPrefFunctionAlignment(4);
+ switch (Subtarget.getDarwinDirective()) {
+ default: break;
+ case PPC::DIR_970:
+ case PPC::DIR_A2:
+ case PPC::DIR_E500mc:
+ case PPC::DIR_E5500:
+ case PPC::DIR_PWR4:
+ case PPC::DIR_PWR5:
+ case PPC::DIR_PWR5X:
+ case PPC::DIR_PWR6:
+ case PPC::DIR_PWR6X:
+ case PPC::DIR_PWR7:
+ case PPC::DIR_PWR8:
+ setPrefFunctionAlignment(4);
+ setPrefLoopAlignment(4);
+ break;
+ }
+
setInsertFencesForAtomic(true);
if (Subtarget.enableMachineScheduler())
@@ -686,10 +880,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
else
setSchedulingPreference(Sched::Hybrid);
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
- // The Freescale cores does better with aggressive inlining of memcpy and
- // friends. Gcc uses same threshold of 128 bytes (= 32 word stores).
+ // The Freescale cores do better with aggressive inlining of memcpy and
+  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
MaxStoresPerMemset = 32;
@@ -698,8 +892,6 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
MaxStoresPerMemcpyOptSize = 8;
MaxStoresPerMemmove = 32;
MaxStoresPerMemmoveOptSize = 8;
-
- setPrefFunctionAlignment(4);
}
}
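
[Editor's note, illustrative only: the 128-byte figure in the comment above is exactly 32 word stores on these 32-bit cores, assuming 4-byte words:]

static_assert(32 * 4 == 128, "128 bytes = 32 x 4-byte word stores");
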
@@ -751,19 +943,23 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
default: return nullptr;
case PPCISD::FSEL: return "PPCISD::FSEL";
case PPCISD::FCFID: return "PPCISD::FCFID";
+ case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
+ case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
+ case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
+ case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
+ case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
case PPCISD::FRE: return "PPCISD::FRE";
case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
case PPCISD::STFIWX: return "PPCISD::STFIWX";
case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
case PPCISD::VPERM: return "PPCISD::VPERM";
+ case PPCISD::CMPB: return "PPCISD::CMPB";
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
- case PPCISD::LOAD: return "PPCISD::LOAD";
- case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC";
case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
case PPCISD::SRL: return "PPCISD::SRL";
@@ -771,11 +967,11 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::SHL: return "PPCISD::SHL";
case PPCISD::CALL: return "PPCISD::CALL";
case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
- case PPCISD::CALL_TLS: return "PPCISD::CALL_TLS";
- case PPCISD::CALL_NOP_TLS: return "PPCISD::CALL_NOP_TLS";
case PPCISD::MTCTR: return "PPCISD::MTCTR";
case PPCISD::BCTRL: return "PPCISD::BCTRL";
+ case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
+ case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
@@ -783,6 +979,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::VCMPo: return "PPCISD::VCMPo";
case PPCISD::LBRX: return "PPCISD::LBRX";
case PPCISD::STBRX: return "PPCISD::STBRX";
+ case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
+ case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
case PPCISD::LARX: return "PPCISD::LARX";
case PPCISD::STCX: return "PPCISD::STCX";
case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
@@ -793,27 +991,38 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
case PPCISD::CR6SET: return "PPCISD::CR6SET";
case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
- case PPCISD::ADDIS_TOC_HA: return "PPCISD::ADDIS_TOC_HA";
- case PPCISD::LD_TOC_L: return "PPCISD::LD_TOC_L";
- case PPCISD::ADDI_TOC_L: return "PPCISD::ADDI_TOC_L";
case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
+ case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
+ case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
+ case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
+ case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
case PPCISD::SC: return "PPCISD::SC";
+ case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
+ case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
+ case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
+ case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
+ case PPCISD::QBFLT: return "PPCISD::QBFLT";
+ case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
}
}
-EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+EVT PPCTargetLowering::getSetCCResultType(LLVMContext &C, EVT VT) const {
if (!VT.isVector())
return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
+
+ if (Subtarget.hasQPX())
+ return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
+
return VT.changeVectorElementTypeToInteger();
}
@@ -853,7 +1062,7 @@ static bool isConstantOrUndef(int Op, int Val) {
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
- bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
+ bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
if (ShuffleKind == 0) {
if (IsLE)
return false;
@@ -884,7 +1093,7 @@ bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
- bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
+ bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
if (ShuffleKind == 0) {
if (IsLE)
return false;
@@ -939,7 +1148,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned ShuffleKind, SelectionDAG &DAG) {
- if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
if (ShuffleKind == 1) // unary
return isVMerge(N, UnitSize, 0, 0);
else if (ShuffleKind == 2) // swapped
@@ -964,7 +1173,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned ShuffleKind, SelectionDAG &DAG) {
- if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
if (ShuffleKind == 1) // unary
return isVMerge(N, UnitSize, 8, 8);
else if (ShuffleKind == 2) // swapped
@@ -1008,8 +1217,7 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
- bool isLE = DAG.getTarget().getSubtargetImpl()->getDataLayout()->
- isLittleEndian();
+ bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();
if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
// Check the rest of the elements to see if they are consecutive.
@@ -1082,7 +1290,7 @@ unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
assert(isSplatShuffleMask(SVOp, EltSize));
- if (DAG.getSubtarget().getDataLayout()->isLittleEndian())
+ if (DAG.getTarget().getDataLayout()->isLittleEndian())
return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
else
return SVOp->getMaskElt(0) / EltSize;
@@ -1200,6 +1408,36 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
return SDValue();
}
+/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
+/// amount, otherwise return -1.
+int PPC::isQVALIGNIShuffleMask(SDNode *N) {
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
+ return -1;
+
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+
+ // Find the first non-undef value in the shuffle mask.
+ unsigned i;
+ for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
+ /*search*/;
+
+ if (i == 4) return -1; // all undef.
+
+ // Otherwise, check to see if the rest of the elements are consecutively
+ // numbered from this value.
+ unsigned ShiftAmt = SVOp->getMaskElt(i);
+ if (ShiftAmt < i) return -1;
+ ShiftAmt -= i;
+
+ // Check the rest of the elements to see if they are consecutive.
+ for (++i; i != 4; ++i)
+ if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
+ return -1;
+
+ return ShiftAmt;
+}
+
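
[Editor's note, a hypothetical standalone harness for the mask logic above, with a plain int array standing in for the ShuffleVectorSDNode: mask elements 0-7 index the concatenation of the two input vectors, and any run of four consecutive indices starting at s (with undefs as wildcards) is a valid qvaligni with shift s.]

#include <cstdio>

static bool isConstantOrUndef(int Op, int Val) { return Op < 0 || Op == Val; }

static int qvaligniShift(const int Mask[4]) {
  unsigned i;
  for (i = 0; i != 4 && Mask[i] < 0; ++i)
    /*search*/;
  if (i == 4) return -1; // all undef

  unsigned ShiftAmt = Mask[i];
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(Mask[i], ShiftAmt + i))
      return -1;
  return ShiftAmt;
}

int main() {
  const int A[4] = { 2,  3, 4,  5}; // consecutive from 2      -> 2
  const int B[4] = {-1,  3, 4, -1}; // undefs act as wildcards -> 2
  const int C[4] = { 2,  3, 5,  6}; // not consecutive         -> -1
  std::printf("%d %d %d\n", qvaligniShift(A), qvaligniShift(B),
              qvaligniShift(C)); // prints: 2 2 -1
  return 0;
}
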
//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//
@@ -1459,9 +1697,16 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
} else
return false;
- // PowerPC doesn't have preinc load/store instructions for vectors.
- if (VT.isVector())
- return false;
+ // PowerPC doesn't have preinc load/store instructions for vectors (except
+ // for QPX, which does have preinc r+r forms).
+ if (VT.isVector()) {
+ if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
+ return false;
+ } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
+ AM = ISD::PRE_INC;
+ return true;
+ }
+ }
if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
@@ -1518,8 +1763,9 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, and set the HiOpFlags and LoOpFlags to the target MO flags.
-static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
- unsigned &LoOpFlags,
+static bool GetLabelAccessInfo(const TargetMachine &TM,
+ const PPCSubtarget &Subtarget,
+ unsigned &HiOpFlags, unsigned &LoOpFlags,
const GlobalValue *GV = nullptr) {
HiOpFlags = PPCII::MO_HA;
LoOpFlags = PPCII::MO_LO;
@@ -1534,7 +1780,7 @@ static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
// If this is a reference to a global value that requires a non-lazy-ptr, make
// sure that instruction lowering adds it.
- if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
+ if (GV && Subtarget.hasLazyResolverStub(GV)) {
HiOpFlags |= PPCII::MO_NLP_FLAG;
LoOpFlags |= PPCII::MO_NLP_FLAG;
@@ -1566,6 +1812,28 @@ static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
+static void setUsesTOCBasePtr(MachineFunction &MF) {
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ FuncInfo->setUsesTOCBasePtr();
+}
+
+static void setUsesTOCBasePtr(SelectionDAG &DAG) {
+ setUsesTOCBasePtr(DAG.getMachineFunction());
+}
+
+static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit,
+ SDValue GA) {
+ EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
+ SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
+ DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
+
+ SDValue Ops[] = { GA, Reg };
+ return DAG.getMemIntrinsicNode(PPCISD::TOC_ENTRY, dl,
+ DAG.getVTList(VT, MVT::Other), Ops, VT,
+ MachinePointerInfo::getGOT(), 0, false, true,
+ false, 0);
+}
+
SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
@@ -1575,20 +1843,19 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+ setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
- return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
- DAG.getRegister(PPC::X2, MVT::i64));
+ return getTOCEntry(DAG, SDLoc(CP), true, GA);
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ bool isPIC =
+ GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
PPCII::MO_PIC_FLAG);
- SDLoc DL(CP);
- return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
- DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
+ return getTOCEntry(DAG, SDLoc(CP), false, GA);
}
SDValue CPIHi =
@@ -1605,20 +1872,19 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+ setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
- return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
- DAG.getRegister(PPC::X2, MVT::i64));
+ return getTOCEntry(DAG, SDLoc(JT), true, GA);
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ bool isPIC =
+ GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
PPCII::MO_PIC_FLAG);
- SDLoc DL(GA);
- return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), PtrVT, GA,
- DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
+ return getTOCEntry(DAG, SDLoc(GA), false, GA);
}
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
@@ -1635,39 +1901,19 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
// 64-bit SVR4 ABI code is always position-independent.
// The actual BlockAddress is stored in the TOC.
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+ setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
- return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(BASDN), MVT::i64, GA,
- DAG.getRegister(PPC::X2, MVT::i64));
+ return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ bool isPIC =
+ GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}
-// Generate a call to __tls_get_addr for the given GOT entry Op.
-std::pair<SDValue,SDValue>
-PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl,
- SelectionDAG &DAG) const {
-
- Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
- TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
- Entry.Node = Op;
- Entry.Ty = IntPtrTy;
- Args.push_back(Entry);
-
- TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::C, IntPtrTy,
- DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()),
- std::move(Args), 0);
-
- return LowerCallTo(CLI);
-}
-
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
@@ -1702,6 +1948,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
PPCII::MO_TLS);
SDValue GOTPtr;
if (is64bit) {
+ setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
PtrVT, GOTReg, TGA);
@@ -1713,10 +1960,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
}
if (Model == TLSModel::GeneralDynamic) {
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- PPCII::MO_TLSGD);
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
SDValue GOTPtr;
if (is64bit) {
+ setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
GOTReg, TGA);
@@ -1726,17 +1973,15 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
}
- SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
- GOTPtr, TGA);
- std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
- return CallResult.first;
+ return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
+ GOTPtr, TGA, TGA);
}
if (Model == TLSModel::LocalDynamic) {
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- PPCII::MO_TLSLD);
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
SDValue GOTPtr;
if (is64bit) {
+ setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
GOTReg, TGA);
@@ -1746,13 +1991,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
}
- SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
- GOTPtr, TGA);
- std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
- SDValue TLSAddr = CallResult.first;
- SDValue Chain = CallResult.second;
- SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
- Chain, TLSAddr, TGA);
+ SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
+ PtrVT, GOTPtr, TGA, TGA);
+ SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
+ PtrVT, TLSAddr, TGA);
return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
}
@@ -1769,20 +2011,20 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+ setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
- return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
- DAG.getRegister(PPC::X2, MVT::i64));
+ return getTOCEntry(DAG, DL, true, GA);
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
+ bool isPIC =
+ GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);
if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
GSDN->getOffset(),
PPCII::MO_PIC_FLAG);
- return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
- DAG.getNode(PPCISD::GlobalBaseReg, DL, MVT::i32));
+ return getTOCEntry(DAG, DL, false, GA);
}
SDValue GAHi =
@@ -2151,7 +2393,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
};
const unsigned NumArgRegs = array_lengthof(ArgRegs);
- unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+ unsigned RegNum = State.getFirstUnallocated(ArgRegs);
// Skip one register if the first unallocated register has an even register
// number and there are still argument registers available which have not been
@@ -2179,7 +2421,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
const unsigned NumArgRegs = array_lengthof(ArgRegs);
- unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+ unsigned RegNum = State.getFirstUnallocated(ArgRegs);
// If there is only one Floating-point register left we need to put both f64
// values of a split ppc_fp128 value on the stack.
@@ -2205,6 +2447,17 @@ static const MCPhysReg *GetFPR() {
return FPR;
}
+/// GetQFPR - Get the set of QPX registers that should be allocated for
+/// arguments.
+static const MCPhysReg *GetQFPR() {
+ static const MCPhysReg QFPR[] = {
+ PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
+ PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13
+ };
+
+ return QFPR;
+}
+
/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
@@ -2233,6 +2486,10 @@ static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
Align = 16;
+  // QPX vector types stored in double precision are padded to a 32-byte
+  // boundary.
+ else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
+ Align = 32;
// ByVal parameters are aligned as requested.
if (Flags.isByVal()) {
@@ -2271,7 +2528,7 @@ static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
unsigned ParamAreaSize,
unsigned &ArgOffset,
unsigned &AvailableFPRs,
- unsigned &AvailableVRs) {
+ unsigned &AvailableVRs, bool HasQPX) {
bool UseMemory = false;
// Respect alignment of argument on the stack.
@@ -2295,7 +2552,11 @@ static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
// However, if the argument is actually passed in an FPR or a VR,
// we don't use memory after all.
if (!Flags.isByVal()) {
- if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
+ if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
+ // QPX registers overlap with the scalar FP registers.
+ (HasQPX && (ArgVT == MVT::v4f32 ||
+ ArgVT == MVT::v4f64 ||
+ ArgVT == MVT::v4i1)))
if (AvailableFPRs > 0) {
--AvailableFPRs;
return false;
@@ -2314,10 +2575,9 @@ static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
-static unsigned EnsureStackAlignment(const TargetMachine &Target,
+static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
unsigned NumBytes) {
- unsigned TargetAlign =
- Target.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
+ unsigned TargetAlign = Lowering->getStackAlignment();
unsigned AlignMask = TargetAlign - 1;
NumBytes = (NumBytes + AlignMask) & ~AlignMask;
return NumBytes;
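
[Editor's note, a sketch of the mask trick above: it is the usual power-of-two round-up, valid because getStackAlignment() returns a power of two.]

#include <cassert>

static unsigned roundUpTo(unsigned NumBytes, unsigned TargetAlign) {
  unsigned AlignMask = TargetAlign - 1; // e.g. 16 -> 0xF
  return (NumBytes + AlignMask) & ~AlignMask;
}

int main() {
  assert(roundUpTo(0, 16) == 0);
  assert(roundUpTo(1, 16) == 16);
  assert(roundUpTo(16, 16) == 16);
  assert(roundUpTo(100, 16) == 112);
  return 0;
}
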
@@ -2398,7 +2658,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
*DAG.getContext());
// Reserve space for the linkage area on the stack.
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false);
+ unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, PtrByteSize);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
@@ -2430,13 +2690,21 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
- case MVT::v4f32:
RC = &PPC::VRRCRegClass;
break;
+ case MVT::v4f32:
+ RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
+ break;
case MVT::v2f64:
case MVT::v2i64:
RC = &PPC::VSHRCRegClass;
break;
+ case MVT::v4f64:
+ RC = &PPC::QFRCRegClass;
+ break;
+ case MVT::v4i1:
+ RC = &PPC::QBRCRegClass;
+ break;
}
// Transform the arguments stored in physical registers into virtual ones.
@@ -2484,7 +2752,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
// call optimized function's reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ MinReservedArea =
+ EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
FuncInfo->setMinReservedArea(MinReservedArea);
SmallVector<SDValue, 8> MemOps;
@@ -2506,10 +2775,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
if (DisablePPCFloatInVariadic)
NumFPArgRegs = 0;
- FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
- NumGPArgRegs));
- FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
- NumFPArgRegs));
+ FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
+ FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
// Make room for NumGPArgRegs and NumFPArgRegs.
int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
@@ -2599,14 +2866,15 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ assert(!(CallConv == CallingConv::Fast && isVarArg) &&
+ "fastcc not supported on varargs functions");
+
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Potential tail calls could cause overwriting of argument stack slots.
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
unsigned PtrByteSize = 8;
-
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
- isELFv2ABI);
+ unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
@@ -2624,9 +2892,12 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
};
+ static const MCPhysReg *QFPR = GetQFPR();
+
const unsigned Num_GPR_Regs = array_lengthof(GPR);
const unsigned Num_FPR_Regs = 13;
const unsigned Num_VR_Regs = array_lengthof(VR);
+ const unsigned Num_QFPR_Regs = Num_FPR_Regs;
// Do a first pass over the arguments to determine whether the ABI
// guarantees that our caller has allocated the parameter save area
@@ -2642,7 +2913,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
for (unsigned i = 0, e = Ins.size(); i != e; ++i)
if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
PtrByteSize, LinkageSize, ParamAreaSize,
- NumBytes, AvailableFPRs, AvailableVRs))
+ NumBytes, AvailableFPRs, AvailableVRs,
+ Subtarget.hasQPX()))
HasParameterArea = true;
// Add DAG nodes to load the arguments or copy them out of registers. On
@@ -2650,7 +2922,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// although the first ones are often in registers.
unsigned ArgOffset = LinkageSize;
- unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+ unsigned &QFPR_idx = FPR_idx;
SmallVector<SDValue, 8> MemOps;
Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
unsigned CurArgIdx = 0;
@@ -2662,22 +2935,37 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
unsigned ObjSize = ObjectVT.getStoreSize();
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
- std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[ArgNo].OrigArgIndex;
+ if (Ins[ArgNo].isOrigArg()) {
+ std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+ }
+    // We re-align the argument offset for each argument, except under the
+    // fast calling convention, where we only do so once we know the argument
+    // will actually use a stack slot.
+ unsigned CurArgOffset, Align;
+ auto ComputeArgOffset = [&]() {
+ /* Respect alignment of argument on the stack. */
+ Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
+ ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
+ CurArgOffset = ArgOffset;
+ };
- /* Respect alignment of argument on the stack. */
- unsigned Align =
- CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
- ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
- unsigned CurArgOffset = ArgOffset;
+ if (CallConv != CallingConv::Fast) {
+ ComputeArgOffset();
- /* Compute GPR index associated with argument offset. */
- GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
- GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
+ /* Compute GPR index associated with argument offset. */
+ GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
+ GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
+ }
// FIXME the codegen can be much improved in some cases.
// We do not have to keep everything in memory.
if (Flags.isByVal()) {
+ assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
+ if (CallConv == CallingConv::Fast)
+ ComputeArgOffset();
+
// ObjSize is the true size, ArgSize rounded up to multiple of registers.
ObjSize = Flags.getByValSize();
ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
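
[Editor's note, a hypothetical trace of the GPR_idx computation in this hunk, assuming the 64-bit ELFv1 layout (LinkageSize = 48, PtrByteSize = 8, eight argument GPRs): the parameter save area starts right after the linkage area, and doubleword slot k maps to GPR X3+k until the argument GPRs are exhausted.]

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned LinkageSize = 48, PtrByteSize = 8, Num_GPR_Regs = 8;
  for (unsigned ArgOffset = LinkageSize; ArgOffset <= 112; ArgOffset += 8) {
    // Same computation as above: offset within the parameter save area,
    // divided by the doubleword size, clamped to the number of GPRs.
    unsigned GPR_idx =
        std::min((ArgOffset - LinkageSize) / PtrByteSize, Num_GPR_Regs);
    if (GPR_idx == Num_GPR_Regs)
      std::printf("offset %3u -> no GPR left, passed in memory\n", ArgOffset);
    else
      std::printf("offset %3u -> X%u\n", ArgOffset, 3 + GPR_idx);
  }
  return 0;
}
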
@@ -2721,7 +3009,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(Arg);
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store;
@@ -2783,7 +3071,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. Clang may use those instead of "byval" aggregate
// types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
@@ -2791,10 +3079,14 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// value to MVT::i64 and then truncate to the correct register size.
ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputeArgOffset();
+
needsLoad = true;
ArgSize = PtrByteSize;
}
- ArgOffset += 8;
+ if (CallConv != CallingConv::Fast || needsLoad)
+ ArgOffset += 8;
break;
case MVT::f32:
@@ -2808,17 +3100,20 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
if (ObjectVT == MVT::f32)
VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
else
- VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ?
- &PPC::VSFRCRegClass :
- &PPC::F8RCRegClass);
+ VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
+ ? &PPC::VSFRCRegClass
+ : &PPC::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++FPR_idx;
- } else if (GPR_idx != Num_GPR_Regs) {
+ } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
+ // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
+ // once we support fp <-> gpr moves.
+
// This can only ever happen in the presence of f32 array types,
// since otherwise we never run out of FPRs before running out
// of GPRs.
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::f32) {
@@ -2830,16 +3125,21 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputeArgOffset();
+
needsLoad = true;
}
// When passing an array of floats, the array occupies consecutive
// space in the argument area; only round up to the next doubleword
// at the end of the array. Otherwise, each float takes 8 bytes.
- ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
- ArgOffset += ArgSize;
- if (Flags.isInConsecutiveRegsLast())
- ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ if (CallConv != CallingConv::Fast || needsLoad) {
+ ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
+ ArgOffset += ArgSize;
+ if (Flags.isInConsecutiveRegsLast())
+ ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ }
break;
case MVT::v4f32:
case MVT::v4i32:
@@ -2847,6 +3147,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
+ if (!Subtarget.hasQPX()) {
// These can be scalar arguments or elements of a vector array type
// passed directly. The latter are used to implement ELFv2 homogenous
// vector aggregates.
@@ -2857,9 +3158,43 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++VR_idx;
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputeArgOffset();
+
needsLoad = true;
}
- ArgOffset += 16;
+ if (CallConv != CallingConv::Fast || needsLoad)
+ ArgOffset += 16;
+ break;
+ } // not QPX
+
+ assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
+ "Invalid QPX parameter type");
+ /* fall through */
+
+ case MVT::v4f64:
+ case MVT::v4i1:
+ // QPX vectors are treated like their scalar floating-point subregisters
+ // (except that they're larger).
+ unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
+ if (QFPR_idx != Num_QFPR_Regs) {
+ const TargetRegisterClass *RC;
+ switch (ObjectVT.getSimpleVT().SimpleTy) {
+ case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
+ case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
+ default: RC = &PPC::QBRCRegClass; break;
+ }
+
+ unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
+ ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
+ ++QFPR_idx;
+ } else {
+ if (CallConv == CallingConv::Fast)
+ ComputeArgOffset();
+ needsLoad = true;
+ }
+ if (CallConv != CallingConv::Fast || needsLoad)
+ ArgOffset += Sz;
break;
}
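
[Editor's note, illustrative only: the Sz selection above follows from the lane layout. A QPX register holds four lanes, stored as four floats (16 bytes) for v4f32 and four doubles (32 bytes) for v4f64 and for v4i1, which takes the 32-byte slot:]

static_assert(4 * sizeof(float) == 16 && 4 * sizeof(double) == 32,
              "QPX argument sizes: v4f32 = 16 bytes, v4f64/v4i1 = 32 bytes");
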
@@ -2888,7 +3223,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ MinReservedArea =
+ EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
FuncInfo->setMinReservedArea(MinReservedArea);
// If the function takes variable number of arguments, make a frame index for
@@ -2942,9 +3278,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4;
-
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
- false);
+ unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
unsigned ArgOffset = LinkageSize;
// Area that is at least reserved in caller of this function.
unsigned MinReservedArea = ArgOffset;
@@ -3038,9 +3372,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned ObjSize = ObjectVT.getSizeInBits()/8;
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
- std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[ArgNo].OrigArgIndex;
-
+ if (Ins[ArgNo].isOrigArg()) {
+ std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+ }
unsigned CurArgOffset = ArgOffset;
// Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
@@ -3061,6 +3396,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// FIXME the codegen can be much improved in some cases.
// We do not have to keep everything in memory.
if (Flags.isByVal()) {
+ assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
// ObjSize is the true size, ArgSize rounded up to multiple of registers.
ObjSize = Flags.getByValSize();
ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
@@ -3249,7 +3586,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ MinReservedArea =
+ EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
FuncInfo->setMinReservedArea(MinReservedArea);
// If the function takes variable number of arguments, make a frame index for
@@ -3404,8 +3742,9 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
if (SPDiff) {
// Calculate the new stack slot for the return address.
int SlotSize = isPPC64 ? 8 : 4;
- int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
- isDarwinABI);
+ const PPCFrameLowering *FL =
+ MF.getSubtarget<PPCSubtarget>().getFrameLowering();
+ int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
@@ -3417,8 +3756,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
// When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
// slot as the FP is never overwritten.
if (isDarwinABI) {
- int NewFPLoc =
- SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
true);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
@@ -3548,12 +3886,27 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
InFlag = Chain.getValue(1);
}
+// Is this global address that of a function that can be called by name (as
+// opposed to something that must hold a descriptor for an indirect call)?
+static bool isFunctionGlobalAddress(SDValue Callee) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
+ Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false;
+
+ return G->getGlobal()->getType()->getElementType()->isFunctionTy();
+ }
+
+ return false;
+}
+
static
unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
- SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
+ SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff,
+ bool isTailCall, bool IsPatchPoint,
SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
- const PPCSubtarget &Subtarget) {
+ ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
bool isPPC64 = Subtarget.isPPC64();
bool isSVR4ABI = Subtarget.isSVR4ABI();
@@ -3573,7 +3926,10 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
needIndirectCall = false;
}
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (isFunctionGlobalAddress(Callee)) {
+ GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
+ // A call to a TLS address is actually an indirect call to a
+ // thread-specific pointer.
unsigned OpFlags = 0;
if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
(Subtarget.getTargetTriple().isMacOSX() &&
@@ -3604,7 +3960,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
(Subtarget.getTargetTriple().isMacOSX() &&
Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
(Subtarget.isTargetELF() && !isPPC64 &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_) ) {
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
// PC-relative references to external symbols should go through $stub,
// unless we're building with the leopard linker or later, which
// automatically synthesizes these stubs.
@@ -3616,6 +3972,16 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
needIndirectCall = false;
}
+ if (IsPatchPoint) {
+ // We'll form an invalid direct call when lowering a patchpoint; the full
+ // sequence for an indirect call is complicated, and many of the
+ // instructions introduced might have side effects (and, thus, can't be
+ // removed later). The call itself will be removed as soon as the
+ // argument/return lowering is complete, so the fact that it has the wrong
+ // kind of operands should not really matter.
+ needIndirectCall = false;
+ }
+
if (needIndirectCall) {
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
// to do the call, we can't use PPCISD::CALL.
@@ -3641,50 +4007,51 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// 6. On return of the callee, the TOC of the caller needs to be
// restored (this is done in FinishCall()).
//
- // All those operations are flagged together to ensure that no other
+ // The loads are scheduled at the beginning of the call sequence, and the
+ // register copies are flagged together to ensure that no other
// operations can be scheduled in between. E.g. without flagging the
- // operations together, a TOC access in the caller could be scheduled
- // between the load of the callee TOC and the branch to the callee, which
+ // copies together, a TOC access in the caller could be scheduled between
+ // the assignment of the callee TOC and the branch to the callee, which
// results in the TOC access going through the TOC of the callee instead
// of going through the TOC of the caller, which leads to incorrect code.
// Load the address of the function entry point from the function
// descriptor.
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
- SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs,
- makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
- Chain = LoadFuncPtr.getValue(1);
- InFlag = LoadFuncPtr.getValue(2);
+ SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
+ if (LDChain.getValueType() == MVT::Glue)
+ LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
+
+ bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors();
+
+ MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
+ SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
+ false, false, LoadsInv, 8);
// Load environment pointer into r11.
- // Offset of the environment pointer within the function descriptor.
SDValue PtrOff = DAG.getIntPtrConstant(16);
-
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
- SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr,
- InFlag);
- Chain = LoadEnvPtr.getValue(1);
- InFlag = LoadEnvPtr.getValue(2);
+ SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
+ MPI.getWithOffset(16), false, false,
+ LoadsInv, 8);
+
+ SDValue TOCOff = DAG.getIntPtrConstant(8);
+ SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
+ SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
+ MPI.getWithOffset(8), false, false,
+ LoadsInv, 8);
+
+ setUsesTOCBasePtr(DAG);
+ SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
+ InFlag);
+ Chain = TOCVal.getValue(0);
+ InFlag = TOCVal.getValue(1);
SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
InFlag);
+
Chain = EnvVal.getValue(0);
InFlag = EnvVal.getValue(1);
- // Load TOC of the callee into r2. We are using a target-specific load
- // with r2 hard coded, because the result of a target-independent load
- // would never go directly into r2, since r2 is a reserved register (which
- // prevents the register allocator from allocating it), resulting in an
- // additional register being allocated and an unnecessary move instruction
- // being generated.
- VTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue TOCOff = DAG.getIntPtrConstant(8);
- SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
- SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
- AddTOC, InFlag);
- Chain = LoadTOCPtr.getValue(0);
- InFlag = LoadTOCPtr.getValue(1);
-
MTCTROps[0] = Chain;
MTCTROps[1] = LoadFuncPtr;
MTCTROps[2] = InFlag;
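
[Editor's note, a sketch of the ELFv1 function descriptor that the three loads above walk; the offsets 0, 8, and 16 match the MPI.getWithOffset calls. The struct is illustrative, not part of the patch:]

#include <cstdint>

struct ELFv1FunctionDescriptor {
  uint64_t EntryPoint; // offset 0:  loaded and moved to CTR
  uint64_t TOCBase;    // offset 8:  loaded and copied into r2 (X2)
  uint64_t EnvPointer; // offset 16: loaded and copied into r11 (X11)
};

static_assert(sizeof(ELFv1FunctionDescriptor) == 24, "three doublewords");
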
@@ -3712,23 +4079,6 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
if (Callee.getNode()) {
Ops.push_back(Chain);
Ops.push_back(Callee);
-
- // If this is a call to __tls_get_addr, find the symbol whose address
- // is to be taken and add it to the list. This will be used to
- // generate __tls_get_addr(<sym>@tlsgd) or __tls_get_addr(<sym>@tlsld).
- // We find the symbol by walking the chain to the CopyFromReg, walking
- // back from the CopyFromReg to the ADDI_TLSGD_L or ADDI_TLSLD_L, and
- // pulling the symbol from that node.
- if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
- if (!strcmp(S->getSymbol(), "__tls_get_addr")) {
- assert(!needIndirectCall && "Indirect call to __tls_get_addr???");
- SDNode *AddI = Chain.getNode()->getOperand(2).getNode();
- SDValue TGTAddr = AddI->getOperand(1);
- assert(TGTAddr.getNode()->getOpcode() == ISD::TargetGlobalTLSAddress &&
- "Didn't find target global TLS address where we expected one");
- Ops.push_back(TGTAddr);
- CallOpc = PPCISD::CALL_TLS;
- }
}
// If this is a tail call add stack pointer delta.
if (isTailCall)
@@ -3740,9 +4090,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
- // Direct calls in the ELFv2 ABI need the TOC register live into the call.
- if (Callee.getNode() && isELFv2ABI)
+ // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
+ // into the call.
+ if (isSVR4ABI && isPPC64 && !IsPatchPoint) {
+ setUsesTOCBasePtr(DAG);
Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
+ }
return CallOpc;
}
@@ -3804,22 +4157,22 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SDValue
PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
- bool isTailCall, bool isVarArg,
+ bool isTailCall, bool isVarArg, bool IsPatchPoint,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8>
&RegsToPass,
SDValue InFlag, SDValue Chain,
- SDValue &Callee,
+ SDValue CallSeqStart, SDValue &Callee,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals) const {
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
- bool isELFv2ABI = Subtarget.isELFv2ABI();
std::vector<EVT> NodeTys;
SmallVector<SDValue, 8> Ops;
- unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
- isTailCall, RegsToPass, Ops, NodeTys,
- Subtarget);
+ unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
+ SPDiff, isTailCall, IsPatchPoint, RegsToPass,
+ Ops, NodeTys, CS, Subtarget);
// Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
@@ -3833,8 +4186,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -3863,8 +4215,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
// stack frame. If caller and callee belong to the same module (and have the
// same TOC), the NOP will remain unchanged.
- bool needsTOCRestore = false;
- if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64()) {
+  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
+ !IsPatchPoint) {
if (CallOpc == PPCISD::BCTRL) {
// This is a call through a function pointer.
// Restore the caller TOC from the save area into R2.
@@ -3875,31 +4227,27 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
// since r2 is a reserved register (which prevents the register allocator
// from allocating it), resulting in an additional register being
// allocated and an unnecessary move instruction being generated.
- needsTOCRestore = true;
+ CallOpc = PPCISD::BCTRL_LOAD_TOC;
+
+ EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
+ unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
+ SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
+ SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
+
+ // The address needs to go after the chain input but before the flag (or
+ // any other variadic arguments).
+ Ops.insert(std::next(Ops.begin()), AddTOC);
} else if ((CallOpc == PPCISD::CALL) &&
(!isLocalCall(Callee) ||
- DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_))
// Otherwise insert NOP for non-local calls.
CallOpc = PPCISD::CALL_NOP;
- } else if (CallOpc == PPCISD::CALL_TLS)
- // For 64-bit SVR4, TLS calls are always non-local.
- CallOpc = PPCISD::CALL_NOP_TLS;
}
Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
- if (needsTOCRestore) {
- SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
- unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
- SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
- SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
- Chain = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, AddTOC, InFlag);
- InFlag = Chain.getValue(1);
- }
-
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(BytesCalleePops, true),
InFlag, dl);
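
[Editor's note, a check of the arithmetic behind getTOCSaveOffset() used above, assuming the linkage-area layouts described later in this patch: [SP][CR][LR][2 x unused][TOC] on ELFv1 and [SP][CR][LR][TOC] on ELFv2:]

static_assert(5 * 8 == 40, "ELFv1: TOC save is the sixth doubleword, SP+40");
static_assert(3 * 8 == 24, "ELFv2: TOC save is the fourth doubleword, SP+24");
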
@@ -3923,40 +4271,43 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &isTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
+ bool IsPatchPoint = CLI.IsPatchPoint;
+ ImmutableCallSite *CS = CLI.CS;
if (isTailCall)
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
- if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ if (!isTailCall && CS && CS->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
if (Subtarget.isSVR4ABI()) {
if (Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, OutVals, Ins,
- dl, DAG, InVals);
+ isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ dl, DAG, InVals, CS);
else
return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, OutVals, Ins,
- dl, DAG, InVals);
+ isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ dl, DAG, InVals, CS);
}
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, OutVals, Ins,
- dl, DAG, InVals);
+ isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ dl, DAG, InVals, CS);
}
SDValue
PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
+ bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
// See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
@@ -3986,7 +4337,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
*DAG.getContext());
// Reserve space for the linkage area on the stack.
- CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false),
+ CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
PtrByteSize);
if (isVarArg) {
@@ -4161,9 +4512,9 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
false, TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
- RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
- Ins, InVals);
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
+ RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
+ NumBytes, Ins, InVals, CS);
}
// Copy an argument into memory, being careful to do this outside the
@@ -4189,12 +4540,13 @@ PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
SDValue
PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
+ bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
@@ -4214,13 +4566,43 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
+ assert(!(CallConv == CallingConv::Fast && isVarArg) &&
+ "fastcc not supported on varargs functions");
+
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
// reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
// area is 32 bytes reserved space for [SP][CR][LR][TOC].
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
- isELFv2ABI);
+ unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
unsigned NumBytes = LinkageSize;
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+ unsigned &QFPR_idx = FPR_idx;
+
+ static const MCPhysReg GPR[] = {
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+ static const MCPhysReg *FPR = GetFPR();
+
+ static const MCPhysReg VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+ static const MCPhysReg VSRH[] = {
+ PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
+ PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
+ };
+
+ static const MCPhysReg *QFPR = GetQFPR();
+
+ const unsigned NumGPRs = array_lengthof(GPR);
+ const unsigned NumFPRs = 13;
+ const unsigned NumVRs = array_lengthof(VR);
+ const unsigned NumQFPRs = NumFPRs;
+
+ // When using the fast calling convention, we don't provide backing for
+ // arguments that will be in registers.
+ unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
// Add up all the space actually used.
for (unsigned i = 0; i != NumOps; ++i) {
@@ -4228,6 +4610,47 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
EVT ArgVT = Outs[i].VT;
EVT OrigVT = Outs[i].ArgVT;
+ if (CallConv == CallingConv::Fast) {
+ if (Flags.isByVal())
+ NumGPRsUsed += (Flags.getByValSize()+7)/8;
+ else
+ switch (ArgVT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected ValueType for argument!");
+ case MVT::i1:
+ case MVT::i32:
+ case MVT::i64:
+ if (++NumGPRsUsed <= NumGPRs)
+ continue;
+ break;
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ case MVT::v2f64:
+ case MVT::v2i64:
+ if (++NumVRsUsed <= NumVRs)
+ continue;
+ break;
+ case MVT::v4f32:
+        // When using QPX, this is handled like an FP register; otherwise,
+        // it is an Altivec register.
+ if (Subtarget.hasQPX()) {
+ if (++NumFPRsUsed <= NumFPRs)
+ continue;
+ } else {
+ if (++NumVRsUsed <= NumVRs)
+ continue;
+ }
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ case MVT::v4f64: // QPX
+ case MVT::v4i1: // QPX
+ if (++NumFPRsUsed <= NumFPRs)
+ continue;
+ break;
+ }
+ }
+
/* Respect alignment of argument on the stack. */
unsigned Align =
CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
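
[Editor's note, a toy model of the fastcc pre-pass above, assuming the register-file sizes declared earlier in this function (8 argument GPRs, 13 FPRs, 12 VRs) and ignoring byval: an argument contributes to NumBytes only once its register class is exhausted.]

#include <cstdio>

int main() {
  const unsigned NumGPRs = 8, NumFPRs = 13;
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, StackArgs = 0;
  // Ten i64 arguments and two doubles: the 9th and 10th integers overflow
  // the GPR file and take stack slots; both doubles fit in FPRs.
  for (int i = 0; i != 10; ++i)
    if (++NumGPRsUsed > NumGPRs)
      ++StackArgs;
  for (int i = 0; i != 2; ++i)
    if (++NumFPRsUsed > NumFPRs)
      ++StackArgs;
  std::printf("arguments needing stack: %u\n", StackArgs); // prints 2
  return 0;
}
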
@@ -4251,7 +4674,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Tail call needs the stack to be aligned.
if (getTargetMachine().Options.GuaranteedTailCallOpt &&
CallConv == CallingConv::Fast)
- NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
+ NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
@@ -4284,26 +4707,6 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// must be stored to our stack, and loaded into integer regs as well, if
// any integer regs are available for argument passing.
unsigned ArgOffset = LinkageSize;
- unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
-
- static const MCPhysReg GPR[] = {
- PPC::X3, PPC::X4, PPC::X5, PPC::X6,
- PPC::X7, PPC::X8, PPC::X9, PPC::X10,
- };
- static const MCPhysReg *FPR = GetFPR();
-
- static const MCPhysReg VR[] = {
- PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
- PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
- };
- static const MCPhysReg VSRH[] = {
- PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
- PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
- };
-
- const unsigned NumGPRs = array_lengthof(GPR);
- const unsigned NumFPRs = 13;
- const unsigned NumVRs = array_lengthof(VR);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
@@ -4315,22 +4718,31 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
EVT ArgVT = Outs[i].VT;
EVT OrigVT = Outs[i].ArgVT;
- /* Respect alignment of argument on the stack. */
- unsigned Align =
- CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
- ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
-
- /* Compute GPR index associated with argument offset. */
- GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
- GPR_idx = std::min(GPR_idx, NumGPRs);
-
// PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
SDValue PtrOff;
- PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+    // We re-align the argument offset for each argument, except under the
+    // fast calling convention, where we only do so once we know the argument
+    // will actually use a stack slot.
+ auto ComputePtrOff = [&]() {
+ /* Respect alignment of argument on the stack. */
+ unsigned Align =
+ CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
+ ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
- PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
+ PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
+ };
+
+ if (CallConv != CallingConv::Fast) {
+ ComputePtrOff();
+
+ /* Compute GPR index associated with argument offset. */
+ GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
+ GPR_idx = std::min(GPR_idx, NumGPRs);
+ }
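+
+ // The rounding inside ComputePtrOff is the usual align-up idiom; a minimal
+ // standalone sketch (hypothetical helper, not part of this interface):
+ //
+ //   static unsigned AlignUp(unsigned Offset, unsigned Align) {
+ //     return ((Offset + Align - 1) / Align) * Align;
+ //   }
+ //
+ // For example, AlignUp(20, 16) == 32 while AlignUp(16, 16) == 16, so an
+ // already-aligned offset is left unchanged.
+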
// Promote integers to 64-bit values.
if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
@@ -4355,6 +4767,9 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (Size == 0)
continue;
+ if (CallConv == CallingConv::Fast)
+ ComputePtrOff();
+
// All aggregates smaller than 8 bytes must be passed right-justified.
if (Size==1 || Size==2 || Size==4) {
EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
@@ -4363,7 +4778,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
MachinePointerInfo(), VT,
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
continue;
@@ -4425,7 +4840,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
// Done with this argument.
ArgOffset += PtrByteSize;
@@ -4461,13 +4876,19 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// passed directly. Clang may use those instead of "byval" aggregate
// types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != NumGPRs) {
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Arg));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputePtrOff();
+
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, isTailCall, false, MemOpChains,
TailCallArguments, dl);
+ if (CallConv == CallingConv::Fast)
+ ArgOffset += PtrByteSize;
}
- ArgOffset += PtrByteSize;
+ if (CallConv != CallingConv::Fast)
+ ArgOffset += PtrByteSize;
break;
case MVT::f32:
case MVT::f64: {
@@ -4481,6 +4902,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// then the parameter save area. For now, always put arguments to vararg
// routines in both locations (FPR *and* GPR or stack slot).
bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
+ bool NeededLoad = false;
// First load the argument into the next available FPR.
if (FPR_idx != NumFPRs)
@@ -4489,7 +4911,10 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Next, load the argument into GPR or stack slot if needed.
if (!NeedGPROrStack)
;
- else if (GPR_idx != NumGPRs) {
+ else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
+ // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
+ // once we support fp <-> gpr moves.
+
// In the non-vararg case, this can only ever happen in the
// presence of f32 array types, since otherwise we never run
// out of FPRs before running out of GPRs.
@@ -4528,8 +4953,11 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
ArgVal = SDValue();
if (ArgVal.getNode())
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx], ArgVal));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputePtrOff();
+
// Single-precision floating-point values are mapped to the
// second (rightmost) word of the stack doubleword.
if (Arg.getValueType() == MVT::f32 &&
@@ -4541,14 +4969,18 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, isTailCall, false, MemOpChains,
TailCallArguments, dl);
+
+ NeededLoad = true;
}
// When passing an array of floats, the array occupies consecutive
// space in the argument area; only round up to the next doubleword
// at the end of the array. Otherwise, each float takes 8 bytes.
- ArgOffset += (Arg.getValueType() == MVT::f32 &&
- Flags.isInConsecutiveRegs()) ? 4 : 8;
- if (Flags.isInConsecutiveRegsLast())
- ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ if (CallConv != CallingConv::Fast || NeededLoad) {
+ ArgOffset += (Arg.getValueType() == MVT::f32 &&
+ Flags.isInConsecutiveRegs()) ? 4 : 8;
+ if (Flags.isInConsecutiveRegsLast())
+ ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ }
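+
+ // Worked example (illustrative): three consecutive f32s advance ArgOffset
+ // by 4 each (12 bytes total), and the last one rounds 12 up to the next
+ // doubleword, 16; three independent f32s would take 8 bytes apiece.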
break;
}
case MVT::v4f32:
@@ -4557,6 +4989,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
+ if (!Subtarget.hasQPX()) {
// These can be scalar arguments or elements of a vector array type
// passed directly. The latter are used to implement ELFv2 homogeneous
// vector aggregates.
@@ -4607,12 +5040,73 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(VReg, Arg));
} else {
+ if (CallConv == CallingConv::Fast)
+ ComputePtrOff();
+
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ true, isTailCall, true, MemOpChains,
+ TailCallArguments, dl);
+ if (CallConv == CallingConv::Fast)
+ ArgOffset += 16;
+ }
+
+ if (CallConv != CallingConv::Fast)
+ ArgOffset += 16;
+ break;
+ } // not QPX
+
+ assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
+ "Invalid QPX parameter type");
+
+ /* fall through */
+ case MVT::v4f64:
+ case MVT::v4i1: {
+ bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
+ if (isVarArg) {
+ // We could elide this store in the case where the object fits
+ // entirely in R registers. Maybe later.
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(), false, false, 0);
+ MemOpChains.push_back(Store);
+ if (QFPR_idx != NumQFPRs) {
+ SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl,
+ Store, PtrOff, MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
+ }
+ ArgOffset += (IsF32 ? 16 : 32);
+ for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
+ if (GPR_idx == NumGPRs)
+ break;
+ SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
+ DAG.getConstant(i, PtrVT));
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ }
+ break;
+ }
+
+ // Non-varargs QPX params go into registers or on the stack.
+ if (QFPR_idx != NumQFPRs) {
+ RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
+ } else {
+ if (CallConv == CallingConv::Fast)
+ ComputePtrOff();
+
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, isTailCall, true, MemOpChains,
TailCallArguments, dl);
+ if (CallConv == CallingConv::Fast)
+ ArgOffset += (IsF32 ? 16 : 32);
}
- ArgOffset += 16;
+
+ if (CallConv != CallingConv::Fast)
+ ArgOffset += (IsF32 ? 16 : 32);
break;
+ }
}
}
@@ -4625,21 +5119,23 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Check if this is an indirect call (MTCTR/BCTRL).
// See PrepareCall() for more information about calls through function
// pointers in the 64-bit SVR4 ABI.
- if (!isTailCall &&
- !dyn_cast<GlobalAddressSDNode>(Callee) &&
- !dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ if (!isTailCall && !IsPatchPoint &&
+ !isFunctionGlobalAddress(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee)) {
// Load r2 into a virtual register and store it to the TOC save area.
+ setUsesTOCBasePtr(DAG);
SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
// TOC save area offset.
- unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
+ unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
- Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
+ Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
+ MachinePointerInfo::getStack(TOCSaveOffset),
false, false, 0);
// In the ELFv2 ABI, R12 must contain the address of an indirect callee.
// This does not mean the MTCTR instruction must use R12; it's easier
// to model this as an extra parameter, so do that.
- if (isELFv2ABI)
+ if (isELFv2ABI && !IsPatchPoint)
RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
}
@@ -4656,20 +5152,21 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
FPOp, true, TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
- RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
- Ins, InVals);
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
+ RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
+ NumBytes, Ins, InVals, CS);
}
SDValue
PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
+ bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
unsigned NumOps = Outs.size();
@@ -4691,8 +5188,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
- false);
+ unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
unsigned NumBytes = LinkageSize;
// Add up all the space actually used.
@@ -4737,7 +5233,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Tail call needs the stack to be aligned.
if (getTargetMachine().Options.GuaranteedTailCallOpt &&
CallConv == CallingConv::Fast)
- NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
+ NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
@@ -5030,8 +5526,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// not mean the MTCTR instruction must use R12; it's easier to model this as
// an extra parameter, so do that.
if (!isTailCall &&
- !dyn_cast<GlobalAddressSDNode>(Callee) &&
- !dyn_cast<ExternalSymbolSDNode>(Callee) &&
+ !isFunctionGlobalAddress(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee) &&
!isBLACompatibleAddress(Callee, DAG))
RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
PPC::R12), Callee));
@@ -5049,9 +5545,9 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
FPOp, true, TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
- RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
- Ins, InVals);
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
+ RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
+ NumBytes, Ins, InVals, CS);
}
bool
@@ -5150,7 +5646,6 @@ SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
- bool isDarwinABI = Subtarget.isDarwinABI();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Get current return address save index. The users of this index will be
@@ -5161,9 +5656,9 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
// If the return address save index hasn't been defined yet.
if (!RASI) {
// Find out the fixed offset of the return address save area.
- int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
+ int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
// Allocate the frame index for the return address save area.
- RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
+ RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
// Save the result.
FI->setReturnAddrSaveIndex(RASI);
}
@@ -5174,7 +5669,6 @@ SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
- bool isDarwinABI = Subtarget.isDarwinABI();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Get current frame pointer save index. The users of this index will be
@@ -5185,9 +5679,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
// If the frame pointer save index hasn't been defined yet.
if (!FPSI) {
// Find out the fixed offset of the frame pointer save area.
- int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
- isDarwinABI);
-
+ int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
// Allocate the frame index for frame pointer save area.
FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
@@ -5233,6 +5725,9 @@ SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
}
SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getValueType().isVector())
+ return LowerVectorLoad(Op, DAG);
+
assert(Op.getValueType() == MVT::i1 &&
"Custom lowering only for i1 loads");
@@ -5254,6 +5749,9 @@ SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
}
SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getOperand(1).getValueType().isVector())
+ return LowerVectorStore(Op, DAG);
+
assert(Op.getOperand(1).getValueType() == MVT::i1 &&
"Custom lowering only for i1 stores");
@@ -5381,9 +5879,9 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
return Op;
}
-// FIXME: Split this code up when LegalizeDAGTypes lands.
-SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
- SDLoc dl) const {
+void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
+ SelectionDAG &DAG,
+ SDLoc dl) const {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
@@ -5393,10 +5891,11 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
switch (Op.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32:
- Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
- (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ :
- PPCISD::FCTIDZ),
- dl, MVT::f64, Src);
+ Tmp = DAG.getNode(
+ Op.getOpcode() == ISD::FP_TO_SINT
+ ? PPCISD::FCTIWZ
+ : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
+ dl, MVT::f64, Src);
break;
case MVT::i64:
assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
@@ -5432,16 +5931,119 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
if (Op.getValueType() == MVT::i32 && !i32Stack) {
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, FIPtr.getValueType()));
- MPI = MachinePointerInfo();
+ MPI = MPI.getWithOffset(4);
}
- return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI,
- false, false, false, 0);
+ RLI.Chain = Chain;
+ RLI.Ptr = FIPtr;
+ RLI.MPI = MPI;
+}
+
+SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
+ SDLoc dl) const {
+ ReuseLoadInfo RLI;
+ LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
+
+ return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
+ false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
+ RLI.Ranges);
+}
+
+// We're trying to insert a regular store, S, and then a load, L. If the
+// incoming value, O, is a load, we might just be able to have our load use the
+// address used by O. However, we don't know if anything else will store to
+// that address before we can load from it. To prevent this situation, we need
+// to insert our load, L, into the chain as a peer of O. To do this, we give L
+// the same chain operand as O, we create a token factor from the chain results
+// of O and L, and we replace all uses of O's chain result with that token
+// factor (see spliceIntoChain below for this last part).
+bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
+ ReuseLoadInfo &RLI,
+ SelectionDAG &DAG,
+ ISD::LoadExtType ET) const {
+ SDLoc dl(Op);
+ if (ET == ISD::NON_EXTLOAD &&
+ (Op.getOpcode() == ISD::FP_TO_UINT ||
+ Op.getOpcode() == ISD::FP_TO_SINT) &&
+ isOperationLegalOrCustom(Op.getOpcode(),
+ Op.getOperand(0).getValueType())) {
+
+ LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
+ return true;
+ }
+
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
+ if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
+ LD->isNonTemporal())
+ return false;
+ if (LD->getMemoryVT() != MemVT)
+ return false;
+
+ RLI.Ptr = LD->getBasePtr();
+ if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) {
+ assert(LD->getAddressingMode() == ISD::PRE_INC &&
+ "Non-pre-inc AM on PPC?");
+ RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
+ LD->getOffset());
+ }
+
+ RLI.Chain = LD->getChain();
+ RLI.MPI = LD->getPointerInfo();
+ RLI.IsInvariant = LD->isInvariant();
+ RLI.Alignment = LD->getAlignment();
+ RLI.AAInfo = LD->getAAInfo();
+ RLI.Ranges = LD->getRanges();
+
+ RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
+ return true;
+}
+
+// Given the head of the old chain, ResChain, insert a token factor containing
+// it and NewResChain, and make users of ResChain now be users of that token
+// factor.
+void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
+ SDValue NewResChain,
+ SelectionDAG &DAG) const {
+ if (!ResChain)
+ return;
+
+ SDLoc dl(NewResChain);
+
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ NewResChain, DAG.getUNDEF(MVT::Other));
+ assert(TF.getNode() != NewResChain.getNode() &&
+ "A new TF really is required here");
+
+ DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
+ DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}
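
// Chain picture for spliceIntoChain (illustrative): before, users depend on
// ResChain directly; afterwards they depend on TokenFactor(ResChain,
// NewResChain), so the new load is ordered as a chain peer of the original
// node, and nothing that had to follow the original can slip between the two.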
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDLoc dl(Op);
+
+ if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
+ if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
+ return SDValue();
+
+ SDValue Value = Op.getOperand(0);
+ // The values are now known to be -1 (false) or 1 (true). To convert this
+ // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
+ // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
+ Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
+
+ SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
+ FPHalfs, FPHalfs, FPHalfs, FPHalfs);
+
+ Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
+
+ if (Op.getValueType() != MVT::v4f64)
+ Value = DAG.getNode(ISD::FP_ROUND, dl,
+ Op.getValueType(), Value, DAG.getIntPtrConstant(1));
+ return Value;
+ }
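+
+ // Worked check of the fma mapping above (illustrative): for V in {-1, 1},
+ // fma(V, 0.5, 0.5) = 0.5*V + 0.5 yields 0.5*(-1) + 0.5 = 0.0 (false) and
+ // 0.5*(1) + 0.5 = 1.0 (true), so no separate add and divide are needed.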
+
// Don't handle ppc_fp128 here; let it be lowered to a libcall.
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
@@ -5456,13 +6058,14 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// If we have FCFIDS, then use it when converting to single-precision.
// Otherwise, convert to double-precision and then round.
- unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
- (Op.getOpcode() == ISD::UINT_TO_FP ?
- PPCISD::FCFIDUS : PPCISD::FCFIDS) :
- (Op.getOpcode() == ISD::UINT_TO_FP ?
- PPCISD::FCFIDU : PPCISD::FCFID);
- MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
- MVT::f32 : MVT::f64;
+ unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
+ ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
+ : PPCISD::FCFIDS)
+ : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
+ : PPCISD::FCFID);
+ MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
+ ? MVT::f32
+ : MVT::f64;
if (Op.getOperand(0).getValueType() == MVT::i64) {
SDValue SINT = Op.getOperand(0);
@@ -5512,7 +6115,70 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
}
- SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
+ ReuseLoadInfo RLI;
+ SDValue Bits;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
+ Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
+ false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
+ RLI.Ranges);
+ spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
+ } else if (Subtarget.hasLFIWAX() &&
+ canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
+ RLI.Alignment, RLI.AAInfo, RLI.Ranges);
+ SDValue Ops[] = { RLI.Chain, RLI.Ptr };
+ Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ Ops, MVT::i32, MMO);
+ spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
+ } else if (Subtarget.hasFPCVT() &&
+ canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
+ RLI.Alignment, RLI.AAInfo, RLI.Ranges);
+ SDValue Ops[] = { RLI.Chain, RLI.Ptr };
+ Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ Ops, MVT::i32, MMO);
+ spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
+ } else if (((Subtarget.hasLFIWAX() &&
+ SINT.getOpcode() == ISD::SIGN_EXTEND) ||
+ (Subtarget.hasFPCVT() &&
+ SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
+ SINT.getOperand(0).getValueType() == MVT::i32) {
+ MachineFrameInfo *FrameInfo = MF.getFrameInfo();
+ EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+
+ int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
+ SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
+
+ SDValue Store =
+ DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
+ MachinePointerInfo::getFixedStack(FrameIdx),
+ false, false, 0);
+
+ assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
+ "Expected an i32 store");
+
+ RLI.Ptr = FIdx;
+ RLI.Chain = Store;
+ RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx);
+ RLI.Alignment = 4;
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
+ RLI.Alignment, RLI.AAInfo, RLI.Ranges);
+ SDValue Ops[] = { RLI.Chain, RLI.Ptr };
+ Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
+ PPCISD::LFIWZX : PPCISD::LFIWAX,
+ dl, DAG.getVTList(MVT::f64, MVT::Other),
+ Ops, MVT::i32, MMO);
+ } else
+ Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
+
SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
@@ -5533,23 +6199,36 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SDValue Ld;
if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
- int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
- SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
-
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
- MachinePointerInfo::getFixedStack(FrameIdx),
- false, false, 0);
+ ReuseLoadInfo RLI;
+ bool ReusingLoad;
+ if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
+ DAG))) {
+ int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
+ SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
+
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
+ MachinePointerInfo::getFixedStack(FrameIdx),
+ false, false, 0);
+
+ assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
+ "Expected an i32 store");
+
+ RLI.Ptr = FIdx;
+ RLI.Chain = Store;
+ RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx);
+ RLI.Alignment = 4;
+ }
- assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
- "Expected an i32 store");
MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
- MachineMemOperand::MOLoad, 4, 4);
- SDValue Ops[] = { Store, FIdx };
+ MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
+ RLI.Alignment, RLI.AAInfo, RLI.Ranges);
+ SDValue Ops[] = { RLI.Chain, RLI.Ptr };
Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
PPCISD::LFIWZX : PPCISD::LFIWAX,
dl, DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i32, MMO);
+ if (ReusingLoad)
+ spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
} else {
assert(Subtarget.isPPC64() &&
"i32->FP without LFIWAX supported only on PPC64");
@@ -5816,6 +6495,127 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
+ if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
+ // We first build an i32 vector, load it into a QPX register,
+ // then convert it to a floating-point vector and compare it
+ // to a zero vector to get the boolean result.
+ MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
+ int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx);
+ EVT PtrVT = getPointerTy();
+ SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
+
+ assert(BVN->getNumOperands() == 4 &&
+ "BUILD_VECTOR for v4i1 does not have 4 operands");
+
+ bool IsConst = true;
+ for (unsigned i = 0; i < 4; ++i) {
+ if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
+ IsConst = false;
+ break;
+ }
+ }
+
+ if (IsConst) {
+ Constant *One =
+ ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
+ Constant *NegOne =
+ ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
+
+ SmallVector<Constant*, 4> CV(4, NegOne);
+ for (unsigned i = 0; i < 4; ++i) {
+ if (BVN->getOperand(i).getOpcode() == ISD::UNDEF)
+ CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
+ else if (cast<ConstantSDNode>(BVN->getOperand(i))->
+ getConstantIntValue()->isZero())
+ continue;
+ else
+ CV[i] = One;
+ }
+
+ Constant *CP = ConstantVector::get(CV);
+ SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(),
+ 16 /* alignment */);
+
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(DAG.getEntryNode());
+ Ops.push_back(CPIdx);
+
+ SmallVector<EVT, 2> ValueVTs;
+ ValueVTs.push_back(MVT::v4i1);
+ ValueVTs.push_back(MVT::Other); // chain
+ SDVTList VTs = DAG.getVTList(ValueVTs);
+
+ return DAG.getMemIntrinsicNode(PPCISD::QVLFSb,
+ dl, VTs, Ops, MVT::v4f32,
+ MachinePointerInfo::getConstantPool());
+ }
+
+ SmallVector<SDValue, 4> Stores;
+ for (unsigned i = 0; i < 4; ++i) {
+ if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+
+ unsigned Offset = 4*i;
+ SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
+
+ unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
+ if (StoreSize > 4) {
+ Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
+ BVN->getOperand(i), Idx,
+ PtrInfo.getWithOffset(Offset),
+ MVT::i32, false, false, 0));
+ } else {
+ SDValue StoreValue = BVN->getOperand(i);
+ if (StoreSize < 4)
+ StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
+
+ Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
+ StoreValue, Idx,
+ PtrInfo.getWithOffset(Offset),
+ false, false, 0));
+ }
+ }
+
+ SDValue StoreChain;
+ if (!Stores.empty())
+ StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
+ else
+ StoreChain = DAG.getEntryNode();
+
+ // Now load from v4i32 into the QPX register; this will extend it to
+ // v4i64 but not yet convert it to floating point. Nevertheless, this
+ // is typed as v4f64 because the QPX register integer states are not
+ // explicitly represented.
+
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(StoreChain);
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, MVT::i32));
+ Ops.push_back(FIdx);
+
+ SmallVector<EVT, 2> ValueVTs;
+ ValueVTs.push_back(MVT::v4f64);
+ ValueVTs.push_back(MVT::Other); // chain
+ SDVTList VTs = DAG.getVTList(ValueVTs);
+
+ SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
+ dl, VTs, Ops, MVT::v4i32, PtrInfo);
+ LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, MVT::i32),
+ LoadedVect);
+
+ SDValue FPZeros = DAG.getConstantFP(0.0, MVT::f64);
+ FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
+ FPZeros, FPZeros, FPZeros, FPZeros);
+
+ return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
+ }
+
+ // All other QPX vectors are handled by generic code.
+ if (Subtarget.hasQPX())
+ return SDValue();
+
// Check if this is a splat of a constant value.
APInt APSplatBits, APSplatUndef;
unsigned SplatBitSize;
@@ -6074,6 +6874,45 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
EVT VT = Op.getValueType();
bool isLittleEndian = Subtarget.isLittleEndian();
+ if (Subtarget.hasQPX()) {
+ if (VT.getVectorNumElements() != 4)
+ return SDValue();
+
+ if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
+
+ int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
+ if (AlignIdx != -1) {
+ return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
+ DAG.getConstant(AlignIdx, MVT::i32));
+ } else if (SVOp->isSplat()) {
+ int SplatIdx = SVOp->getSplatIndex();
+ if (SplatIdx >= 4) {
+ std::swap(V1, V2);
+ SplatIdx -= 4;
+ }
+
+ // FIXME: If SplatIdx == 0 and the input came from a load, then there is
+ // nothing to do.
+
+ return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
+ DAG.getConstant(SplatIdx, MVT::i32));
+ }
+
+ // Lower this into a qvgpci/qvfperm pair.
+
+ // Compute the qvgpci literal
+ unsigned idx = 0;
+ for (unsigned i = 0; i < 4; ++i) {
+ int m = SVOp->getMaskElt(i);
+ unsigned mm = m >= 0 ? (unsigned) m : i;
+ idx |= mm << (3-i)*3;
+ }
+
+ SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
+ DAG.getConstant(idx, MVT::i32));
+ return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
+ }
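+
+ // Worked example (illustrative): for the mask <2, 0, 3, 1>, the loop packs
+ // idx = (2 << 9) | (0 << 6) | (3 << 3) | (1 << 0) = 1049, i.e. three bits
+ // per lane with lane 0 in the most significant position.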
+
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.
@@ -6356,6 +7195,302 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
false, false, false, 0);
}
+SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ SDNode *N = Op.getNode();
+
+ assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
+ "Unknown extract_vector_elt type");
+
+ SDValue Value = N->getOperand(0);
+
+ // The first part of this is like the store lowering except that we don't
+ // need to track the chain.
+
+ // The values are now known to be -1 (false) or 1 (true). To convert this
+ // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
+ // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
+ Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
+
+ // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
+ // understand how to form the extending load.
+ SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
+ FPHalfs, FPHalfs, FPHalfs, FPHalfs);
+
+ Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
+
+ // Now convert to an integer and store.
+ Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, MVT::i32),
+ Value);
+
+ MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
+ int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx);
+ EVT PtrVT = getPointerTy();
+ SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
+
+ SDValue StoreChain = DAG.getEntryNode();
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(StoreChain);
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, MVT::i32));
+ Ops.push_back(Value);
+ Ops.push_back(FIdx);
+
+ SmallVector<EVT, 2> ValueVTs;
+ ValueVTs.push_back(MVT::Other); // chain
+ SDVTList VTs = DAG.getVTList(ValueVTs);
+
+ StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
+ dl, VTs, Ops, MVT::v4i32, PtrInfo);
+
+ // Extract the value requested.
+ unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
+
+ SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
+ PtrInfo.getWithOffset(Offset),
+ false, false, false, 0);
+
+ if (!Subtarget.useCRBits())
+ return IntVal;
+
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
+}
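+
+ // Data flow of the lowering above (illustrative summary): v4i1 --QBFLT-->
+ // {-1,1} doubles --fma--> {0,1} doubles --qvfctiwu--> words --qvstfiw-->
+ // 16-byte stack slot, then a single i32 load at offset 4*element recovers
+ // the requested lane (truncated to i1 when CR bits are in use).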
+
+/// Lowering for QPX v4i1 loads
+SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
+ SDValue LoadChain = LN->getChain();
+ SDValue BasePtr = LN->getBasePtr();
+
+ if (Op.getValueType() == MVT::v4f64 ||
+ Op.getValueType() == MVT::v4f32) {
+ EVT MemVT = LN->getMemoryVT();
+ unsigned Alignment = LN->getAlignment();
+
+ // If this load is properly aligned, then it is legal.
+ if (Alignment >= MemVT.getStoreSize())
+ return Op;
+
+ EVT ScalarVT = Op.getValueType().getScalarType(),
+ ScalarMemVT = MemVT.getScalarType();
+ unsigned Stride = ScalarMemVT.getStoreSize();
+
+ SmallVector<SDValue, 8> Vals, LoadChains;
+ for (unsigned Idx = 0; Idx < 4; ++Idx) {
+ SDValue Load;
+ if (ScalarVT != ScalarMemVT)
+ Load =
+ DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
+ BasePtr,
+ LN->getPointerInfo().getWithOffset(Idx*Stride),
+ ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(),
+ LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
+ LN->getAAInfo());
+ else
+ Load =
+ DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
+ LN->getPointerInfo().getWithOffset(Idx*Stride),
+ LN->isVolatile(), LN->isNonTemporal(),
+ LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
+ LN->getAAInfo());
+
+ if (Idx == 0 && LN->isIndexed()) {
+ assert(LN->getAddressingMode() == ISD::PRE_INC &&
+ "Unknown addressing mode on vector load");
+ Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
+ LN->getAddressingMode());
+ }
+
+ Vals.push_back(Load);
+ LoadChains.push_back(Load.getValue(1));
+
+ BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(Stride, BasePtr.getValueType()));
+ }
+
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
+ SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
+ Op.getValueType(), Vals);
+
+ if (LN->isIndexed()) {
+ SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
+ return DAG.getMergeValues(RetOps, dl);
+ }
+
+ SDValue RetOps[] = { Value, TF };
+ return DAG.getMergeValues(RetOps, dl);
+ }
+
+ assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
+ assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
+
+ // To lower v4i1 from a byte array, we load the byte elements of the
+ // vector and then reuse the BUILD_VECTOR logic.
+
+ SmallVector<SDValue, 4> VectElmts, VectElmtChains;
+ for (unsigned i = 0; i < 4; ++i) {
+ SDValue Idx = DAG.getConstant(i, BasePtr.getValueType());
+ Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
+
+ VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD,
+ dl, MVT::i32, LoadChain, Idx,
+ LN->getPointerInfo().getWithOffset(i),
+ MVT::i8 /* memory type */,
+ LN->isVolatile(), LN->isNonTemporal(),
+ LN->isInvariant(),
+ 1 /* alignment */, LN->getAAInfo()));
+ VectElmtChains.push_back(VectElmts[i].getValue(1));
+ }
+
+ LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
+ SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts);
+
+ SDValue RVals[] = { Value, LoadChain };
+ return DAG.getMergeValues(RVals, dl);
+}
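+
+ // Worked example (illustrative): a v4f64 load with Alignment == 8 is split
+ // into four f64 loads at offsets 0, 8, 16 and 24, each carrying
+ // MinAlign(8, offset) == 8; the v4i1 path instead performs four i8
+ // extending loads and rebuilds the vector with BUILD_VECTOR.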
+
+/// Lowering for QPX v4i1 stores
+SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
+ SDValue StoreChain = SN->getChain();
+ SDValue BasePtr = SN->getBasePtr();
+ SDValue Value = SN->getValue();
+
+ if (Value.getValueType() == MVT::v4f64 ||
+ Value.getValueType() == MVT::v4f32) {
+ EVT MemVT = SN->getMemoryVT();
+ unsigned Alignment = SN->getAlignment();
+
+ // If this store is properly aligned, then it is legal.
+ if (Alignment >= MemVT.getStoreSize())
+ return Op;
+
+ EVT ScalarVT = Value.getValueType().getScalarType(),
+ ScalarMemVT = MemVT.getScalarType();
+ unsigned Stride = ScalarMemVT.getStoreSize();
+
+ SmallVector<SDValue, 8> Stores;
+ for (unsigned Idx = 0; Idx < 4; ++Idx) {
+ SDValue Ex =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
+ DAG.getConstant(Idx, getVectorIdxTy()));
+ SDValue Store;
+ if (ScalarVT != ScalarMemVT)
+ Store =
+ DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
+ SN->getPointerInfo().getWithOffset(Idx*Stride),
+ ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(),
+ MinAlign(Alignment, Idx*Stride), SN->getAAInfo());
+ else
+ Store =
+ DAG.getStore(StoreChain, dl, Ex, BasePtr,
+ SN->getPointerInfo().getWithOffset(Idx*Stride),
+ SN->isVolatile(), SN->isNonTemporal(),
+ MinAlign(Alignment, Idx*Stride), SN->getAAInfo());
+
+ if (Idx == 0 && SN->isIndexed()) {
+ assert(SN->getAddressingMode() == ISD::PRE_INC &&
+ "Unknown addressing mode on vector store");
+ Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
+ SN->getAddressingMode());
+ }
+
+ BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(Stride, BasePtr.getValueType()));
+ Stores.push_back(Store);
+ }
+
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
+
+ if (SN->isIndexed()) {
+ SDValue RetOps[] = { TF, Stores[0].getValue(1) };
+ return DAG.getMergeValues(RetOps, dl);
+ }
+
+ return TF;
+ }
+
+ assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
+ assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
+
+ // The values are now known to be -1 (false) or 1 (true). To convert this
+ // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
+ // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
+ Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
+
+ // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
+ // understand how to form the extending load.
+ SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
+ FPHalfs, FPHalfs, FPHalfs, FPHalfs);
+
+ Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
+
+ // Now convert to an integer and store.
+ Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, MVT::i32),
+ Value);
+
+ MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
+ int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx);
+ EVT PtrVT = getPointerTy();
+ SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
+
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(StoreChain);
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, MVT::i32));
+ Ops.push_back(Value);
+ Ops.push_back(FIdx);
+
+ SmallVector<EVT, 2> ValueVTs;
+ ValueVTs.push_back(MVT::Other); // chain
+ SDVTList VTs = DAG.getVTList(ValueVTs);
+
+ StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
+ dl, VTs, Ops, MVT::v4i32, PtrInfo);
+
+ // Move data into the byte array.
+ SmallVector<SDValue, 4> Loads, LoadChains;
+ for (unsigned i = 0; i < 4; ++i) {
+ unsigned Offset = 4*i;
+ SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
+
+ Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
+ PtrInfo.getWithOffset(Offset),
+ false, false, false, 0));
+ LoadChains.push_back(Loads[i].getValue(1));
+ }
+
+ StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
+
+ SmallVector<SDValue, 4> Stores;
+ for (unsigned i = 0; i < 4; ++i) {
+ SDValue Idx = DAG.getConstant(i, BasePtr.getValueType());
+ Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
+
+ Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx,
+ SN->getPointerInfo().getWithOffset(i),
+ MVT::i8 /* memory type */,
+ SN->isNonTemporal(), SN->isVolatile(),
+ 1 /* alignment */, SN->getAAInfo()));
+ }
+
+ StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
+
+ return StoreChain;
+}
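+
+ // Data flow of the v4i1 store above (illustrative summary): the mask is
+ // converted to {0,1} words via QBFLT/fma/qvfctiwu, spilled to a 16-byte
+ // stack slot with qvstfiw, reloaded as four i32s, and finally truncstored
+ // as four bytes to the original address.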
+
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
if (Op.getValueType() == MVT::v4i32) {
@@ -6462,7 +7597,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG,
- SDLoc(Op));
+ SDLoc(Op));
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
@@ -6478,6 +7613,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
// For counter-based loop handling.
@@ -6492,11 +7628,19 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
- const TargetMachine &TM = getTargetMachine();
SDLoc dl(N);
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
+ case ISD::READCYCLECOUNTER: {
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
+ SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
+
+ Results.push_back(RTB);
+ Results.push_back(RTB.getValue(1));
+ Results.push_back(RTB.getValue(2));
+ break;
+ }
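+ // The READCYCLECOUNTER expansion above yields the two i32 halves of the
+ // time base; on 32-bit targets the read itself is presumably performed by
+ // the PPC::ReadTB pseudo handled in EmitInstrWithCustomInserter below.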
case ISD::INTRINSIC_W_CHAIN: {
if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
Intrinsic::ppc_is_decremented_ctr_nonzero)
@@ -6514,8 +7658,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::VAARG: {
- if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
- || TM.getSubtarget<PPCSubtarget>().isPPC64())
+ if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
return;
EVT VT = N->getValueType(0);
@@ -6597,8 +7740,7 @@ MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
bool is64bit, unsigned BinOpcode) const {
// This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction *F = BB->getParent();
@@ -6621,9 +7763,8 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned TmpReg = (!BinOpcode) ? incr :
- RegInfo.createVirtualRegister(
- is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
- (const TargetRegisterClass *) &PPC::GPRCRegClass);
+ RegInfo.createVirtualRegister( is64bit ? &PPC::G8RCRegClass
+ : &PPC::GPRCRegClass);
// thisMBB:
// ...
@@ -6660,8 +7801,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
bool is8bit, // operation
unsigned BinOpcode) const {
// This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// In 64 bit mode we have to use 64 bits for addresses, even though the
// lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
// registers without caring whether they're 32 or 64, but here we're
@@ -6689,9 +7829,8 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
- const TargetRegisterClass *RC =
- is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
- (const TargetRegisterClass *) &PPC::GPRCRegClass;
+ const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
+ : &PPC::GPRCRegClass;
unsigned PtrReg = RegInfo.createVirtualRegister(RC);
unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
@@ -6789,8 +7928,7 @@ llvm::MachineBasicBlock*
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
@@ -6863,6 +8001,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
unsigned BufReg = MI->getOperand(1).getReg();
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
+ setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
.addReg(PPC::X2)
.addImm(TOCOffset)
@@ -6873,23 +8012,21 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// Naked functions never have a base pointer, and so we use r1. For all
// other functions, this decision must be delayed until PEI.
unsigned BaseReg;
- if (MF->getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::Naked))
+ if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
else
BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
MIB = BuildMI(*thisMBB, MI, DL,
TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
- .addReg(BaseReg)
- .addImm(BPOffset)
- .addReg(BufReg);
+ .addReg(BaseReg)
+ .addImm(BPOffset)
+ .addReg(BufReg);
MIB.setMemRefs(MMOBegin, MMOEnd);
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
- const PPCRegisterInfo *TRI =
- getTargetMachine().getSubtarget<PPCSubtarget>().getRegisterInfo();
+ const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
MIB.addRegMask(TRI->getNoPreservedMask());
BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
@@ -6903,8 +8040,9 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// mainMBB:
// mainDstReg = 0
- MIB = BuildMI(mainMBB, DL,
- TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
+ MIB =
+ BuildMI(mainMBB, DL,
+ TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
// Store IP
if (Subtarget.isPPC64()) {
@@ -6938,8 +8076,7 @@ MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
@@ -6958,10 +8095,13 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Since FP is only updated here but NOT referenced, it's treated as GPR.
unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
- unsigned BP = (PVT == MVT::i64) ? PPC::X30 :
- (Subtarget.isSVR4ABI() &&
- MF->getTarget().getRelocationModel() == Reloc::PIC_ ?
- PPC::R29 : PPC::R30);
+ unsigned BP =
+ (PVT == MVT::i64)
+ ? PPC::X30
+ : (Subtarget.isSVR4ABI() &&
+ MF->getTarget().getRelocationModel() == Reloc::PIC_
+ ? PPC::R29
+ : PPC::R30);
MachineInstrBuilder MIB;
@@ -7024,6 +8164,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Reload TOC
if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
+ setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
.addImm(TOCOffset)
.addReg(BufReg);
@@ -7043,6 +8184,22 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
+ if (MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ // Call lowering should have added an r2 operand to indicate a dependence
+ // on the TOC base pointer value. It can't however, because there is no
+ // way to mark the dependence as implicit there, and so the stackmap code
+ // will confuse it with a regular operand. Instead, add the dependence
+ // here.
+ setUsesTOCBasePtr(*BB->getParent());
+ MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
+ }
+
+ return emitPatchPoint(MI, BB);
+ }
+
if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 ||
MI->getOpcode() == PPC::EH_SjLj_SetJmp64) {
return emitEHSjLjSetJmp(MI, BB);
@@ -7051,8 +8208,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return emitEHSjLjLongJmp(MI, BB);
}
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// To "insert" these instructions we actually have to insert their
// control-flow patterns.
@@ -7063,9 +8219,9 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8 ||
- MI->getOpcode() == PPC::SELECT_I4 ||
- MI->getOpcode() == PPC::SELECT_I8)) {
+ MI->getOpcode() == PPC::SELECT_CC_I8 ||
+ MI->getOpcode() == PPC::SELECT_I4 ||
+ MI->getOpcode() == PPC::SELECT_I8)) {
SmallVector<MachineOperand, 2> Cond;
if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
MI->getOpcode() == PPC::SELECT_CC_I8)
@@ -7075,8 +8231,6 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
Cond.push_back(MI->getOperand(1));
DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(),
Cond, MI->getOperand(2).getReg(),
MI->getOperand(3).getReg());
@@ -7084,6 +8238,9 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MI->getOpcode() == PPC::SELECT_CC_I8 ||
MI->getOpcode() == PPC::SELECT_CC_F4 ||
MI->getOpcode() == PPC::SELECT_CC_F8 ||
+ MI->getOpcode() == PPC::SELECT_CC_QFRC ||
+ MI->getOpcode() == PPC::SELECT_CC_QSRC ||
+ MI->getOpcode() == PPC::SELECT_CC_QBRC ||
MI->getOpcode() == PPC::SELECT_CC_VRRC ||
MI->getOpcode() == PPC::SELECT_CC_VSFRC ||
MI->getOpcode() == PPC::SELECT_CC_VSRC ||
@@ -7091,6 +8248,9 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MI->getOpcode() == PPC::SELECT_I8 ||
MI->getOpcode() == PPC::SELECT_F4 ||
MI->getOpcode() == PPC::SELECT_F8 ||
+ MI->getOpcode() == PPC::SELECT_QFRC ||
+ MI->getOpcode() == PPC::SELECT_QSRC ||
+ MI->getOpcode() == PPC::SELECT_QBRC ||
MI->getOpcode() == PPC::SELECT_VRRC ||
MI->getOpcode() == PPC::SELECT_VSFRC ||
MI->getOpcode() == PPC::SELECT_VSRC) {
@@ -7124,6 +8284,9 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MI->getOpcode() == PPC::SELECT_I8 ||
MI->getOpcode() == PPC::SELECT_F4 ||
MI->getOpcode() == PPC::SELECT_F8 ||
+ MI->getOpcode() == PPC::SELECT_QFRC ||
+ MI->getOpcode() == PPC::SELECT_QSRC ||
+ MI->getOpcode() == PPC::SELECT_QBRC ||
MI->getOpcode() == PPC::SELECT_VRRC ||
MI->getOpcode() == PPC::SELECT_VSFRC ||
MI->getOpcode() == PPC::SELECT_VSRC) {
@@ -7151,6 +8314,51 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
TII->get(PPC::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
+ } else if (MI->getOpcode() == PPC::ReadTB) {
+ // To read the 64-bit time-base register on a 32-bit target, we read the
+ // two halves. Should the counter have wrapped while it was being read, we
+ // need to try again.
+ // ...
+ // readLoop:
+ // mfspr Rx,TBU # load from TBU
+ // mfspr Ry,TB # load from TB
+ // mfspr Rz,TBU # load from TBU
+ // cmpw crX,Rx,Rz # check if 'old' = 'new'
+ // bne readLoop # branch if they're not equal
+ // ...
+
+ MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ DebugLoc dl = MI->getDebugLoc();
+ F->insert(It, readMBB);
+ F->insert(It, sinkMBB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ BB->addSuccessor(readMBB);
+ BB = readMBB;
+
+ MachineRegisterInfo &RegInfo = F->getRegInfo();
+ unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
+ unsigned LoReg = MI->getOperand(0).getReg();
+ unsigned HiReg = MI->getOperand(1).getReg();
+
+ BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
+ BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
+ BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
+
+ unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
+
+ BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
+ .addReg(HiReg).addReg(ReadAgainReg);
+ BuildMI(BB, dl, TII->get(PPC::BCC))
+ .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
+
+ BB->addSuccessor(readMBB);
+ BB->addSuccessor(sinkMBB);
}
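
// Why the double TBU read above works (illustrative): if TB carried into TBU
// between the two TBU reads (say the sequence observes TBU=4, TB=0x00000002,
// TBU=5), the cmpw fails and the loop retries; once both TBU reads agree, the
// (TBU,TB) pair is a consistent 64-bit snapshot.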
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
@@ -7309,9 +8517,8 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
- const TargetRegisterClass *RC =
- is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
- (const TargetRegisterClass *) &PPC::GPRCRegClass;
+ const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
+ : &PPC::GPRCRegClass;
unsigned PtrReg = RegInfo.createVirtualRegister(RC);
unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
@@ -7453,7 +8660,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
// Restore FPSCR value.
- BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg);
+ BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
} else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
@@ -7493,9 +8700,11 @@ SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
bool &UseOneConstNR) const {
EVT VT = Operand.getValueType();
if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
- (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
+ (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
(VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
- (VT == MVT::v2f64 && Subtarget.hasVSX())) {
+ (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
+ (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
+ (VT == MVT::v4f64 && Subtarget.hasQPX())) {
// Convergence is quadratic, so we essentially double the number of digits
// correct after every iteration. For both FRE and FRSQRTE, the minimum
// architected relative accuracy is 2^-5. When hasRecipPrec(), this is
@@ -7514,9 +8723,11 @@ SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
unsigned &RefinementSteps) const {
EVT VT = Operand.getValueType();
if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
- (VT == MVT::f64 && Subtarget.hasFRE()) ||
+ (VT == MVT::f64 && Subtarget.hasFRE()) ||
(VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
- (VT == MVT::v2f64 && Subtarget.hasVSX())) {
+ (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
+ (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
+ (VT == MVT::v4f64 && Subtarget.hasQPX())) {
// Convergence is quadratic, so we essentially double the number of digits
// correct after every iteration. For both FRE and FRSQRTE, the minimum
// architected relative accuracy is 2^-5. When hasRecipPrec(), this is
@@ -7529,6 +8740,28 @@ SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
return SDValue();
}
+bool PPCTargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
+ // Note: This functionality is used only when unsafe-fp-math is enabled, and
+ // on cores with reciprocal estimates (which are used when unsafe-fp-math is
+ // enabled for division), this functionality is redundant with the default
+ // combiner logic (once the division -> reciprocal/multiply transformation
+ // has taken place). As a result, this matters more for older cores than for
+ // newer ones.
+
+ // Combine multiple FDIVs with the same divisor into multiple FMULs by the
+  // reciprocal if there are two or more FDIVs (for embedded cores with only
+  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
+ switch (Subtarget.getDarwinDirective()) {
+ default:
+ return NumUsers > 2;
+ case PPC::DIR_440:
+ case PPC::DIR_A2:
+ case PPC::DIR_E500mc:
+ case PPC::DIR_E5500:
+ return NumUsers > 1;
+ }
+}
+
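As a source-level illustration of the transformation this hook gates (plain C++, nothing LLVM-specific; the rewrite is only valid under unsafe-fp-math, since 1.0/d is not exact):

    // Before: three fdivs sharing the divisor d.
    void scale(double a, double b, double c, double d, double out[3]) {
      out[0] = a / d; out[1] = b / d; out[2] = c / d;
    }
    // After the combine: one fdiv and three fmuls.
    void scaleCombined(double a, double b, double c, double d, double out[3]) {
      double r = 1.0 / d;
      out[0] = a * r; out[1] = b * r; out[2] = c * r;
    }

Per the thresholds above, the embedded cores listed in the switch pay off already at two users; the default (generic OOO) case requires three.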
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
unsigned Bytes, int Dist,
SelectionDAG &DAG) {
@@ -7580,6 +8813,24 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
EVT VT;
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default: return false;
+ case Intrinsic::ppc_qpx_qvlfd:
+ case Intrinsic::ppc_qpx_qvlfda:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfs:
+ case Intrinsic::ppc_qpx_qvlfsa:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcd:
+ case Intrinsic::ppc_qpx_qvlfcda:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcs:
+ case Intrinsic::ppc_qpx_qvlfcsa:
+ VT = MVT::v2f32;
+ break;
+ case Intrinsic::ppc_qpx_qvlfiwa:
+ case Intrinsic::ppc_qpx_qvlfiwz:
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
case Intrinsic::ppc_vsx_lxvw4x:
@@ -7606,6 +8857,24 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
EVT VT;
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default: return false;
+ case Intrinsic::ppc_qpx_qvstfd:
+ case Intrinsic::ppc_qpx_qvstfda:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfs:
+ case Intrinsic::ppc_qpx_qvstfsa:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcd:
+ case Intrinsic::ppc_qpx_qvstfcda:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcs:
+ case Intrinsic::ppc_qpx_qvstfcsa:
+ VT = MVT::v2f32;
+ break;
+ case Intrinsic::ppc_qpx_qvstfiw:
+ case Intrinsic::ppc_qpx_qvstfiwa:
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
case Intrinsic::ppc_vsx_stxvw4x:
@@ -7704,8 +8973,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
- assert(Subtarget.useCRBits() &&
- "Expecting to be tracking CR bits");
+ assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
// If we're tracking CR bits, we need to be careful that we don't have:
// trunc(binary-ops(zext(x), zext(y)))
// or
@@ -8001,10 +9269,8 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
N->getValueType(0) != MVT::i64)
return SDValue();
- if (!((N->getOperand(0).getValueType() == MVT::i1 &&
- Subtarget.useCRBits()) ||
- (N->getOperand(0).getValueType() == MVT::i32 &&
- Subtarget.isPPC64())))
+ if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
+ (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
return SDValue();
if (N->getOperand(0).getOpcode() != ISD::AND &&
@@ -8053,6 +9319,10 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
}
}
+ // The operands of a select that must be truncated when the select is
+ // promoted because the operand is actually part of the to-be-promoted set.
+ DenseMap<SDNode *, EVT> SelectTruncOp[2];
+
// Make sure that this is a self-contained cluster of operations (which
// is not quite the same thing as saying that everything has only one
// use).
@@ -8067,18 +9337,19 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
if (User != N && !Visited.count(User))
return SDValue();
- // Make sure that we're not going to promote the non-output-value
- // operand(s) or SELECT or SELECT_CC.
- // FIXME: Although we could sometimes handle this, and it does occur in
- // practice that one of the condition inputs to the select is also one of
- // the outputs, we currently can't deal with this.
+ // If we're going to promote the non-output-value operand(s) or SELECT or
+ // SELECT_CC, record them for truncation.
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == Inputs[i])
- return SDValue();
+ SelectTruncOp[0].insert(std::make_pair(User,
+ User->getOperand(0).getValueType()));
} else if (User->getOpcode() == ISD::SELECT_CC) {
- if (User->getOperand(0) == Inputs[i] ||
- User->getOperand(1) == Inputs[i])
- return SDValue();
+ if (User->getOperand(0) == Inputs[i])
+ SelectTruncOp[0].insert(std::make_pair(User,
+ User->getOperand(0).getValueType()));
+ if (User->getOperand(1) == Inputs[i])
+ SelectTruncOp[1].insert(std::make_pair(User,
+ User->getOperand(1).getValueType()));
}
}
}
@@ -8091,18 +9362,19 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
if (User != N && !Visited.count(User))
return SDValue();
- // Make sure that we're not going to promote the non-output-value
- // operand(s) or SELECT or SELECT_CC.
- // FIXME: Although we could sometimes handle this, and it does occur in
- // practice that one of the condition inputs to the select is also one of
- // the outputs, we currently can't deal with this.
+ // If we're going to promote the non-output-value operand(s) or SELECT or
+ // SELECT_CC, record them for truncation.
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == PromOps[i])
- return SDValue();
+ SelectTruncOp[0].insert(std::make_pair(User,
+ User->getOperand(0).getValueType()));
} else if (User->getOpcode() == ISD::SELECT_CC) {
- if (User->getOperand(0) == PromOps[i] ||
- User->getOperand(1) == PromOps[i])
- return SDValue();
+ if (User->getOperand(0) == PromOps[i])
+ SelectTruncOp[0].insert(std::make_pair(User,
+ User->getOperand(0).getValueType()));
+ if (User->getOperand(1) == PromOps[i])
+ SelectTruncOp[1].insert(std::make_pair(User,
+ User->getOperand(1).getValueType()));
}
}
}
@@ -8183,6 +9455,19 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
continue;
}
+ // For SELECT and SELECT_CC nodes, we do a similar check for any
+ // to-be-promoted comparison inputs.
+ if (PromOp.getOpcode() == ISD::SELECT ||
+ PromOp.getOpcode() == ISD::SELECT_CC) {
+ if ((SelectTruncOp[0].count(PromOp.getNode()) &&
+ PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
+ (SelectTruncOp[1].count(PromOp.getNode()) &&
+ PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
+ PromOps.insert(PromOps.begin(), PromOp);
+ continue;
+ }
+ }
+
SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
PromOp.getNode()->op_end());
@@ -8201,6 +9486,18 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
}
+ // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
+ // truncate them again to the original value type.
+ if (PromOp.getOpcode() == ISD::SELECT ||
+ PromOp.getOpcode() == ISD::SELECT_CC) {
+ auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
+ if (SI0 != SelectTruncOp[0].end())
+ Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
+ auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
+ if (SI1 != SelectTruncOp[1].end())
+ Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
+ }
+
DAG.ReplaceAllUsesOfValueWith(PromOp,
DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
}
@@ -8227,9 +9524,177 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
N->getOperand(0), ShiftCst), ShiftCst);
}
+SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ assert((N->getOpcode() == ISD::SINT_TO_FP ||
+ N->getOpcode() == ISD::UINT_TO_FP) &&
+ "Need an int -> FP conversion node here");
+
+ if (!Subtarget.has64BitSupport())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+ SDValue Op(N, 0);
+
+  // Don't handle ppc_fp128 or i1 conversions here.
+ if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
+ return SDValue();
+ if (Op.getOperand(0).getValueType() == MVT::i1)
+ return SDValue();
+
+  // For i32 intermediate values, unfortunately, the conversion functions
+  // leave the upper 32 bits of the value undefined. Within the set of
+  // scalar instructions, we have no method for zero- or sign-extending the
+  // value. Thus, we cannot handle i32 intermediate values here.
+ if (Op.getOperand(0).getValueType() == MVT::i32)
+ return SDValue();
+
+ assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
+ "UINT_TO_FP is supported only with FPCVT");
+
+ // If we have FCFIDS, then use it when converting to single-precision.
+ // Otherwise, convert to double-precision and then round.
+ unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
+ ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
+ : PPCISD::FCFIDS)
+ : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
+ : PPCISD::FCFID);
+ MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
+ ? MVT::f32
+ : MVT::f64;
+
+  // If we're converting from a float to an int and back to a float again,
+  // then we don't need the store/load pair at all.
+ if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
+ Subtarget.hasFPCVT()) ||
+ (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
+ SDValue Src = Op.getOperand(0).getOperand(0);
+ if (Src.getValueType() == MVT::f32) {
+ Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
+ DCI.AddToWorklist(Src.getNode());
+ }
+
+ unsigned FCTOp =
+ Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
+ PPCISD::FCTIDUZ;
+
+ SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
+ SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
+
+ if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
+ FP = DAG.getNode(ISD::FP_ROUND, dl,
+ MVT::f32, FP, DAG.getIntPtrConstant(0));
+ DCI.AddToWorklist(FP.getNode());
+ }
+
+ return FP;
+ }
+
+ return SDValue();
+}
+
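At the source level, the pattern combineFPToIntToFP matches is a float -> int -> float round trip; a small sketch of C++ that, under the guards above (64-bit support, FPCVT where required), now lowers to fctidz + fcfid with no stack traffic:

    double truncToIntegral(double x) {
      long long i = (long long)x;   // FP_TO_SINT, selected as fctidz
      return (double)i;             // SINT_TO_FP, selected as fcfid
    }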
+// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
+// builtins) into loads with swaps.
+SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+ SDValue Chain;
+ SDValue Base;
+ MachineMemOperand *MMO;
+
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode for little endian VSX load");
+ case ISD::LOAD: {
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ Chain = LD->getChain();
+ Base = LD->getBasePtr();
+ MMO = LD->getMemOperand();
+ // If the MMO suggests this isn't a load of a full vector, leave
+ // things alone. For a built-in, we have to make the change for
+    // correctness, so if there is a size problem, that will be a bug.
+ if (MMO->getSize() < 16)
+ return SDValue();
+ break;
+ }
+ case ISD::INTRINSIC_W_CHAIN: {
+ MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
+ Chain = Intrin->getChain();
+ Base = Intrin->getBasePtr();
+ MMO = Intrin->getMemOperand();
+ break;
+ }
+ }
+
+ MVT VecTy = N->getValueType(0).getSimpleVT();
+ SDValue LoadOps[] = { Chain, Base };
+ SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
+ DAG.getVTList(VecTy, MVT::Other),
+ LoadOps, VecTy, MMO);
+ DCI.AddToWorklist(Load.getNode());
+ Chain = Load.getValue(1);
+ SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
+ DAG.getVTList(VecTy, MVT::Other), Chain, Load);
+ DCI.AddToWorklist(Swap.getNode());
+ return Swap;
+}
+
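A plain-C++ model of the two nodes just built, for intuition only (no LLVM or vector APIs; the two-element array stands in for the register lanes): on a little-endian core, lxvd2x delivers the two doublewords in reversed lane order, and xxswapd puts them back.

    #include <algorithm>

    // What LXVD2X leaves in the register on a little-endian core.
    void lxvd2xModel(const double *mem, double lanes[2]) {
      lanes[0] = mem[1];
      lanes[1] = mem[0];
    }

    void loadV2F64LE(const double *mem, double lanes[2]) {
      lxvd2xModel(mem, lanes);        // PPCISD::LXVD2X
      std::swap(lanes[0], lanes[1]);  // PPCISD::XXSWAPD restores lane order
    }

expandVSXStoreForLE below is the mirror image: swap first, then store.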
+// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
+// builtins) into stores with swaps.
+SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+ SDValue Chain;
+ SDValue Base;
+ unsigned SrcOpnd;
+ MachineMemOperand *MMO;
+
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode for little endian VSX store");
+ case ISD::STORE: {
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ Chain = ST->getChain();
+ Base = ST->getBasePtr();
+ MMO = ST->getMemOperand();
+ SrcOpnd = 1;
+ // If the MMO suggests this isn't a store of a full vector, leave
+ // things alone. For a built-in, we have to make the change for
+    // correctness, so if there is a size problem, that will be a bug.
+ if (MMO->getSize() < 16)
+ return SDValue();
+ break;
+ }
+ case ISD::INTRINSIC_VOID: {
+ MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
+ Chain = Intrin->getChain();
+ // Intrin->getBasePtr() oddly does not get what we want.
+ Base = Intrin->getOperand(3);
+ MMO = Intrin->getMemOperand();
+ SrcOpnd = 2;
+ break;
+ }
+ }
+
+ SDValue Src = N->getOperand(SrcOpnd);
+ MVT VecTy = Src.getValueType().getSimpleVT();
+ SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
+ DAG.getVTList(VecTy, MVT::Other), Chain, Src);
+ DCI.AddToWorklist(Swap.getNode());
+ Chain = Swap.getValue(1);
+ SDValue StoreOps[] = { Chain, Swap, Base };
+ SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
+ DAG.getVTList(MVT::Other),
+ StoreOps, VecTy, MMO);
+ DCI.AddToWorklist(Store.getNode());
+ return Store;
+}
+
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
- const TargetMachine &TM = getTargetMachine();
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
switch (N->getOpcode()) {
@@ -8262,40 +9727,11 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SELECT_CC:
return DAGCombineTruncBoolExt(N, DCI);
case ISD::SINT_TO_FP:
- if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
- if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
- // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
- // We allow the src/dst to be either f32/f64, but the intermediate
- // type must be i64.
- if (N->getOperand(0).getValueType() == MVT::i64 &&
- N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
- SDValue Val = N->getOperand(0).getOperand(0);
- if (Val.getValueType() == MVT::f32) {
- Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
- DCI.AddToWorklist(Val.getNode());
- }
-
- Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
- DCI.AddToWorklist(Val.getNode());
- Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
- DCI.AddToWorklist(Val.getNode());
- if (N->getValueType(0) == MVT::f32) {
- Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
- DAG.getIntPtrConstant(0));
- DCI.AddToWorklist(Val.getNode());
- }
- return Val;
- } else if (N->getOperand(0).getValueType() == MVT::i32) {
- // If the intermediate type is i32, we can avoid the load/store here
- // too.
- }
- }
- }
- break;
- case ISD::STORE:
+ case ISD::UINT_TO_FP:
+ return combineFPToIntToFP(N, DCI);
+ case ISD::STORE: {
// Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
- if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
- !cast<StoreSDNode>(N)->isTruncatingStore() &&
+ if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
N->getOperand(1).getValueType() == MVT::i32 &&
N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
@@ -8326,8 +9762,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
N->getOperand(1).getNode()->hasOneUse() &&
(N->getOperand(1).getValueType() == MVT::i32 ||
N->getOperand(1).getValueType() == MVT::i16 ||
- (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
- TM.getSubtarget<PPCSubtarget>().isPPC64() &&
+ (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
N->getOperand(1).getValueType() == MVT::i64))) {
SDValue BSwapOp = N->getOperand(1).getOperand(0);
// Do an any-extend to 32-bits if this is a half-word input.
@@ -8343,20 +9778,45 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
Ops, cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
}
+
+    // For little endian, VSX stores require generating xxswapd/stxvd2x.
+ EVT VT = N->getOperand(1).getValueType();
+ if (VT.isSimple()) {
+ MVT StoreVT = VT.getSimpleVT();
+ if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
+ (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
+ StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
+ return expandVSXStoreForLE(N, DCI);
+ }
break;
+ }
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT VT = LD->getValueType(0);
- Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+
+ // For little endian, VSX loads require generating lxvd2x/xxswapd.
+ if (VT.isSimple()) {
+ MVT LoadVT = VT.getSimpleVT();
+ if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
+ (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
+ LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
+ return expandVSXLoadForLE(N, DCI);
+ }
+
+ EVT MemVT = LD->getMemoryVT();
+ Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
- if (ISD::isNON_EXTLoad(N) && VT.isVector() &&
- TM.getSubtarget<PPCSubtarget>().hasAltivec() &&
- // P8 and later hardware should just use LOAD.
- !TM.getSubtarget<PPCSubtarget>().hasP8Vector() &&
- (VT == MVT::v16i8 || VT == MVT::v8i16 ||
- VT == MVT::v4i32 || VT == MVT::v4f32) &&
+ Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
+ unsigned ScalarABIAlignment = getDataLayout()->getABITypeAlignment(STy);
+ if (LD->isUnindexed() && VT.isVector() &&
+ ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
+ // P8 and later hardware should just use LOAD.
+ !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
+ VT == MVT::v4i32 || VT == MVT::v4f32)) ||
+ (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
+ LD->getAlignment() >= ScalarABIAlignment)) &&
LD->getAlignment() < ABIAlignment) {
- // This is a type-legal unaligned Altivec load.
+ // This is a type-legal unaligned Altivec or QPX load.
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
bool isLittleEndian = Subtarget.isLittleEndian();
@@ -8385,10 +9845,28 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// a different base address offset from this one by an aligned amount.
// The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
// optimization later.
- Intrinsic::ID Intr = (isLittleEndian ?
- Intrinsic::ppc_altivec_lvsr :
- Intrinsic::ppc_altivec_lvsl);
- SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, MVT::v16i8);
+ Intrinsic::ID Intr, IntrLD, IntrPerm;
+ MVT PermCntlTy, PermTy, LDTy;
+ if (Subtarget.hasAltivec()) {
+ Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
+ Intrinsic::ppc_altivec_lvsl;
+ IntrLD = Intrinsic::ppc_altivec_lvx;
+ IntrPerm = Intrinsic::ppc_altivec_vperm;
+ PermCntlTy = MVT::v16i8;
+ PermTy = MVT::v4i32;
+ LDTy = MVT::v4i32;
+ } else {
+ Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
+ Intrinsic::ppc_qpx_qvlpcls;
+ IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
+ Intrinsic::ppc_qpx_qvlfs;
+ IntrPerm = Intrinsic::ppc_qpx_qvfperm;
+ PermCntlTy = MVT::v4f64;
+ PermTy = MVT::v4f64;
+ LDTy = MemVT.getSimpleVT();
+ }
+
+ SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
// Create the new MMO for the new base load. It is like the original MMO,
// but represents an area in memory almost twice the vector size centered
@@ -8397,18 +9875,16 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// original unaligned load.
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *BaseMMO =
- MF.getMachineMemOperand(LD->getMemOperand(),
- -LD->getMemoryVT().getStoreSize()+1,
- 2*LD->getMemoryVT().getStoreSize()-1);
+ MF.getMachineMemOperand(LD->getMemOperand(), -MemVT.getStoreSize()+1,
+ 2*MemVT.getStoreSize()-1);
// Create the new base load.
- SDValue LDXIntID = DAG.getTargetConstant(Intrinsic::ppc_altivec_lvx,
- getPointerTy());
+ SDValue LDXIntID = DAG.getTargetConstant(IntrLD, getPointerTy());
SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
SDValue BaseLoad =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
- DAG.getVTList(MVT::v4i32, MVT::Other),
- BaseLoadOps, MVT::v4i32, BaseMMO);
+ DAG.getVTList(PermTy, MVT::Other),
+ BaseLoadOps, LDTy, BaseMMO);
// Note that the value of IncOffset (which is provided to the next
// load's pointer info offset value, and thus used to calculate the
@@ -8432,12 +9908,12 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
MachineMemOperand *ExtraMMO =
MF.getMachineMemOperand(LD->getMemOperand(),
- 1, 2*LD->getMemoryVT().getStoreSize()-1);
+ 1, 2*MemVT.getStoreSize()-1);
SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
SDValue ExtraLoad =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
- DAG.getVTList(MVT::v4i32, MVT::Other),
- ExtraLoadOps, MVT::v4i32, ExtraMMO);
+ DAG.getVTList(PermTy, MVT::Other),
+ ExtraLoadOps, LDTy, ExtraMMO);
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
BaseLoad.getValue(1), ExtraLoad.getValue(1));
@@ -8449,14 +9925,19 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// and ExtraLoad here.
SDValue Perm;
if (isLittleEndian)
- Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
+ Perm = BuildIntrinsicOp(IntrPerm,
ExtraLoad, BaseLoad, PermCntl, DAG, dl);
else
- Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
+ Perm = BuildIntrinsicOp(IntrPerm,
BaseLoad, ExtraLoad, PermCntl, DAG, dl);
- if (VT != MVT::v4i32)
- Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm);
+ if (VT != PermTy)
+ Perm = Subtarget.hasAltivec() ?
+ DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
+ DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
+ DAG.getTargetConstant(1, MVT::i64));
+ // second argument is 1 because this rounding
+ // is always exact.
// The output of the permutation is our loaded result, the TokenFactor is
// our new chain.
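For intuition, here is a plain-C++ model of the big-endian lvsl/lvx/vperm realignment assembled above (the little-endian lvsr path mirrors it with the load operands swapped, as in the isLittleEndian branch below); illustrative only, with no AltiVec APIs:

    #include <cstdint>
    #include <cstring>

    void unalignedLoad16(const uint8_t *p, uint8_t out[16]) {
      uintptr_t a = reinterpret_cast<uintptr_t>(p);
      const uint8_t *base = reinterpret_cast<const uint8_t *>(a & ~uintptr_t(15));
      unsigned sh = a & 15;             // encoded by the lvsl control vector
      uint8_t lo[16], hi[16];
      std::memcpy(lo, base, 16);        // BaseLoad: lvx ignores the low 4 bits
      std::memcpy(hi, base + 16, 16);   // ExtraLoad: the next aligned quadword
      for (unsigned i = 0; i != 16; ++i)  // vperm over concat(lo, hi)
        out[i] = (sh + i < 16) ? lo[sh + i] : hi[sh + i - 16];
    }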
@@ -8465,43 +9946,96 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
}
break;
- case ISD::INTRINSIC_WO_CHAIN: {
- bool isLittleEndian = Subtarget.isLittleEndian();
- Intrinsic::ID Intr = (isLittleEndian ?
- Intrinsic::ppc_altivec_lvsr :
- Intrinsic::ppc_altivec_lvsl);
- if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() == Intr &&
+ case ISD::INTRINSIC_WO_CHAIN: {
+ bool isLittleEndian = Subtarget.isLittleEndian();
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
+ : Intrinsic::ppc_altivec_lvsl);
+ if ((IID == Intr ||
+ IID == Intrinsic::ppc_qpx_qvlpcld ||
+ IID == Intrinsic::ppc_qpx_qvlpcls) &&
N->getOperand(1)->getOpcode() == ISD::ADD) {
- SDValue Add = N->getOperand(1);
-
- if (DAG.MaskedValueIsZero(Add->getOperand(1),
- APInt::getAllOnesValue(4 /* 16 byte alignment */).zext(
- Add.getValueType().getScalarType().getSizeInBits()))) {
- SDNode *BasePtr = Add->getOperand(0).getNode();
- for (SDNode::use_iterator UI = BasePtr->use_begin(),
- UE = BasePtr->use_end(); UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
- cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
- Intr) {
- // We've found another LVSL/LVSR, and this address is an aligned
- // multiple of that one. The results will be the same, so use the
- // one we've just found instead.
-
- return SDValue(*UI, 0);
+ SDValue Add = N->getOperand(1);
+
+ int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
+ 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
+
+ if (DAG.MaskedValueIsZero(
+ Add->getOperand(1),
+ APInt::getAllOnesValue(Bits /* alignment */)
+ .zext(
+ Add.getValueType().getScalarType().getSizeInBits()))) {
+ SDNode *BasePtr = Add->getOperand(0).getNode();
+ for (SDNode::use_iterator UI = BasePtr->use_begin(),
+ UE = BasePtr->use_end();
+ UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
+ // We've found another LVSL/LVSR, and this address is an aligned
+ // multiple of that one. The results will be the same, so use the
+ // one we've just found instead.
+
+ return SDValue(*UI, 0);
+ }
+ }
+ }
+
+ if (isa<ConstantSDNode>(Add->getOperand(1))) {
+ SDNode *BasePtr = Add->getOperand(0).getNode();
+ for (SDNode::use_iterator UI = BasePtr->use_begin(),
+ UE = BasePtr->use_end(); UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::ADD &&
+ isa<ConstantSDNode>(UI->getOperand(1)) &&
+ (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
+ cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
+ (1ULL << Bits) == 0) {
+ SDNode *OtherAdd = *UI;
+ for (SDNode::use_iterator VI = OtherAdd->use_begin(),
+ VE = OtherAdd->use_end(); VI != VE; ++VI) {
+ if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
+ return SDValue(*VI, 0);
+ }
+ }
+ }
}
}
}
}
- }
break;
+ case ISD::INTRINSIC_W_CHAIN: {
+ // For little endian, VSX loads require generating lxvd2x/xxswapd.
+ if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) {
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+ case Intrinsic::ppc_vsx_lxvw4x:
+ case Intrinsic::ppc_vsx_lxvd2x:
+ return expandVSXLoadForLE(N, DCI);
+ }
+ }
+ break;
+ }
+ case ISD::INTRINSIC_VOID: {
+ // For little endian, VSX stores require generating xxswapd/stxvd2x.
+ if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) {
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+ case Intrinsic::ppc_vsx_stxvw4x:
+ case Intrinsic::ppc_vsx_stxvd2x:
+ return expandVSXStoreForLE(N, DCI);
+ }
+ }
+ break;
+ }
case ISD::BSWAP:
// Turn BSWAP (LOAD) -> lhbrx/lwbrx.
if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
N->getOperand(0).hasOneUse() &&
(N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
- (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
- TM.getSubtarget<PPCSubtarget>().isPPC64() &&
+ (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
N->getValueType(0) == MVT::i64))) {
SDValue Load = N->getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(Load);
@@ -8705,6 +10239,38 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
+SDValue
+PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG,
+ std::vector<SDNode *> *Created) const {
+ // fold (sdiv X, pow2)
+ EVT VT = N->getValueType(0);
+ if (VT == MVT::i64 && !Subtarget.isPPC64())
+ return SDValue();
+ if ((VT != MVT::i32 && VT != MVT::i64) ||
+ !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue N0 = N->getOperand(0);
+
+ bool IsNegPow2 = (-Divisor).isPowerOf2();
+ unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
+ SDValue ShiftAmt = DAG.getConstant(Lg2, VT);
+
+ SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
+ if (Created)
+ Created->push_back(Op.getNode());
+
+ if (IsNegPow2) {
+ Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), Op);
+ if (Created)
+ Created->push_back(Op.getNode());
+ }
+
+ return Op;
+}
+
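The SRA_ADDZE node pairs sra[wd]i with addze because an arithmetic shift alone rounds toward negative infinity, while sdiv must round toward zero. A self-contained sketch of the 32-bit case for 0 < k < 32 (assumes arithmetic right shift of signed values, as PPC provides):

    #include <cstdint>

    int32_t sdivPow2(int32_t x, unsigned k) {
      int32_t q = x >> k;  // srawi; also sets CA when x < 0 and bits fall off
      bool ca = x < 0 && (uint32_t(x) & ((1u << k) - 1u)) != 0;
      return q + (ca ? 1 : 0);  // addze folds the carry back in
    }

For a negative power-of-two divisor, the SUB from zero above then negates the quotient.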
//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//
@@ -8746,6 +10312,38 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}
+unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+ switch (Subtarget.getDarwinDirective()) {
+ default: break;
+ case PPC::DIR_970:
+ case PPC::DIR_PWR4:
+ case PPC::DIR_PWR5:
+ case PPC::DIR_PWR5X:
+ case PPC::DIR_PWR6:
+ case PPC::DIR_PWR6X:
+ case PPC::DIR_PWR7:
+ case PPC::DIR_PWR8: {
+ if (!ML)
+ break;
+
+ const PPCInstrInfo *TII = Subtarget.getInstrInfo();
+
+ // For small loops (between 5 and 8 instructions), align to a 32-byte
+ // boundary so that the entire loop fits in one instruction-cache line.
+ uint64_t LoopSize = 0;
+ for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
+ for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J)
+ LoopSize += TII->GetInstSizeInBytes(J);
+
+ if (LoopSize > 16 && LoopSize <= 32)
+ return 5;
+
+ break;
+ }
+ }
+
+ return TargetLowering::getPrefLoopAlignment(ML);
+}
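As a numeric check of the window above (a sketch only; PPC instructions are a fixed 4 bytes, and the hook returns log2 of the requested alignment):

    unsigned prefLoopAlignLog2(unsigned numInsts) {
      unsigned bytes = numInsts * 4;
      if (bytes > 16 && bytes <= 32)  // i.e. 5 to 8 instructions
        return 5;                     // 1 << 5 == 32-byte alignment
      return 0;                       // sketch only: defer to the default
    }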
/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
@@ -8833,8 +10431,9 @@ PPCTargetLowering::getSingleConstraintMatchWeight(
return weight;
}
-std::pair<unsigned, const TargetRegisterClass*>
-PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+std::pair<unsigned, const TargetRegisterClass *>
+PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
@@ -8852,8 +10451,16 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return std::make_pair(0U, &PPC::F4RCRegClass);
if (VT == MVT::f64 || VT == MVT::i64)
return std::make_pair(0U, &PPC::F8RCRegClass);
+ if (VT == MVT::v4f64 && Subtarget.hasQPX())
+ return std::make_pair(0U, &PPC::QFRCRegClass);
+ if (VT == MVT::v4f32 && Subtarget.hasQPX())
+ return std::make_pair(0U, &PPC::QSRCRegClass);
break;
case 'v':
+ if (VT == MVT::v4f64 && Subtarget.hasQPX())
+ return std::make_pair(0U, &PPC::QFRCRegClass);
+ if (VT == MVT::v4f32 && Subtarget.hasQPX())
+ return std::make_pair(0U, &PPC::QSRCRegClass);
return std::make_pair(0U, &PPC::VRRCRegClass);
case 'y': // crrc
return std::make_pair(0U, &PPC::CRRCRegClass);
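For reference, a user-side example of the 'v' constraint these cases serve (hedged: requires a compiler targeting PPC with -maltivec; vaddfp is the AltiVec single-precision add). The constraint requests a VRRC register, which the code above hands back for vector types:

    #include <altivec.h>

    vector float vaddExample(vector float a, vector float b) {
      vector float r;
      __asm__("vaddfp %0, %1, %2" : "=v"(r) : "v"(a), "v"(b));
      return r;
    }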
@@ -8867,8 +10474,8 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return std::make_pair(0U, &PPC::VSFRCRegClass);
}
- std::pair<unsigned, const TargetRegisterClass*> R =
- TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ std::pair<unsigned, const TargetRegisterClass *> R =
+ TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
// r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
// (which we call X[0-9]+). If a 64-bit value has been requested, and a
@@ -8877,12 +10484,15 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
// the AsmName field from *RegisterInfo.td, then this would not be necessary.
if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
- PPC::GPRCRegClass.contains(R.first)) {
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ PPC::GPRCRegClass.contains(R.first))
return std::make_pair(TRI->getMatchingSuperReg(R.first,
PPC::sub_32, &PPC::G8RCRegClass),
&PPC::G8RCRegClass);
+
+ // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
+ if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
+ R.first = PPC::CR0;
+ R.second = &PPC::CRRCRegClass;
}
return R;
@@ -8913,37 +10523,42 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'P': {
ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
if (!CST) return; // Must be an immediate to match.
- unsigned Value = CST->getZExtValue();
+ int64_t Value = CST->getSExtValue();
+ EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
+ // numbers are printed as such.
switch (Letter) {
default: llvm_unreachable("Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
- if ((short)Value == (int)Value)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ if (isInt<16>(Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
+ if (isShiftedUInt<16, 16>(Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
+ break;
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
- if ((short)Value == 0)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ if (isShiftedInt<16, 16>(Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
- if ((Value >> 16) == 0)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ if (isUInt<16>(Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'M': // "M" is a constant that is greater than 31.
if (Value > 31)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'N': // "N" is a positive constant that is an exact power of two.
- if ((int)Value > 0 && isPowerOf2_32(Value))
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ if (Value > 0 && isPowerOf2_64(Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'O': // "O" is the constant zero.
if (Value == 0)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
- if ((short)-Value == (int)-Value)
- Result = DAG.getTargetConstant(Value, Op.getValueType());
+ if (isInt<16>(-Value))
+ Result = DAG.getTargetConstant(Value, TCVT);
break;
}
break;
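The isInt/isUInt/isShiftedInt predicates used above come from llvm/Support/MathExtras.h; these local stand-ins (illustrative, not the LLVM definitions) make the constraint-letter semantics concrete:

    #include <cstdint>

    constexpr bool isInt16(int64_t v)  { return v >= -32768 && v <= 32767; } // 'I'
    constexpr bool isUInt16(int64_t v) { return v >= 0 && v <= 65535; }      // 'K'
    constexpr bool isShiftedInt16x16(int64_t v) {                            // 'L'
      return (v & 0xFFFF) == 0 && isInt16(v >> 16);
    }
    constexpr bool isShiftedUInt16x16(int64_t v) {                           // 'J'
      return (v & 0xFFFF) == 0 && v >= 0 && isUInt16(v >> 16);
    }

    static_assert(isInt16(-1) && !isUInt16(-1), "'I' takes -1, 'K' does not");
    static_assert(isShiftedInt16x16(-65536), "'L' allows negative high halves");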
@@ -8963,7 +10578,9 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
Type *Ty) const {
- // FIXME: PPC does not allow r+i addressing modes for vectors!
+ // PPC does not allow r+i addressing modes for vectors!
+ if (Ty->isVectorTy() && AM.BaseOffs != 0)
+ return false;
// PPC allows a sign-extended 16-bit immediate field.
if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
@@ -9012,14 +10629,12 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
FuncInfo->setLRStoreRequired();
bool isPPC64 = Subtarget.isPPC64();
- bool isDarwinABI = Subtarget.isDarwinABI();
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
-
- DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
- isPPC64? MVT::i64 : MVT::i32);
+ DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
+ isPPC64 ? MVT::i64 : MVT::i32);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
@@ -9047,8 +10662,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
// Naked functions never have a frame pointer, and so we use r1. For all
// other functions, this decision must be delayed until during PEI.
unsigned FrameReg;
- if (MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::Naked))
+ if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
else
FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
@@ -9076,7 +10690,7 @@ unsigned PPCTargetLowering::getRegisterByName(const char* RegName,
bool is64Bit = isPPC64 && VT == MVT::i64;
unsigned Reg = StringSwitch<unsigned>(RegName)
.Case("r1", is64Bit ? PPC::X1 : PPC::R1)
- .Case("r2", isDarwinABI ? 0 : (is64Bit ? PPC::X2 : PPC::R2))
+ .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
.Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
(is64Bit ? PPC::X13 : PPC::R13))
.Default(0);
@@ -9097,6 +10711,12 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
unsigned Intrinsic) const {
switch (Intrinsic) {
+ case Intrinsic::ppc_qpx_qvlfd:
+ case Intrinsic::ppc_qpx_qvlfs:
+ case Intrinsic::ppc_qpx_qvlfcd:
+ case Intrinsic::ppc_qpx_qvlfcs:
+ case Intrinsic::ppc_qpx_qvlfiwa:
+ case Intrinsic::ppc_qpx_qvlfiwz:
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
case Intrinsic::ppc_altivec_lvebx:
@@ -9118,6 +10738,18 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::ppc_vsx_lxvd2x:
VT = MVT::v2f64;
break;
+ case Intrinsic::ppc_qpx_qvlfd:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfs:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcd:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcs:
+ VT = MVT::v2f32;
+ break;
default:
VT = MVT::v4i32;
break;
@@ -9134,6 +10766,47 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = false;
return true;
}
+ case Intrinsic::ppc_qpx_qvlfda:
+ case Intrinsic::ppc_qpx_qvlfsa:
+ case Intrinsic::ppc_qpx_qvlfcda:
+ case Intrinsic::ppc_qpx_qvlfcsa:
+ case Intrinsic::ppc_qpx_qvlfiwaa:
+ case Intrinsic::ppc_qpx_qvlfiwza: {
+ EVT VT;
+ switch (Intrinsic) {
+ case Intrinsic::ppc_qpx_qvlfda:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfsa:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcda:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvlfcsa:
+ VT = MVT::v2f32;
+ break;
+ default:
+ VT = MVT::v4i32;
+ break;
+ }
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = VT;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.size = VT.getStoreSize();
+ Info.align = 1;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::ppc_qpx_qvstfd:
+ case Intrinsic::ppc_qpx_qvstfs:
+ case Intrinsic::ppc_qpx_qvstfcd:
+ case Intrinsic::ppc_qpx_qvstfcs:
+ case Intrinsic::ppc_qpx_qvstfiw:
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
case Intrinsic::ppc_altivec_stvebx:
@@ -9155,6 +10828,18 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::ppc_vsx_stxvd2x:
VT = MVT::v2f64;
break;
+ case Intrinsic::ppc_qpx_qvstfd:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfs:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcd:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcs:
+ VT = MVT::v2f32;
+ break;
default:
VT = MVT::v4i32;
break;
@@ -9171,6 +10856,41 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::ppc_qpx_qvstfda:
+ case Intrinsic::ppc_qpx_qvstfsa:
+ case Intrinsic::ppc_qpx_qvstfcda:
+ case Intrinsic::ppc_qpx_qvstfcsa:
+ case Intrinsic::ppc_qpx_qvstfiwa: {
+ EVT VT;
+ switch (Intrinsic) {
+ case Intrinsic::ppc_qpx_qvstfda:
+ VT = MVT::v4f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfsa:
+ VT = MVT::v4f32;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcda:
+ VT = MVT::v2f64;
+ break;
+ case Intrinsic::ppc_qpx_qvstfcsa:
+ VT = MVT::v2f32;
+ break;
+ default:
+ VT = MVT::v4i32;
+ break;
+ }
+
+ Info.opc = ISD::INTRINSIC_VOID;
+ Info.memVT = VT;
+ Info.ptrVal = I.getArgOperand(1);
+ Info.offset = 0;
+ Info.size = VT.getStoreSize();
+ Info.align = 1;
+ Info.vol = false;
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
default:
break;
}
@@ -9229,6 +10949,31 @@ bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
return NumBits1 == 64 && NumBits2 == 32;
}
+bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ // Generally speaking, zexts are not free, but they are free when they can be
+ // folded with other operations.
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
+ EVT MemVT = LD->getMemoryVT();
+ if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
+ (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
+ (LD->getExtensionType() == ISD::NON_EXTLOAD ||
+ LD->getExtensionType() == ISD::ZEXTLOAD))
+ return true;
+ }
+
+ // FIXME: Add other cases...
+ // - 32-bit shifts with a zext to i64
+ // - zext after ctlz, bswap, etc.
+ // - zext after and by a constant mask
+
+ return TargetLowering::isZExtFree(Val, VT2);
+}
+
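The intuition behind the load cases above, sketched in plain C++: PPC's narrowing loads (lbz/lhz/lwz) zero-fill the destination register, so the later widening is a no-op at the machine level.

    #include <cstdint>

    uint64_t widenByte(const uint8_t *p) {
      uint8_t b = *p;       // lbz already clears bits 8..63
      return uint64_t(b);   // this zext therefore costs no instruction
    }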
+bool PPCTargetLowering::isFPExtFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return true;
+}
+
bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<16>(Imm) || isUInt<16>(Imm);
}
@@ -9289,12 +11034,30 @@ bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
return false;
}
+const MCPhysReg *
+PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
+ // LR is a callee-save register, but we must treat it as clobbered by any call
+ // site. Hence we include LR in the scratch registers, which are in turn added
+ // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
+ // to CTR, which is used by any indirect call.
+ static const MCPhysReg ScratchRegs[] = {
+ PPC::X12, PPC::LR8, PPC::CTR8, 0
+ };
+
+ return ScratchRegs;
+}
+
bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
EVT VT , unsigned DefinedValues) const {
if (VT == MVT::v2i64)
return false;
+ if (Subtarget.hasQPX()) {
+ if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1)
+ return true;
+ }
+
return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index bb4d1f1..04afe88 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -61,6 +61,9 @@ namespace llvm {
///
VPERM,
+ /// The CMPB instruction (takes two operands of i32 or i64).
+ CMPB,
+
/// Hi/Lo - These represent the high and low 16-bit parts of a global
/// address respectively. These nodes have two operands, the first of
/// which must be a TargetGlobalAddress, and the second of which must be a
@@ -68,18 +71,9 @@ namespace llvm {
/// though these are usually folded into other nodes.
Hi, Lo,
- TOC_ENTRY,
-
/// The following two target-specific nodes are used for calls through
/// function pointers in the 64-bit SVR4 ABI.
- /// Like a regular LOAD but additionally taking/producing a flag.
- LOAD,
-
- /// Like LOAD (taking/producing a flag), but using r2 as hard-coded
- /// destination.
- LOAD_TOC,
-
/// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
/// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
/// compute an allocation on the stack.
@@ -94,15 +88,17 @@ namespace llvm {
/// code.
SRL, SRA, SHL,
+    /// The combination of sra[wd]i and addze used to implement signed
+ /// integer division by a power of 2. The first operand is the dividend,
+ /// and the second is the constant shift amount (representing the
+ /// divisor).
+ SRA_ADDZE,
+
/// CALL - A direct function call.
/// CALL_NOP is a call with the special NOP which follows 64-bit
/// SVR4 calls.
CALL, CALL_NOP,
- /// CALL_TLS and CALL_NOP_TLS - Versions of CALL and CALL_NOP used
- /// to access TLS variables.
- CALL_TLS, CALL_NOP_TLS,
-
/// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
/// MTCTR instruction.
MTCTR,
@@ -111,6 +107,10 @@ namespace llvm {
/// BCTRL instruction.
BCTRL,
+ /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
+ /// instruction and the TOC reload required on SVR4 PPC64.
+ BCTRL_LOAD_TOC,
+
/// Return with a flag operand, matched by 'blr'
RET_FLAG,
@@ -125,6 +125,10 @@ namespace llvm {
/// implement truncation of i32 or i64 to i1.
ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,
+ // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
+ // target (returns (Lo, Hi)). It takes a chain operand.
+ READ_TIME_BASE,
+
// EH_SJLJ_SETJMP - SjLj exception handling setjmp.
EH_SJLJ_SETJMP,
@@ -186,7 +190,7 @@ namespace llvm {
PPC32_GOT,
/// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
- /// local dynamic TLS on PPC32.
+ /// local dynamic TLS on PPC32.
PPC32_PICGOT,
/// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
@@ -213,26 +217,46 @@ namespace llvm {
/// register to sym\@got\@tlsgd\@ha.
ADDIS_TLSGD_HA,
- /// G8RC = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
+ /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
- /// sym\@got\@tlsgd\@l.
+ /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
+    /// ADDI_TLSGD_L_ADDR until after register assignment.
ADDI_TLSGD_L,
+ /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
+ /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
+    /// ADDI_TLSGD_L_ADDR until after register assignment.
+ GET_TLS_ADDR,
+
+ /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
+ /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
+ /// register assignment.
+ ADDI_TLSGD_L_ADDR,
+
/// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
/// register to sym\@got\@tlsld\@ha.
ADDIS_TLSLD_HA,
- /// G8RC = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
+ /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
- /// sym\@got\@tlsld\@l.
+ /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
+    /// ADDI_TLSLD_L_ADDR until after register assignment.
ADDI_TLSLD_L,
- /// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the
- /// local-dynamic TLS model, produces an ADDIS8 instruction
- /// that adds X3 to sym\@dtprel\@ha. The Chain operand is needed
- /// to tie this in place following a copy to %X3 from the result
- /// of a GET_TLSLD_ADDR.
+ /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
+ /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
+    /// ADDI_TLSLD_L_ADDR until after register assignment.
+ GET_TLSLD_ADDR,
+
+ /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
+ /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
+ /// following register assignment.
+ ADDI_TLSLD_L_ADDR,
+
+ /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
+ /// model, produces an ADDIS8 instruction that adds X3 to
+ /// sym\@dtprel\@ha.
ADDIS_DTPREL_HA,
/// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
@@ -250,6 +274,29 @@ namespace llvm {
/// operand identifies the operating system entry point.
SC,
+ /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
+ /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
+ /// or stxvd2x instruction. The chain is necessary because the
+ /// sequence replaces a load and needs to provide the same number
+ /// of outputs.
+ XXSWAPD,
+
+    /// QVFPERM - This corresponds to the QPX qvfperm instruction.
+    QVFPERM,
+
+    /// QVGPCI - This corresponds to the QPX qvgpci instruction.
+    QVGPCI,
+
+    /// QVALIGNI - This corresponds to the QPX qvaligni instruction.
+    QVALIGNI,
+
+    /// QVESPLATI - This corresponds to the QPX qvesplati instruction.
+    QVESPLATI,
+
+    /// QBFLT - Access the underlying QPX floating-point boolean
+    /// representation.
+    QBFLT,
+
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
@@ -276,20 +323,24 @@ namespace llvm {
/// destination 64-bit register.
LFIWZX,
- /// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium and large code model,
- /// produces an ADDIS8 instruction that adds the TOC base register to
- /// sym\@toc\@ha.
- ADDIS_TOC_HA,
+ /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
+ /// Maps directly to an lxvd2x instruction that will be followed by
+ /// an xxswapd.
+ LXVD2X,
- /// G8RC = LD_TOC_L Symbol, G8RReg - For medium and large code model,
- /// produces a LD instruction with base register G8RReg and offset
- /// sym\@toc\@l. Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
- LD_TOC_L,
+ /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
+ /// Maps directly to an stxvd2x instruction that will be preceded by
+ /// an xxswapd.
+ STXVD2X,
- /// G8RC = ADDI_TOC_L G8RReg, Symbol - For medium code model, produces
- /// an ADDI8 instruction that adds G8RReg to sym\@toc\@l.
- /// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
- ADDI_TOC_L
+ /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
+ /// The 4xf32 load used for v4i1 constants.
+ QVLFSb,
+
+ /// GPRC = TOC_ENTRY GA, TOC
+ /// Loads the entry for GA from the TOC, where the TOC base is given by
+ /// the last operand.
+ TOC_ENTRY
};
}
@@ -338,14 +389,18 @@ namespace llvm {
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
+
+ /// If this is a qvaligni shuffle mask, return the shift
+ /// amount, otherwise return -1.
+ int isQVALIGNIShuffleMask(SDNode *N);
}
- class PPCSubtarget;
class PPCTargetLowering : public TargetLowering {
const PPCSubtarget &Subtarget;
public:
- explicit PPCTargetLowering(const PPCTargetMachine &TM);
+ explicit PPCTargetLowering(const PPCTargetMachine &TM,
+ const PPCSubtarget &STI);
/// getTargetNodeName() - This method returns the name of a target specific
/// DAG node.
@@ -353,6 +408,14 @@ namespace llvm {
MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
+ bool isCheapToSpeculateCttz() const override {
+ return true;
+ }
+
+ bool isCheapToSpeculateCtlz() const override {
+ return true;
+ }
+
/// getSetCCResultType - Return the ISD::SETCC ValueType
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
@@ -399,8 +462,14 @@ namespace llvm {
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
+ SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
+
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+ SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ std::vector<SDNode *> *Created) const override;
+
unsigned getRegisterByName(const char* RegName, EVT VT) const override;
void computeKnownBitsForTargetNode(const SDValue Op,
@@ -409,6 +478,8 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
+ unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
+
Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
bool IsStore, bool IsLoad) const override;
Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
@@ -438,9 +509,10 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -476,6 +548,10 @@ namespace llvm {
bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
bool isTruncateFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
+
+ bool isFPExtFree(EVT VT) const override;
+
/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
@@ -516,6 +592,8 @@ namespace llvm {
/// expanded to fmul + fadd.
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
+ const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
+
// Should we expand the build vector with shuffles?
bool
shouldExpandBuildVectorWithShuffles(EVT VT,
@@ -541,6 +619,29 @@ namespace llvm {
}
private:
+
+ struct ReuseLoadInfo {
+ SDValue Ptr;
+ SDValue Chain;
+ SDValue ResChain;
+ MachinePointerInfo MPI;
+ bool IsInvariant;
+ unsigned Alignment;
+ AAMDNodes AAInfo;
+ const MDNode *Ranges;
+
+ ReuseLoadInfo() : IsInvariant(false), Alignment(0), Ranges(nullptr) {}
+ };
+
+ bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
+ SelectionDAG &DAG,
+ ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
+ void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
+ SelectionDAG &DAG) const;
+
+ void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
+ SelectionDAG &DAG, SDLoc dl) const;
+
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
@@ -563,8 +664,6 @@ namespace llvm {
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- std::pair<SDValue,SDValue> lowerTLSCall(SDValue Op, SDLoc dl,
- SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
@@ -593,26 +692,31 @@ namespace llvm {
SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall,
- bool isVarArg,
+ bool isVarArg, bool IsPatchPoint,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8>
&RegsToPass,
- SDValue InFlag, SDValue Chain,
+ SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
SDValue &Callee,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
SDValue
LowerFormalArguments(SDValue Chain,
@@ -669,41 +773,46 @@ namespace llvm {
SDValue
LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall,
+ bool isVarArg, bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
SDValue
LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall,
+ bool isVarArg, bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
SDValue
LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall,
+ bool isVarArg, bool isTailCall, bool IsPatchPoint,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps,
bool &UseOneConstNR) const override;
SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps) const override;
+ bool combineRepeatedFPDivisors(unsigned NumUsers) const override;
CCAssignFn *useFastISelCCs(unsigned Flag) const;
};
diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td
index 9a19abb..69c0d7d 100644
--- a/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -81,6 +81,9 @@ def HI48_64 : SDNodeXForm<imm, [{
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
+ let isReturn = 1, Uses = [LR8, RM] in
+ def BLR8 : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB,
+ [(retflag)]>, Requires<[In64BitMode]>;
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in {
def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>,
@@ -167,6 +170,17 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
}
}
}
+
+let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1,
+ Defs = [LR8, X2], Uses = [CTR8, RM], RST = 2 in {
+ def BCTRL8_LDinto_toc :
+ XLForm_2_ext_and_DSForm_1<19, 528, 20, 0, 1, 58, 0, (outs),
+ (ins memrix:$src),
+ "bctrl\n\tld 2, $src", IIC_BrB,
+ [(PPCbctrl_load_toc ixaddr:$src)]>,
+ Requires<[In64BitMode]>;
+}
+
} // Interpretation64Bit
// FIXME: Duplicating this for the asm parser should be unnecessary, but the
@@ -188,9 +202,6 @@ def : Pat<(PPCcall (i64 texternalsym:$dst)),
def : Pat<(PPCcall_nop (i64 texternalsym:$dst)),
(BL8_NOP texternalsym:$dst)>;
-def : Pat<(PPCcall_nop_tls texternalsym:$func, tglobaltlsaddr:$sym),
- (BL8_NOP_TLS texternalsym:$func, tglobaltlsaddr:$sym)>;
-
// Atomic operations
let usesCustomInserter = 1 in {
let Defs = [CR0] in {
@@ -282,7 +293,7 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
// 64-bit CR instructions
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MTOCRF8: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins g8rc:$ST),
"mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
@@ -299,7 +310,7 @@ def MFOCRF8: XFXForm_5a<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM),
def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins),
"mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
let Defs = [CTR8] in
@@ -366,7 +377,7 @@ def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs g8rc:$rT), (ins),
let PPC970_Unit = 1 in { // FXU Operations.
let Interpretation64Bit = 1 in {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCodeGenOnly = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
@@ -517,7 +528,7 @@ defm MULHDU : XOForm_1r<31, 9, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
}
} // Interpretation64Bit
-let isCompare = 1, neverHasSideEffects = 1 in {
+let isCompare = 1, hasSideEffects = 0 in {
def CMPD : XForm_16_ext<31, 0, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
"cmpd $crD, $rA, $rB", IIC_IntCompare>, isPPC64;
def CMPLD : XForm_16_ext<31, 32, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
@@ -529,7 +540,7 @@ let isCompare = 1, neverHasSideEffects = 1 in {
IIC_IntCompare>, isPPC64;
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm SLD : XForm_6r<31, 27, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
"sld", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCshl i64:$rS, i32:$rB))]>, isPPC64;
@@ -540,13 +551,21 @@ defm SRAD : XForm_6rc<31, 794, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
"srad", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCsra i64:$rS, i32:$rB))]>, isPPC64;
-let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+defm CNTLZW8 : XForm_11r<31, 26, (outs g8rc:$rA), (ins g8rc:$rS),
+ "cntlzw", "$rA, $rS", IIC_IntGeneral, []>;
+
defm EXTSB8 : XForm_11r<31, 954, (outs g8rc:$rA), (ins g8rc:$rS),
"extsb", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i8))]>;
defm EXTSH8 : XForm_11r<31, 922, (outs g8rc:$rA), (ins g8rc:$rS),
"extsh", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i16))]>;
+
+defm SLW8 : XForm_6r<31, 24, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
+ "slw", "$rA, $rS, $rB", IIC_IntGeneral, []>;
+defm SRW8 : XForm_6r<31, 536, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
+ "srw", "$rA, $rS, $rB", IIC_IntGeneral, []>;
} // Interpretation64Bit
// For fast-isel:
@@ -575,6 +594,11 @@ def POPCNTD : XForm_11<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
"popcntd $rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctpop i64:$rS))]>;
+let isCodeGenOnly = 1, isCommutable = 1 in
+def CMPB8 : XForm_6<31, 508, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
+ "cmpb $rA, $rS, $rB", IIC_IntGeneral,
+ [(set i64:$rA, (PPCcmpb i64:$rS, i64:$rB))]>;
+
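cmpb compares its sources byte by byte, producing 0xFF in each result byte whose inputs match and 0x00 otherwise; that is the semantics the new PPCcmpb patterns rely on here and in the 32-bit file. A host-side model:

    #include <cassert>
    #include <cstdint>

    // Model of cmpb: result byte = 0xFF where the operand bytes are
    // equal, 0x00 where they differ.
    static uint64_t cmpb(uint64_t a, uint64_t b) {
      uint64_t r = 0;
      for (int i = 0; i < 8; ++i) {
        uint64_t mask = 0xFFull << (8 * i);
        if ((a & mask) == (b & mask))
          r |= mask;
      }
      return r;
    }

    int main() {
      assert(cmpb(0x1122334455667788ull, 0x11FF3344FF6677FFull) ==
             0xFF00FFFF00FFFF00ull);
      return 0;
    }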
// popcntw also does a population count on the high 32 bits (storing the
// results in the high 32 bits of the output). We'll ignore that here (which is
// safe because we never separately use the high part of the 64-bit registers).
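
Concretely, the two 32-bit halves are counted independently, so the low word of a popcntw result is correct no matter what the high half held. A quick check with GCC/Clang builtins:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t x = 0xF0F0F0F00000FFFFULL;
      uint32_t lo = static_cast<uint32_t>(x);
      uint32_t hi = static_cast<uint32_t>(x >> 32);
      // popcntw writes popcount(lo) into the low word of the result and
      // popcount(hi) into the high word; the low count never depends on hi.
      uint64_t result = (uint64_t)__builtin_popcount(hi) << 32 |
                        (uint64_t)__builtin_popcount(lo);
      assert((uint32_t)result == 16);          // popcount(0x0000FFFF)
      assert((uint32_t)(result >> 32) == 16);  // popcount(0xF0F0F0F0)
      return 0;
    }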
@@ -600,14 +624,12 @@ def MULLI8 : DForm_2<7, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
[(set i64:$rD, (mul i64:$rA, imm64SExt16:$imm))]>;
}
-let neverHasSideEffects = 1 in {
-let isCommutable = 1 in {
+let hasSideEffects = 0 in {
defm RLDIMI : MDForm_1r<30, 3, (outs g8rc:$rA),
(ins g8rc:$rSi, g8rc:$rS, u6imm:$SH, u6imm:$MBE),
"rldimi", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64, RegConstraint<"$rSi = $rA">,
NoEncode<"$rSi">;
-}
// Rotate instructions.
defm RLDCL : MDSForm_1r<30, 8,
@@ -645,7 +667,11 @@ defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA),
"rlwinm", "$rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>;
-let isCommutable = 1 in {
+defm RLWNM8 : MForm_2r<23, (outs g8rc:$rA),
+ (ins g8rc:$rS, g8rc:$rB, u5imm:$MB, u5imm:$ME),
+ "rlwnm", "$rA, $rS, $rB, $MB, $ME", IIC_IntGeneral,
+ []>;
+
// RLWIMI can be commuted if the rotate amount is zero.
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm RLWIMI8 : MForm_2r<20, (outs g8rc:$rA),
@@ -653,15 +679,14 @@ defm RLWIMI8 : MForm_2r<20, (outs g8rc:$rA),
u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME",
IIC_IntRotate, []>, PPC970_DGroup_Cracked,
RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">;
-}
let isSelect = 1 in
def ISEL8 : AForm_4<31, 15,
(outs g8rc:$rT), (ins g8rc_nox0:$rA, g8rc:$rB, crbitrc:$cond),
- "isel $rT, $rA, $rB, $cond", IIC_IntGeneral,
+ "isel $rT, $rA, $rB, $cond", IIC_IntISEL,
[]>;
} // Interpretation64Bit
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
} // End FXU Operations.
@@ -702,7 +727,7 @@ def LWAX_32 : XForm_1<31, 341, (outs gprc:$rD), (ins memrr:$src),
} // end fast-isel isCodeGenOnly
// Update forms.
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAU8 : DForm_1<43, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
@@ -750,7 +775,7 @@ def LWZX8 : XForm_1<31, 23, (outs g8rc:$rD), (ins memrr:$src),
// Update forms.
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
def LBZU8 : DForm_1<35, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
@@ -809,11 +834,6 @@ def LDtocBA: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
[(set i64:$rD,
(PPCtoc_entry tblockaddress:$disp, i64:$reg))]>, isPPC64;
-let hasSideEffects = 1, isCodeGenOnly = 1, RST = 2, Defs = [X2] in
-def LDinto_toc: DSForm_1<58, 0, (outs), (ins memrix:$src),
- "ld 2, $src", IIC_LdStLD,
- [(PPCload_toc ixaddr:$src)]>, isPPC64;
-
def LDX : XForm_1<31, 21, (outs g8rc:$rD), (ins memrr:$src),
"ldx $rD, $src", IIC_LdStLD,
[(set i64:$rD, (load xaddr:$src))]>, isPPC64;
@@ -821,7 +841,14 @@ def LDBRX : XForm_1<31, 532, (outs g8rc:$rD), (ins memrr:$src),
"ldbrx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (PPClbrx xoaddr:$src, i64))]>, isPPC64;
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0, isCodeGenOnly = 1 in {
+def LHBRX8 : XForm_1<31, 790, (outs g8rc:$rD), (ins memrr:$src),
+ "lhbrx $rD, $src", IIC_LdStLoad, []>;
+def LWBRX8 : XForm_1<31, 534, (outs g8rc:$rD), (ins memrr:$src),
+ "lwbrx $rD, $src", IIC_LdStLoad, []>;
+}
+
+let mayLoad = 1, hasSideEffects = 0 in {
def LDU : DSForm_1<58, 1, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrix:$addr),
"ldu $rD, $addr", IIC_LdStLDU,
[]>, RegConstraint<"$addr.reg = $ea_result">, isPPC64,
@@ -835,25 +862,16 @@ def LDUX : XForm_1<31, 53, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
}
}
-def : Pat<(PPCload ixaddr:$src),
- (LD ixaddr:$src)>;
-def : Pat<(PPCload xaddr:$src),
- (LDX xaddr:$src)>;
-
// Support for medium and large code model.
+let hasSideEffects = 0 in {
def ADDIStocHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp),
- "#ADDIStocHA",
- [(set i64:$rD,
- (PPCaddisTocHA i64:$reg, tglobaladdr:$disp))]>,
- isPPC64;
+ "#ADDIStocHA", []>, isPPC64;
+let mayLoad = 1 in
def LDtocL: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc_nox0:$reg),
- "#LDtocL",
- [(set i64:$rD,
- (PPCldTocL tglobaladdr:$disp, i64:$reg))]>, isPPC64;
+ "#LDtocL", []>, isPPC64;
def ADDItocL: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp),
- "#ADDItocL",
- [(set i64:$rD,
- (PPCaddiTocL i64:$reg, tglobaladdr:$disp))]>, isPPC64;
+ "#ADDItocL", []>, isPPC64;
+}
// Support for thread-local storage.
def ADDISgotTprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
@@ -879,6 +897,28 @@ def ADDItlsgdL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
[(set i64:$rD,
(PPCaddiTlsgdL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
+// LR8 is a true define, while the rest of the Defs are clobbers. X3 is
+// explicitly defined when this op is created, so not mentioned here.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [X0,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
+def GETtlsADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
+ "#GETtlsADDR",
+ [(set i64:$rD,
+ (PPCgetTlsAddr i64:$reg, tglobaltlsaddr:$sym))]>,
+ isPPC64;
+// Combined op for ADDItlsgdL and GETtlsADDR, late expanded. X3 and LR8
+// are true defines, while the rest of the Defs are clobbers.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [X0,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7]
+ in
+def ADDItlsgdLADDR : Pseudo<(outs g8rc:$rD),
+ (ins g8rc_nox0:$reg, s16imm64:$disp, tlsgd:$sym),
+ "#ADDItlsgdLADDR",
+ [(set i64:$rD,
+ (PPCaddiTlsgdLAddr i64:$reg,
+ tglobaltlsaddr:$disp,
+ tglobaltlsaddr:$sym))]>,
+ isPPC64;
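
After register allocation the combined op is split back into its underlying pair, keeping the addi adjacent to the call so that linkers can recognize the TLS sequence; roughly (an illustrative ELF general-dynamic sequence, not taken from this patch):

    addi 3, 3, sym@got@tlsgd@l
    bl __tls_get_addr(sym@tlsgd)
    nop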
def ADDIStlsldHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDIStlsldHA",
[(set i64:$rD,
@@ -889,6 +929,28 @@ def ADDItlsldL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
[(set i64:$rD,
(PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
+// LR8 is a true define, while the rest of the Defs are clobbers. X3 is
+// explicitly defined when this op is created, so not mentioned here.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [X0,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
+def GETtlsldADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
+ "#GETtlsldADDR",
+ [(set i64:$rD,
+ (PPCgetTlsldAddr i64:$reg, tglobaltlsaddr:$sym))]>,
+ isPPC64;
+// Combined op for ADDItlsldL and GETtlsldADDR, late expanded. X3 and LR8
+// are true defines, while the rest of the Defs are clobbers.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [X0,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7]
+ in
+def ADDItlsldLADDR : Pseudo<(outs g8rc:$rD),
+ (ins g8rc_nox0:$reg, s16imm64:$disp, tlsgd:$sym),
+ "#ADDItlsldLADDR",
+ [(set i64:$rD,
+ (PPCaddiTlsldLAddr i64:$reg,
+ tglobaltlsaddr:$disp,
+ tglobaltlsaddr:$sym))]>,
+ isPPC64;
def ADDISdtprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
"#ADDISdtprelHA",
[(set i64:$rD,
@@ -1006,7 +1068,7 @@ def : Pat<(pre_store i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
//
-let PPC970_Unit = 3, neverHasSideEffects = 1,
+let PPC970_Unit = 3, hasSideEffects = 0,
Uses = [RM] in { // FPU Operations.
defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfid", "$frD, $frB", IIC_FPGeneral,
diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index 4ef08eb..f6acd6e 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -791,18 +791,27 @@ def : Pat<(store v4i32:$rS, xoaddr:$dst),
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;
+def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;
+def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;
+def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
+def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
+
+def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
+def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
+def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
+def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
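
All of these bitconverts are free: a VMX register has no inherent element type, so each pattern simply reuses the source register. The v2i64 cases added here model reinterpretation only, as a host-side sketch shows (byte order given for a little-endian host):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // v4i32 -> v2i64 bitconvert: same 16 bytes, new lane type.
      uint32_t v4i32[4] = {0x11111111, 0x22222222, 0x33333333, 0x44444444};
      uint64_t v2i64[2];
      std::memcpy(v2i64, v4i32, sizeof v4i32);
      // On a little-endian host each i64 lane is two adjacent i32 lanes.
      assert(v2i64[0] == 0x2222222211111111ULL);
      assert(v2i64[1] == 0x4444444433333333ULL);
      return 0;
    }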
// Shuffles.
@@ -929,3 +938,58 @@ def : Pat<(v4f32 (fnearbyint v4f32:$vA)),
} // end HasAltivec
+def HasP8Altivec : Predicate<"PPCSubTarget->hasP8Altivec()">;
+let Predicates = [HasP8Altivec] in {
+
+// Count Leading Zeros
+def VCLZB : VXForm_2<1794, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vclzb $vD, $vB", IIC_VecGeneral,
+ [(set v16i8:$vD, (ctlz v16i8:$vB))]>;
+def VCLZH : VXForm_2<1858, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vclzh $vD, $vB", IIC_VecGeneral,
+ [(set v8i16:$vD, (ctlz v8i16:$vB))]>;
+def VCLZW : VXForm_2<1922, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vclzw $vD, $vB", IIC_VecGeneral,
+ [(set v4i32:$vD, (ctlz v4i32:$vB))]>;
+def VCLZD : VXForm_2<1986, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vclzd $vD, $vB", IIC_VecGeneral,
+ [(set v2i64:$vD, (ctlz v2i64:$vB))]>;
+
+// Population Count
+def VPOPCNTB : VXForm_2<1795, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vpopcntb $vD, $vB", IIC_VecGeneral,
+ [(set v16i8:$vD, (ctpop v16i8:$vB))]>;
+def VPOPCNTH : VXForm_2<1859, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vpopcnth $vD, $vB", IIC_VecGeneral,
+ [(set v8i16:$vD, (ctpop v8i16:$vB))]>;
+def VPOPCNTW : VXForm_2<1923, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vpopcntw $vD, $vB", IIC_VecGeneral,
+ [(set v4i32:$vD, (ctpop v4i32:$vB))]>;
+def VPOPCNTD : VXForm_2<1987, (outs vrrc:$vD), (ins vrrc:$vB),
+ "vpopcntd $vD, $vB", IIC_VecGeneral,
+ [(set v2i64:$vD, (ctpop v2i64:$vB))]>;
+
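vclz and vpopcnt are pure lane-wise operations, which is why they map directly onto the generic ctlz/ctpop nodes with no custom lowering. The word-sized lane semantics, modeled with GCC/Clang builtins:

    #include <cassert>
    #include <cstdint>

    int main() {
      // One lane of vclzw/vpopcntw per array element.
      uint32_t lanes[4] = {1, 0x80000000u, 0x0000FFFFu, 0x0F0F0F0Fu};
      int clz[4], pop[4];
      for (int i = 0; i < 4; ++i) {
        clz[i] = lanes[i] ? __builtin_clz(lanes[i]) : 32; // clz(0) == 32
        pop[i] = __builtin_popcount(lanes[i]);
      }
      assert(clz[0] == 31 && clz[1] == 0 && clz[2] == 16);
      assert(pop[3] == 16);
      return 0;
    }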
+let isCommutable = 1 in {
+// FIXME: Use AddedComplexity > 400 to ensure these patterns match before the
+// VSX equivalents. We need to fix this up at some point. Two possible
+// solutions for this problem:
+// 1. Disable Altivec patterns that compete with VSX patterns using the
+// !HasVSX predicate. This essentially favours VSX over Altivec, in
+// hopes of reducing register pressure (larger register set using VSX
+// instructions than VMX instructions)
+// 2. Employ a more disciplined use of AddedComplexity, which would provide
+// more fine-grained control than option 1. This would be beneficial
+// if we find situations where Altivec is really preferred over VSX.
+def VEQV : VXForm_1<1668, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ "veqv $vD, $vA, $vB", IIC_VecGeneral,
+ [(set v4i32:$vD, (vnot_ppc (xor v4i32:$vA, v4i32:$vB)))]>;
+def VNAND : VXForm_1<1412, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ "vnand $vD, $vA, $vB", IIC_VecGeneral,
+ [(set v4i32:$vD, (vnot_ppc (and v4i32:$vA, v4i32:$vB)))]>;
+} // isCommutable
+
+def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ "vorc $vD, $vA, $vB", IIC_VecGeneral,
+ [(set v4i32:$vD, (or v4i32:$vA,
+ (vnot_ppc v4i32:$vB)))]>;
+} // end HasP8Altivec
diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td
index aa68497..506a2d0 100644
--- a/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/lib/Target/PowerPC/PPCInstrFormats.td
@@ -385,6 +385,12 @@ class XForm_tlb<bits<10> xo, dag OOL, dag IOL, string asmstr,
let RST = 0;
}
+class XForm_attn<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ let Inst{21-30} = xo;
+}
+
// This is the same as XForm_base_r3xo, but the first two operands are swapped
// when code is emitted.
class XForm_base_r3xo_swapped
@@ -556,6 +562,47 @@ class XForm_17<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = 0;
}
+// Used for QPX
+class XForm_18<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> FRT;
+ bits<5> FRA;
+ bits<5> FRB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = FRT;
+ let Inst{11-15} = FRA;
+ let Inst{16-20} = FRB;
+ let Inst{21-30} = xo;
+ let Inst{31} = 0;
+}
+
+class XForm_19<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : XForm_18<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let FRA = 0;
+}
+
+class XForm_20<bits<6> opcode, bits<6> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> FRT;
+ bits<5> FRA;
+ bits<5> FRB;
+ bits<4> tttt;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = FRT;
+ let Inst{11-15} = FRA;
+ let Inst{16-20} = FRB;
+ let Inst{21-24} = tttt;
+ let Inst{25-30} = xo;
+ let Inst{31} = 0;
+}
+
class XForm_24<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
@@ -939,6 +986,64 @@ class XLForm_3<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = 0;
}
+class XLForm_4<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> BF;
+ bit W;
+ bits<4> U;
+
+ bit RC = 0;
+
+ let Inst{6-8} = BF;
+ let Inst{9-10} = 0;
+ let Inst{11-14} = 0;
+ let Inst{15} = W;
+ let Inst{16-19} = U;
+ let Inst{20} = 0;
+ let Inst{21-30} = xo;
+ let Inst{31} = RC;
+}
+
+class XLForm_2_and_DSForm_1<bits<6> opcode1, bits<10> xo1, bit lk,
+ bits<6> opcode2, bits<2> xo2,
+ dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I2<opcode1, opcode2, OOL, IOL, asmstr, itin> {
+ bits<5> BO;
+ bits<5> BI;
+ bits<2> BH;
+
+ bits<5> RST;
+ bits<19> DS_RA;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = BO;
+ let Inst{11-15} = BI;
+ let Inst{16-18} = 0;
+ let Inst{19-20} = BH;
+ let Inst{21-30} = xo1;
+ let Inst{31} = lk;
+
+ let Inst{38-42} = RST;
+ let Inst{43-47} = DS_RA{18-14}; // Register #
+ let Inst{48-61} = DS_RA{13-0}; // Displacement.
+ let Inst{62-63} = xo2;
+}
+
+class XLForm_2_ext_and_DSForm_1<bits<6> opcode1, bits<10> xo1,
+ bits<5> bo, bits<5> bi, bit lk,
+ bits<6> opcode2, bits<2> xo2,
+ dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : XLForm_2_and_DSForm_1<opcode1, xo1, lk, opcode2, xo2,
+ OOL, IOL, asmstr, itin, pattern> {
+ let BO = bo;
+ let BI = bi;
+ let BH = 0;
+}
+
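These two classes describe a single 8-byte pseudo: bits 0-31 hold the XL-form branch and bits 32-63 the DS-form load, which is how BCTRL8_LDinto_toc keeps the TOC-restoring ld glued to its bctrl. The per-word field packing, checked against well-known encodings (the 24(1) offset is only an illustrative TOC save slot; BH is omitted because it is zero here):

    #include <cassert>
    #include <cstdint>

    // PPC numbers bits MSB-first, so Inst{0-5} is the top of each word.
    static uint32_t xlform(uint32_t opc, uint32_t bo, uint32_t bi,
                           uint32_t xo, uint32_t lk) {
      return opc << 26 | bo << 21 | bi << 16 | xo << 1 | lk;
    }
    static uint32_t dsform(uint32_t opc, uint32_t rt, uint32_t ra,
                           uint32_t ds, uint32_t xo) {
      return opc << 26 | rt << 21 | ra << 16 | (ds & 0x3FFF) << 2 | xo;
    }

    int main() {
      uint32_t word0 = xlform(19, /*BO=*/20, /*BI=*/0, /*XO=*/528, /*LK=*/1);
      uint32_t word1 = dsform(58, /*RT=*/2, /*RA=*/1, /*DS=*/24 >> 2, /*XO=*/0);
      assert(word0 == 0x4E800421u); // bctrl
      assert(word1 == 0xE8410018u); // ld 2, 24(1)
      return 0;
    }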
// 1.7.8 XFX-Form
class XFXForm_1<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin>
@@ -1036,6 +1141,25 @@ class XFLForm<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = RC;
}
+class XFLForm_1<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag>pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bit L;
+ bits<8> FLM;
+ bit W;
+ bits<5> FRB;
+
+ bit RC = 0; // set by isDOT
+ let Pattern = pattern;
+
+ let Inst{6} = L;
+ let Inst{7-14} = FLM;
+ let Inst{15} = W;
+ let Inst{16-20} = FRB;
+ let Inst{21-30} = xo;
+ let Inst{31} = RC;
+}
+
// 1.7.10 XS-Form - SRADI.
class XSForm_1<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -1132,6 +1256,14 @@ class AForm_4<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = 0;
}
+// Used for QPX
+class AForm_4a<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : AForm_1<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let FRA = 0;
+ let FRC = 0;
+}
+
// 1.7.13 M-Form
class MForm_1<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -1356,6 +1488,49 @@ class VXRForm_1<bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{22-31} = xo;
}
+// Z23-Form (used by QPX)
+class Z23Form_1<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> FRT;
+ bits<5> FRA;
+ bits<5> FRB;
+ bits<2> idx;
+
+ let Pattern = pattern;
+
+ bit RC = 0; // set by isDOT
+
+ let Inst{6-10} = FRT;
+ let Inst{11-15} = FRA;
+ let Inst{16-20} = FRB;
+ let Inst{21-22} = idx;
+ let Inst{23-30} = xo;
+ let Inst{31} = RC;
+}
+
+class Z23Form_2<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : Z23Form_1<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let FRB = 0;
+}
+
+class Z23Form_3<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> FRT;
+ bits<12> idx;
+
+ let Pattern = pattern;
+
+ bit RC = 0; // set by isDOT
+
+ let Inst{6-10} = FRT;
+ let Inst{11-22} = idx;
+ let Inst{23-30} = xo;
+ let Inst{31} = RC;
+}
+
//===----------------------------------------------------------------------===//
class Pseudo<dag OOL, dag IOL, string asmstr, list<dag> pattern>
: I<0, OOL, IOL, asmstr, NoItinerary> {
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index daf8790..fe9474a 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -29,6 +29,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -51,9 +52,6 @@ opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
cl::desc("Disable compare instruction optimization"), cl::Hidden);
-static cl::opt<bool> DisableVSXFMAMutate("disable-ppc-vsx-fma-mutation",
-cl::desc("Disable VSX FMA instruction mutation"), cl::Hidden);
-
static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
cl::Hidden);
@@ -84,11 +82,11 @@ PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
/// to use for this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
- const InstrItineraryData *II,
- const ScheduleDAG *DAG) const {
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
unsigned Directive =
- DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
@@ -183,6 +181,9 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case PPC::RESTORE_CRBIT:
case PPC::LVX:
case PPC::LXVD2X:
+ case PPC::QVLFDX:
+ case PPC::QVLFSXs:
+ case PPC::QVLFDXb:
case PPC::RESTORE_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
@@ -209,6 +210,9 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case PPC::SPILL_CRBIT:
case PPC::STVX:
case PPC::STXVD2X:
+ case PPC::QVSTFDX:
+ case PPC::QVSTFSXs:
+ case PPC::QVSTFDXb:
case PPC::SPILL_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
@@ -230,10 +234,12 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI &&
- MI->getOpcode() != PPC::RLWIMIo &&
- MI->getOpcode() != PPC::RLWIMI8 &&
- MI->getOpcode() != PPC::RLWIMI8o)
+ MI->getOpcode() != PPC::RLWIMIo)
return TargetInstrInfo::commuteInstruction(MI, NewMI);
+ // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
+ // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
+ // changing the relative order of the mask operands might change what happens
+ // to the high bits of the mask (and, thus, the result).
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
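
The caveat above, made concrete: with a zero rotate, commuting rlwimi swaps the two register operands and complements the mask, which is an identity on 32 bits. For RLWIMI8 the complemented mask is exactly what cannot be formed safely for the upper 32 bits, so the 64-bit form is left alone. A 32-bit model:

    #include <cassert>
    #include <cstdint>

    // rlwimi with a zero rotate: insert rS into rA under mask m.
    static uint32_t rlwimi0(uint32_t rA, uint32_t rS, uint32_t m) {
      return (rS & m) | (rA & ~m);
    }

    int main() {
      uint32_t a = 0xAABBCCDDu, s = 0x11223344u, m = 0x00FF00FFu;
      // Commuting swaps the operand roles and complements the mask.
      assert(rlwimi0(a, s, m) == rlwimi0(s, a, ~m));
      return 0;
    }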
@@ -699,7 +705,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// legalization. Promote them here.
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (PPC::F8RCRegClass.contains(DestReg) &&
- PPC::VSLRCRegClass.contains(SrcReg)) {
+ PPC::VSRCRegClass.contains(SrcReg)) {
unsigned SuperReg =
TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
@@ -708,7 +714,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
DestReg = SuperReg;
} else if (PPC::VRRCRegClass.contains(DestReg) &&
- PPC::VSHRCRegClass.contains(SrcReg)) {
+ PPC::VSRCRegClass.contains(SrcReg)) {
unsigned SuperReg =
TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass);
@@ -717,7 +723,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
DestReg = SuperReg;
} else if (PPC::F8RCRegClass.contains(SrcReg) &&
- PPC::VSLRCRegClass.contains(DestReg)) {
+ PPC::VSRCRegClass.contains(DestReg)) {
unsigned SuperReg =
TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
@@ -726,7 +732,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
SrcReg = SuperReg;
} else if (PPC::VRRCRegClass.contains(SrcReg) &&
- PPC::VSHRCRegClass.contains(DestReg)) {
+ PPC::VSRCRegClass.contains(DestReg)) {
unsigned SuperReg =
TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass);
@@ -759,6 +765,12 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opc = PPC::XXLOR;
else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg))
Opc = PPC::XXLORf;
+ else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::QVFMR;
+ else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::QVFMRs;
+ else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::QVFMRb;
else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
Opc = PPC::CROR;
else
@@ -844,6 +856,24 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
SpillsVRS = true;
+ } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDX))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFSXs))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDXb))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ NonRI = true;
} else {
llvm_unreachable("Unknown regclass!");
}
@@ -939,6 +969,18 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
DestReg),
FrameIdx));
SpillsVRS = true;
+ } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDX), DestReg),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFSXs), DestReg),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDXb), DestReg),
+ FrameIdx));
+ NonRI = true;
} else {
llvm_unreachable("Unknown regclass!");
}
@@ -1111,7 +1153,7 @@ bool PPCInstrInfo::PredicateInstruction(
MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
unsigned OpC = MI->getOpcode();
- if (OpC == PPC::BLR) {
+ if (OpC == PPC::BLR || OpC == PPC::BLR8) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
bool isPPC64 = Subtarget.isPPC64();
MI->setDesc(get(Pred[0].getImm() ?
@@ -1275,6 +1317,7 @@ bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
return false;
case PPC::B:
case PPC::BLR:
+ case PPC::BLR8:
case PPC::BCTR:
case PPC::BCTR8:
case PPC::BCTRL:
@@ -1593,677 +1636,14 @@ unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ } else if (Opcode == TargetOpcode::STACKMAP) {
+ return MI->getOperand(1).getImm();
+ } else if (Opcode == TargetOpcode::PATCHPOINT) {
+ PatchPointOpers Opers(MI);
+ return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
} else {
const MCInstrDesc &Desc = get(Opcode);
return Desc.getSize();
}
}
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "ppc-vsx-fma-mutate"
-
-namespace {
- // PPCVSXFMAMutate pass - When an A-type VSX FMA's addend is defined by a
- // register copy and one of its product operands is killed, switch to the
- // M-type FMA form so that the copy can be eliminated.
- struct PPCVSXFMAMutate : public MachineFunctionPass {
- static char ID;
- PPCVSXFMAMutate() : MachineFunctionPass(ID) {
- initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
- }
-
- LiveIntervals *LIS;
-
- const PPCTargetMachine *TM;
- const PPCInstrInfo *TII;
-
-protected:
- bool processBlock(MachineBasicBlock &MBB) {
- bool Changed = false;
-
- MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
- for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
- I != IE; ++I) {
- MachineInstr *MI = I;
-
- // The default (A-type) VSX FMA form kills the addend (it is taken from
- // the target register, which is then updated to reflect the result of
- // the FMA). If the instruction, however, kills one of the registers
- // used for the product, then we can use the M-form instruction (which
- // will take that value from the to-be-defined register).
-
- int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
- if (AltOpc == -1)
- continue;
-
- // This pass is run after register coalescing, and so we're looking for
- // a situation like this:
- // ...
- // %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
- // ...
- // %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
- // %RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
- // ...
- // Where we can eliminate the copy by changing from the A-type to the
- // M-type instruction. Specifically, for this example, this means:
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
- // is replaced by:
- // %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg17, %vreg9,
- // %RM<imp-use>; VSLRC:%vreg16,%vreg17,%vreg9
- // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
-
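In scalar terms the rewrite preserves the value because the A-form and the M-form compute the same product-plus-addend from different operand slots. A double-precision model using the vreg names from the comment above:

    #include <cassert>

    // A-form: the target register carries the addend.      t = a*b + t
    static double xsmaddadp(double t, double a, double b) { return a * b + t; }
    // M-form: the target register carries a multiplicand.  t = a*t + b
    static double xsmaddmdp(double t, double a, double b) { return a * t + b; }

    int main() {
      double v9 = 2.0, v16 = 3.0, v17 = 5.0;
      double viaCopy = xsmaddadp(/*v5 = copy of v9*/ v9, v17, v16);
      double mutated = xsmaddmdp(/*reuses*/ v16, v17, v9);
      assert(viaCopy == mutated); // both are v17*v16 + v9, no copy needed
      return 0;
    }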
- SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
-
- VNInfo *AddendValNo =
- LIS->getInterval(MI->getOperand(1).getReg()).Query(FMAIdx).valueIn();
- MachineInstr *AddendMI = LIS->getInstructionFromIndex(AddendValNo->def);
-
- // The addend and this instruction must be in the same block.
-
- if (!AddendMI || AddendMI->getParent() != MI->getParent())
- continue;
-
- // The addend must be a full copy within the same register class.
-
- if (!AddendMI->isFullCopy())
- continue;
-
- unsigned AddendSrcReg = AddendMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg)) {
- if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) !=
- MRI.getRegClass(AddendSrcReg))
- continue;
- } else {
- // If AddendSrcReg is a physical register, make sure the destination
- // register class contains it.
- if (!MRI.getRegClass(AddendMI->getOperand(0).getReg())
- ->contains(AddendSrcReg))
- continue;
- }
-
- // In theory, there could be other uses of the addend copy before this
- // fma. We could deal with this, but that would require additional
- // logic below and I suspect it will not occur in any relevant
- // situations. Additionally, check whether the copy source is killed
- // prior to the fma. In order to replace the addend here with the
- // source of the copy, it must still be live here. We can't use
- // interval testing for a physical register, so as long as we're
- // walking the MIs we may as well test liveness here.
- bool OtherUsers = false, KillsAddendSrc = false;
- for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
- J != JE; --J) {
- if (J->readsVirtualRegister(AddendMI->getOperand(0).getReg())) {
- OtherUsers = true;
- break;
- }
- if (J->modifiesRegister(AddendSrcReg, TRI) ||
- J->killsRegister(AddendSrcReg, TRI)) {
- KillsAddendSrc = true;
- break;
- }
- }
-
- if (OtherUsers || KillsAddendSrc)
- continue;
-
- // Find one of the product operands that is killed by this instruction.
-
- unsigned KilledProdOp = 0, OtherProdOp = 0;
- if (LIS->getInterval(MI->getOperand(2).getReg())
- .Query(FMAIdx).isKill()) {
- KilledProdOp = 2;
- OtherProdOp = 3;
- } else if (LIS->getInterval(MI->getOperand(3).getReg())
- .Query(FMAIdx).isKill()) {
- KilledProdOp = 3;
- OtherProdOp = 2;
- }
-
- // If there are no killed product operands, then this transformation is
- // likely not profitable.
- if (!KilledProdOp)
- continue;
-
- // For virtual registers, verify that the addend source register
- // is live here (as should have been assured above).
- assert((!TargetRegisterInfo::isVirtualRegister(AddendSrcReg) ||
- LIS->getInterval(AddendSrcReg).liveAt(FMAIdx)) &&
- "Addend source register is not live!");
-
- // Transform: (O2 * O3) + O1 -> (O2 * O1) + O3.
-
- unsigned AddReg = AddendMI->getOperand(1).getReg();
- unsigned KilledProdReg = MI->getOperand(KilledProdOp).getReg();
- unsigned OtherProdReg = MI->getOperand(OtherProdOp).getReg();
-
- unsigned AddSubReg = AddendMI->getOperand(1).getSubReg();
- unsigned KilledProdSubReg = MI->getOperand(KilledProdOp).getSubReg();
- unsigned OtherProdSubReg = MI->getOperand(OtherProdOp).getSubReg();
-
- bool AddRegKill = AddendMI->getOperand(1).isKill();
- bool KilledProdRegKill = MI->getOperand(KilledProdOp).isKill();
- bool OtherProdRegKill = MI->getOperand(OtherProdOp).isKill();
-
- bool AddRegUndef = AddendMI->getOperand(1).isUndef();
- bool KilledProdRegUndef = MI->getOperand(KilledProdOp).isUndef();
- bool OtherProdRegUndef = MI->getOperand(OtherProdOp).isUndef();
-
- unsigned OldFMAReg = MI->getOperand(0).getReg();
-
- // The transformation doesn't work well with things like:
- // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
- // so leave such things alone.
- if (OldFMAReg == KilledProdReg)
- continue;
-
- assert(OldFMAReg == AddendMI->getOperand(0).getReg() &&
- "Addend copy not tied to old FMA output!");
-
- DEBUG(dbgs() << "VSX FMA Mutation:\n " << *MI;);
-
- MI->getOperand(0).setReg(KilledProdReg);
- MI->getOperand(1).setReg(KilledProdReg);
- MI->getOperand(3).setReg(AddReg);
- MI->getOperand(2).setReg(OtherProdReg);
-
- MI->getOperand(0).setSubReg(KilledProdSubReg);
- MI->getOperand(1).setSubReg(KilledProdSubReg);
- MI->getOperand(3).setSubReg(AddSubReg);
- MI->getOperand(2).setSubReg(OtherProdSubReg);
-
- MI->getOperand(1).setIsKill(KilledProdRegKill);
- MI->getOperand(3).setIsKill(AddRegKill);
- MI->getOperand(2).setIsKill(OtherProdRegKill);
-
- MI->getOperand(1).setIsUndef(KilledProdRegUndef);
- MI->getOperand(3).setIsUndef(AddRegUndef);
- MI->getOperand(2).setIsUndef(OtherProdRegUndef);
-
- MI->setDesc(TII->get(AltOpc));
-
- DEBUG(dbgs() << " -> " << *MI);
-
- // The killed product operand was killed here, so we can reuse it now
- // for the result of the fma.
-
- LiveInterval &FMAInt = LIS->getInterval(OldFMAReg);
- VNInfo *FMAValNo = FMAInt.getVNInfoAt(FMAIdx.getRegSlot());
- for (auto UI = MRI.reg_nodbg_begin(OldFMAReg), UE = MRI.reg_nodbg_end();
- UI != UE;) {
- MachineOperand &UseMO = *UI;
- MachineInstr *UseMI = UseMO.getParent();
- ++UI;
-
- // Don't replace the result register of the copy we're about to erase.
- if (UseMI == AddendMI)
- continue;
-
- UseMO.setReg(KilledProdReg);
- UseMO.setSubReg(KilledProdSubReg);
- }
-
- // Extend the live intervals of the killed product operand to hold the
- // fma result.
-
- LiveInterval &NewFMAInt = LIS->getInterval(KilledProdReg);
- for (LiveInterval::iterator AI = FMAInt.begin(), AE = FMAInt.end();
- AI != AE; ++AI) {
- // Don't add the segment that corresponds to the original copy.
- if (AI->valno == AddendValNo)
- continue;
-
- VNInfo *NewFMAValNo =
- NewFMAInt.getNextValue(AI->start,
- LIS->getVNInfoAllocator());
-
- NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
- NewFMAValNo));
- }
- DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
-
- FMAInt.removeValNo(FMAValNo);
- DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
-
- // Remove the (now unused) copy.
-
- DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
- LIS->RemoveMachineInstrFromMaps(AddendMI);
- AddendMI->eraseFromParent();
-
- Changed = true;
- }
-
- return Changed;
- }
-
-public:
- bool runOnMachineFunction(MachineFunction &MF) override {
- TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
- // If we don't have VSX then go ahead and return without doing
- // anything.
- if (!TM->getSubtargetImpl()->hasVSX())
- return false;
-
- LIS = &getAnalysis<LiveIntervals>();
-
- TII = TM->getSubtargetImpl()->getInstrInfo();
-
- bool Changed = false;
-
- if (DisableVSXFMAMutate)
- return Changed;
-
- for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
- MachineBasicBlock &B = *I++;
- if (processBlock(B))
- Changed = true;
- }
-
- return Changed;
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<LiveIntervals>();
- AU.addPreserved<LiveIntervals>();
- AU.addRequired<SlotIndexes>();
- AU.addPreserved<SlotIndexes>();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- };
-}
-
-INITIALIZE_PASS_BEGIN(PPCVSXFMAMutate, DEBUG_TYPE,
- "PowerPC VSX FMA Mutation", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_END(PPCVSXFMAMutate, DEBUG_TYPE,
- "PowerPC VSX FMA Mutation", false, false)
-
-char &llvm::PPCVSXFMAMutateID = PPCVSXFMAMutate::ID;
-
-char PPCVSXFMAMutate::ID = 0;
-FunctionPass*
-llvm::createPPCVSXFMAMutatePass() { return new PPCVSXFMAMutate(); }
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "ppc-vsx-copy"
-
-namespace llvm {
- void initializePPCVSXCopyPass(PassRegistry&);
-}
-
-namespace {
- // PPCVSXCopy pass - For copies between VSX registers and non-VSX registers
- // (Altivec and scalar floating-point registers), we need to transform the
- // copies into subregister copies with other restrictions.
- struct PPCVSXCopy : public MachineFunctionPass {
- static char ID;
- PPCVSXCopy() : MachineFunctionPass(ID) {
- initializePPCVSXCopyPass(*PassRegistry::getPassRegistry());
- }
-
- const PPCTargetMachine *TM;
- const PPCInstrInfo *TII;
-
- bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC,
- MachineRegisterInfo &MRI) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- return RC->hasSubClassEq(MRI.getRegClass(Reg));
- } else if (RC->contains(Reg)) {
- return true;
- }
-
- return false;
- }
-
- bool IsVSReg(unsigned Reg, MachineRegisterInfo &MRI) {
- return IsRegInClass(Reg, &PPC::VSRCRegClass, MRI);
- }
-
- bool IsVRReg(unsigned Reg, MachineRegisterInfo &MRI) {
- return IsRegInClass(Reg, &PPC::VRRCRegClass, MRI);
- }
-
- bool IsF8Reg(unsigned Reg, MachineRegisterInfo &MRI) {
- return IsRegInClass(Reg, &PPC::F8RCRegClass, MRI);
- }
-
-protected:
- bool processBlock(MachineBasicBlock &MBB) {
- bool Changed = false;
-
- MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
- I != IE; ++I) {
- MachineInstr *MI = I;
- if (!MI->isFullCopy())
- continue;
-
- MachineOperand &DstMO = MI->getOperand(0);
- MachineOperand &SrcMO = MI->getOperand(1);
-
- if ( IsVSReg(DstMO.getReg(), MRI) &&
- !IsVSReg(SrcMO.getReg(), MRI)) {
- // This is a copy *to* a VSX register from a non-VSX register.
- Changed = true;
-
- const TargetRegisterClass *SrcRC =
- IsVRReg(SrcMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
- &PPC::VSLRCRegClass;
- assert((IsF8Reg(SrcMO.getReg(), MRI) ||
- IsVRReg(SrcMO.getReg(), MRI)) &&
- "Unknown source for a VSX copy");
-
- unsigned NewVReg = MRI.createVirtualRegister(SrcRC);
- BuildMI(MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg)
- .addImm(1) // add 1, not 0, because there is no implicit clearing
- // of the high bits.
- .addOperand(SrcMO)
- .addImm(IsVRReg(SrcMO.getReg(), MRI) ? PPC::sub_128 :
- PPC::sub_64);
-
- // The source of the original copy is now the new virtual register.
- SrcMO.setReg(NewVReg);
- } else if (!IsVSReg(DstMO.getReg(), MRI) &&
- IsVSReg(SrcMO.getReg(), MRI)) {
- // This is a copy *from* a VSX register to a non-VSX register.
- Changed = true;
-
- const TargetRegisterClass *DstRC =
- IsVRReg(DstMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
- &PPC::VSLRCRegClass;
- assert((IsF8Reg(DstMO.getReg(), MRI) ||
- IsVRReg(DstMO.getReg(), MRI)) &&
- "Unknown destination for a VSX copy");
-
- // Copy the VSX value into a new VSX register of the correct subclass.
- unsigned NewVReg = MRI.createVirtualRegister(DstRC);
- BuildMI(MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::COPY), NewVReg)
- .addOperand(SrcMO);
-
- // Transform the original copy into a subregister extraction copy.
- SrcMO.setReg(NewVReg);
- SrcMO.setSubReg(IsVRReg(DstMO.getReg(), MRI) ? PPC::sub_128 :
- PPC::sub_64);
- }
- }
-
- return Changed;
- }
-
-public:
- bool runOnMachineFunction(MachineFunction &MF) override {
- TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
- // If we don't have VSX on the subtarget, don't do anything.
- if (!TM->getSubtargetImpl()->hasVSX())
- return false;
- TII = TM->getSubtargetImpl()->getInstrInfo();
-
- bool Changed = false;
-
- for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
- MachineBasicBlock &B = *I++;
- if (processBlock(B))
- Changed = true;
- }
-
- return Changed;
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- };
-}
-
-INITIALIZE_PASS(PPCVSXCopy, DEBUG_TYPE,
- "PowerPC VSX Copy Legalization", false, false)
-
-char PPCVSXCopy::ID = 0;
-FunctionPass*
-llvm::createPPCVSXCopyPass() { return new PPCVSXCopy(); }
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "ppc-vsx-copy-cleanup"
-
-namespace llvm {
- void initializePPCVSXCopyCleanupPass(PassRegistry&);
-}
-
-namespace {
- // PPCVSXCopyCleanup pass - We sometimes end up generating self copies of VSX
- // registers (mostly because the ABI code still places all values into the
- // "traditional" floating-point and vector registers). Remove them here.
- struct PPCVSXCopyCleanup : public MachineFunctionPass {
- static char ID;
- PPCVSXCopyCleanup() : MachineFunctionPass(ID) {
- initializePPCVSXCopyCleanupPass(*PassRegistry::getPassRegistry());
- }
-
- const PPCTargetMachine *TM;
- const PPCInstrInfo *TII;
-
-protected:
- bool processBlock(MachineBasicBlock &MBB) {
- bool Changed = false;
-
- SmallVector<MachineInstr *, 4> ToDelete;
- for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
- I != IE; ++I) {
- MachineInstr *MI = I;
- if (MI->getOpcode() == PPC::XXLOR &&
- MI->getOperand(0).getReg() == MI->getOperand(1).getReg() &&
- MI->getOperand(0).getReg() == MI->getOperand(2).getReg())
- ToDelete.push_back(MI);
- }
-
- if (!ToDelete.empty())
- Changed = true;
-
- for (unsigned i = 0, ie = ToDelete.size(); i != ie; ++i) {
- DEBUG(dbgs() << "Removing VSX self-copy: " << *ToDelete[i]);
- ToDelete[i]->eraseFromParent();
- }
-
- return Changed;
- }
-
-public:
- bool runOnMachineFunction(MachineFunction &MF) override {
- TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
- // If we don't have VSX don't bother doing anything here.
- if (!TM->getSubtargetImpl()->hasVSX())
- return false;
- TII = TM->getSubtargetImpl()->getInstrInfo();
-
- bool Changed = false;
-
- for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
- MachineBasicBlock &B = *I++;
- if (processBlock(B))
- Changed = true;
- }
-
- return Changed;
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- };
-}
-
-INITIALIZE_PASS(PPCVSXCopyCleanup, DEBUG_TYPE,
- "PowerPC VSX Copy Cleanup", false, false)
-
-char PPCVSXCopyCleanup::ID = 0;
-FunctionPass*
-llvm::createPPCVSXCopyCleanupPass() { return new PPCVSXCopyCleanup(); }
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "ppc-early-ret"
-STATISTIC(NumBCLR, "Number of early conditional returns");
-STATISTIC(NumBLR, "Number of early returns");
-
-namespace llvm {
- void initializePPCEarlyReturnPass(PassRegistry&);
-}
-
-namespace {
- // PPCEarlyReturn pass - For simple functions without epilogue code, move
- // returns up, and create conditional returns, to avoid unnecessary
- // branch-to-blr sequences.
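For instance (illustrative operands), the pair

    beq cr0, .Lret   ...   .Lret: blr

collapses to the single conditional return beqlr cr0, and an unconditional b .Lret is replaced by blr outright.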
- struct PPCEarlyReturn : public MachineFunctionPass {
- static char ID;
- PPCEarlyReturn() : MachineFunctionPass(ID) {
- initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry());
- }
-
- const PPCTargetMachine *TM;
- const PPCInstrInfo *TII;
-
-protected:
- bool processBlock(MachineBasicBlock &ReturnMBB) {
- bool Changed = false;
-
- MachineBasicBlock::iterator I = ReturnMBB.begin();
- I = ReturnMBB.SkipPHIsAndLabels(I);
-
- // The block must be essentially empty except for the blr.
- if (I == ReturnMBB.end() || I->getOpcode() != PPC::BLR ||
- I != ReturnMBB.getLastNonDebugInstr())
- return Changed;
-
- SmallVector<MachineBasicBlock*, 8> PredToRemove;
- for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(),
- PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) {
- bool OtherReference = false, BlockChanged = false;
- for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) {
- if (J->getOpcode() == PPC::B) {
- if (J->getOperand(0).getMBB() == &ReturnMBB) {
- // This is an unconditional branch to the return. Replace the
- // branch with a blr.
- BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR));
- MachineBasicBlock::iterator K = J--;
- K->eraseFromParent();
- BlockChanged = true;
- ++NumBLR;
- continue;
- }
- } else if (J->getOpcode() == PPC::BCC) {
- if (J->getOperand(2).getMBB() == &ReturnMBB) {
- // This is a conditional branch to the return. Replace the branch
- // with a bclr.
- BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCCLR))
- .addImm(J->getOperand(0).getImm())
- .addReg(J->getOperand(1).getReg());
- MachineBasicBlock::iterator K = J--;
- K->eraseFromParent();
- BlockChanged = true;
- ++NumBCLR;
- continue;
- }
- } else if (J->getOpcode() == PPC::BC || J->getOpcode() == PPC::BCn) {
- if (J->getOperand(1).getMBB() == &ReturnMBB) {
- // This is a conditional branch to the return. Replace the branch
- // with a bclr.
- BuildMI(**PI, J, J->getDebugLoc(),
- TII->get(J->getOpcode() == PPC::BC ?
- PPC::BCLR : PPC::BCLRn))
- .addReg(J->getOperand(0).getReg());
- MachineBasicBlock::iterator K = J--;
- K->eraseFromParent();
- BlockChanged = true;
- ++NumBCLR;
- continue;
- }
- } else if (J->isBranch()) {
- if (J->isIndirectBranch()) {
- if (ReturnMBB.hasAddressTaken())
- OtherReference = true;
- } else
- for (unsigned i = 0; i < J->getNumOperands(); ++i)
- if (J->getOperand(i).isMBB() &&
- J->getOperand(i).getMBB() == &ReturnMBB)
- OtherReference = true;
- } else if (!J->isTerminator() && !J->isDebugValue())
- break;
-
- if (J == (*PI)->begin())
- break;
-
- --J;
- }
-
- if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB))
- OtherReference = true;
-
- // Predecessors are stored in a vector and can't be removed here.
- if (!OtherReference && BlockChanged) {
- PredToRemove.push_back(*PI);
- }
-
- if (BlockChanged)
- Changed = true;
- }
-
- for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i)
- PredToRemove[i]->removeSuccessor(&ReturnMBB);
-
- if (Changed && !ReturnMBB.hasAddressTaken()) {
- // We now might be able to merge this blr-only block into its
- // by-layout predecessor.
- if (ReturnMBB.pred_size() == 1 &&
- (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) {
- // Move the blr into the preceding block.
- MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin();
- PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I);
- PrevMBB.removeSuccessor(&ReturnMBB);
- }
-
- if (ReturnMBB.pred_empty())
- ReturnMBB.eraseFromParent();
- }
-
- return Changed;
- }
-
-public:
- bool runOnMachineFunction(MachineFunction &MF) override {
- TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
- TII = TM->getSubtargetImpl()->getInstrInfo();
-
- bool Changed = false;
-
- // If the function does not have at least two blocks, then there is
- // nothing to do.
- if (MF.size() < 2)
- return Changed;
-
- for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
- MachineBasicBlock &B = *I++;
- if (processBlock(B))
- Changed = true;
- }
-
- return Changed;
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- };
-}
-
-INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
- "PowerPC Early-Return Creation", false, false)
-
-char PPCEarlyReturn::ID = 0;
-FunctionPass*
-llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 4d310fe..4add6f9 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -106,6 +106,15 @@ public:
UseNode, UseIdx);
}
+ bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI,
+ unsigned DefIdx) const override {
+ // Machine LICM should hoist all instructions in low-register-pressure
+ // situations; none are sufficiently free to justify leaving in a loop
+ // body.
+ return false;
+ }
+
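For context, the generic hoisting heuristic treats a low-latency def as cheap and may leave it in the loop when hoisting would raise register pressure; returning false therefore opts every PPC def into hoisting. A simplified (assumed) model of that policy, not the actual MachineLICM code:

    #include <cassert>

    struct HoistDecision {
      bool DefHasLowLatency; // what hasLowDefLatency() reports
      bool RaisesPressure;   // hoisting would lengthen live ranges
      bool hoist() const { return !(DefHasLowLatency && RaisesPressure); }
    };

    int main() {
      HoistDecision PPC{/*DefHasLowLatency=*/false, /*RaisesPressure=*/true};
      assert(PPC.hoist()); // with the override above, always hoist
      return 0;
    }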
bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index 8c76c46..1a045b1 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -61,6 +61,27 @@ def tocentry32 : Operand<iPTR> {
let MIOperandInfo = (ops i32imm:$imm);
}
+def SDT_PPCqvfperm : SDTypeProfile<1, 3, [
+ SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVec<3>
+]>;
+def SDT_PPCqvgpci : SDTypeProfile<1, 1, [
+ SDTCisVec<0>, SDTCisInt<1>
+]>;
+def SDT_PPCqvaligni : SDTypeProfile<1, 3, [
+ SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<3>
+]>;
+def SDT_PPCqvesplati : SDTypeProfile<1, 2, [
+ SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
+]>;
+
+def SDT_PPCqbflt : SDTypeProfile<1, 1, [
+ SDTCisVec<0>, SDTCisVec<1>
+]>;
+
+def SDT_PPCqvlfsb : SDTypeProfile<1, 1, [
+ SDTCisVec<0>, SDTCisPtrTy<1>
+]>;
+
//===----------------------------------------------------------------------===//
// PowerPC specific DAG Nodes.
//
@@ -98,7 +119,8 @@ def PPCfsel : SDNode<"PPCISD::FSEL",
def PPChi : SDNode<"PPCISD::Hi", SDTIntBinOp, []>;
def PPClo : SDNode<"PPCISD::Lo", SDTIntBinOp, []>;
-def PPCtoc_entry: SDNode<"PPCISD::TOC_ENTRY", SDTIntBinOp, [SDNPMayLoad]>;
+def PPCtoc_entry: SDNode<"PPCISD::TOC_ENTRY", SDTIntBinOp,
+ [SDNPMayLoad, SDNPMemOperand]>;
def PPCvmaddfp : SDNode<"PPCISD::VMADDFP", SDTFPTernaryOp, []>;
def PPCvnmsubfp : SDNode<"PPCISD::VNMSUBFP", SDTFPTernaryOp, []>;
@@ -110,14 +132,35 @@ def PPCldGotTprelL : SDNode<"PPCISD::LD_GOT_TPREL_L", SDTIntBinOp,
def PPCaddTls : SDNode<"PPCISD::ADD_TLS", SDTIntBinOp, []>;
def PPCaddisTlsgdHA : SDNode<"PPCISD::ADDIS_TLSGD_HA", SDTIntBinOp>;
def PPCaddiTlsgdL : SDNode<"PPCISD::ADDI_TLSGD_L", SDTIntBinOp>;
+def PPCgetTlsAddr : SDNode<"PPCISD::GET_TLS_ADDR", SDTIntBinOp>;
+def PPCaddiTlsgdLAddr : SDNode<"PPCISD::ADDI_TLSGD_L_ADDR",
+ SDTypeProfile<1, 3, [
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>, SDTCisInt<0> ]>>;
def PPCaddisTlsldHA : SDNode<"PPCISD::ADDIS_TLSLD_HA", SDTIntBinOp>;
def PPCaddiTlsldL : SDNode<"PPCISD::ADDI_TLSLD_L", SDTIntBinOp>;
-def PPCaddisDtprelHA : SDNode<"PPCISD::ADDIS_DTPREL_HA", SDTIntBinOp,
- [SDNPHasChain]>;
+def PPCgetTlsldAddr : SDNode<"PPCISD::GET_TLSLD_ADDR", SDTIntBinOp>;
+def PPCaddiTlsldLAddr : SDNode<"PPCISD::ADDI_TLSLD_L_ADDR",
+ SDTypeProfile<1, 3, [
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>, SDTCisInt<0> ]>>;
+def PPCaddisDtprelHA : SDNode<"PPCISD::ADDIS_DTPREL_HA", SDTIntBinOp>;
def PPCaddiDtprelL : SDNode<"PPCISD::ADDI_DTPREL_L", SDTIntBinOp>;
def PPCvperm : SDNode<"PPCISD::VPERM", SDT_PPCvperm, []>;
+def PPCqvfperm : SDNode<"PPCISD::QVFPERM", SDT_PPCqvfperm, []>;
+def PPCqvgpci : SDNode<"PPCISD::QVGPCI", SDT_PPCqvgpci, []>;
+def PPCqvaligni : SDNode<"PPCISD::QVALIGNI", SDT_PPCqvaligni, []>;
+def PPCqvesplati : SDNode<"PPCISD::QVESPLATI", SDT_PPCqvesplati, []>;
+
+def PPCqbflt : SDNode<"PPCISD::QBFLT", SDT_PPCqbflt, []>;
+
+def PPCqvlfsb : SDNode<"PPCISD::QVLFSb", SDT_PPCqvlfsb,
+ [SDNPHasChain, SDNPMayLoad]>;
+
+def PPCcmpb : SDNode<"PPCISD::CMPB", SDTIntBinOp, []>;
+
// These nodes represent the 32-bit PPC shifts that operate on 6-bit shift
// amounts. These nodes are generated by the multi-precision shift code.
def PPCsrl : SDNode<"PPCISD::SRL" , SDTIntShiftOp>;
@@ -134,25 +177,18 @@ def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
def PPCcall : SDNode<"PPCISD::CALL", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
-def PPCcall_tls : SDNode<"PPCISD::CALL_TLS", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPVariadic]>;
def PPCcall_nop : SDNode<"PPCISD::CALL_NOP", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
-def PPCcall_nop_tls : SDNode<"PPCISD::CALL_NOP_TLS", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPVariadic]>;
-def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>,
- [SDNPHasChain, SDNPSideEffect,
- SDNPInGlue, SDNPOutGlue]>;
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCbctrl : SDNode<"PPCISD::BCTRL", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
+def PPCbctrl_load_toc : SDNode<"PPCISD::BCTRL_LOAD_TOC",
+ SDTypeProfile<0, 1, []>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -195,12 +231,6 @@ def PPClarx : SDNode<"PPCISD::LARX", SDT_PPClarx,
def PPCstcx : SDNode<"PPCISD::STCX", SDT_PPCstcx,
[SDNPHasChain, SDNPMayStore]>;
-// Instructions to support medium and large code model
-def PPCaddisTocHA : SDNode<"PPCISD::ADDIS_TOC_HA", SDTIntBinOp, []>;
-def PPCldTocL : SDNode<"PPCISD::LD_TOC_L", SDTIntBinOp, [SDNPMayLoad]>;
-def PPCaddiTocL : SDNode<"PPCISD::ADDI_TOC_L", SDTIntBinOp, []>;
-
-
// Instructions to support dynamic alloca.
def SDTDynOp : SDTypeProfile<1, 2, []>;
def PPCdynalloc : SDNode<"PPCISD::DYNALLOC", SDTDynOp, [SDNPHasChain]>;
@@ -460,6 +490,15 @@ def u6imm : Operand<i32> {
let ParserMatchClass = PPCU6ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<6>";
}
+def PPCU12ImmAsmOperand : AsmOperandClass {
+ let Name = "U12Imm"; let PredicateMethod = "isU12Imm";
+ let RenderMethod = "addImmOperands";
+}
+def u12imm : Operand<i32> {
+ let PrintMethod = "printU12ImmOperand";
+ let ParserMatchClass = PPCU12ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<12>";
+}
def PPCS16ImmAsmOperand : AsmOperandClass {
let Name = "S16Imm"; let PredicateMethod = "isS16Imm";
let RenderMethod = "addS16ImmOperands";
@@ -675,6 +714,10 @@ def IsPPC4xx : Predicate<"PPCSubTarget->isPPC4xx()">;
def IsPPC6xx : Predicate<"PPCSubTarget->isPPC6xx()">;
def IsE500 : Predicate<"PPCSubTarget->isE500()">;
def HasSPE : Predicate<"PPCSubTarget->HasSPE()">;
+def HasICBT : Predicate<"PPCSubTarget->hasICBT()">;
+
+def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
+def NaNsFPMath : Predicate<"!TM.Options.NoNaNsFPMath">;
//===----------------------------------------------------------------------===//
// PowerPC Multiclass Definitions.
@@ -1010,7 +1053,7 @@ def RESTORE_CRBIT : Pseudo<(outs crbitrc:$cond), (ins memri:$F),
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isReturn = 1, Uses = [LR, RM] in
def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB,
- [(retflag)]>;
+ [(retflag)]>, Requires<[In32BitMode]>;
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in {
def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
[]>;
@@ -1313,14 +1356,14 @@ def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst), "dcbzl $dst",
PPC970_DGroup_Single;
def ICBT : XForm_icbt<31, 22, (outs), (ins u4imm:$CT, memrr:$src),
- "icbt $CT, $src", IIC_LdStLoad>, Requires<[IsBookE]>;
+ "icbt $CT, $src", IIC_LdStLoad>, Requires<[HasICBT]>;
def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)),
(DCBT xoaddr:$dst)>; // data prefetch for loads
def : Pat<(prefetch xoaddr:$dst, (i32 1), imm, (i32 1)),
(DCBTST xoaddr:$dst)>; // data prefetch for stores
def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 0)),
- (ICBT 0, xoaddr:$dst)>; // inst prefetch (for read)
+ (ICBT 0, xoaddr:$dst)>, Requires<[HasICBT]>; // inst prefetch (for read)
// Atomic operations
let usesCustomInserter = 1 in {
@@ -1454,7 +1497,7 @@ def LFD : DForm_1<50, (outs f8rc:$rD), (ins memri:$src),
// Unindexed (r+i) Loads with Update (preinc).
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
def LBZU : DForm_1<35, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
"lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
@@ -1797,7 +1840,7 @@ def NOP_GT_PWR7 : DForm_4_fixedreg_zero<24, 2, (outs), (ins),
"ori 2, 2, 0", IIC_IntSimple, []>;
}
-let isCompare = 1, neverHasSideEffects = 1 in {
+let isCompare = 1, hasSideEffects = 0 in {
def CMPWI : DForm_5_ext<11, (outs crrc:$crD), (ins gprc:$rA, s16imm:$imm),
"cmpwi $crD, $rA, $imm", IIC_IntCompare>;
def CMPLWI : DForm_6_ext<10, (outs crrc:$dst), (ins gprc:$src1, u16imm:$src2),
@@ -1805,7 +1848,7 @@ let isCompare = 1, neverHasSideEffects = 1 in {
}
}
-let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations.
+let PPC970_Unit = 1, hasSideEffects = 0 in { // FXU Operations.
let isCommutable = 1 in {
defm NAND : XForm_6r<31, 476, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
"nand", "$rA, $rS, $rB", IIC_IntSimple,
@@ -1848,7 +1891,7 @@ defm SRAW : XForm_6rc<31, 792, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
}
let PPC970_Unit = 1 in { // FXU Operations.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm SRAWI : XForm_10rc<31, 824, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH),
"srawi", "$rA, $rS, $SH", IIC_IntShift,
[(set i32:$rA, (sra i32:$rS, (i32 imm:$SH)))]>;
@@ -1861,8 +1904,13 @@ defm EXTSB : XForm_11r<31, 954, (outs gprc:$rA), (ins gprc:$rS),
defm EXTSH : XForm_11r<31, 922, (outs gprc:$rA), (ins gprc:$rS),
"extsh", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i16))]>;
+
+let isCommutable = 1 in
+def CMPB : XForm_6<31, 508, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
+ "cmpb $rA, $rS, $rB", IIC_IntGeneral,
+ [(set i32:$rA, (PPCcmpb i32:$rS, i32:$rB))]>;
}
-let isCompare = 1, neverHasSideEffects = 1 in {
+let isCompare = 1, hasSideEffects = 0 in {
def CMPW : XForm_16_ext<31, 0, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
"cmpw $crD, $rA, $rB", IIC_IntCompare>;
def CMPLW : XForm_16_ext<31, 32, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
@@ -1872,7 +1920,7 @@ let isCompare = 1, neverHasSideEffects = 1 in {
let PPC970_Unit = 3 in { // FPU Operations.
//def FCMPO : XForm_17<63, 32, (outs CRRC:$crD), (ins FPRC:$fA, FPRC:$fB),
// "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
-let isCompare = 1, neverHasSideEffects = 1 in {
+let isCompare = 1, hasSideEffects = 0 in {
def FCMPUS : XForm_17<63, 0, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB),
"fcmpu $crD, $fA, $fB", IIC_FPCompare>;
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
@@ -1881,7 +1929,7 @@ let isCompare = 1, neverHasSideEffects = 1 in {
}
let Uses = [RM] in {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
defm FCTIW : XForm_26r<63, 14, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiw", "$frD, $frB", IIC_FPGeneral,
[]>;
@@ -1902,7 +1950,7 @@ let Uses = [RM] in {
[(set f32:$frD, (frnd f32:$frB))]>;
}
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIPD : XForm_26r<63, 456, (outs f8rc:$frD), (ins f8rc:$frB),
"frip", "$frD, $frB", IIC_FPGeneral,
@@ -1939,13 +1987,13 @@ let Uses = [RM] in {
/// often coalesced away and we don't want the dispatch group builder to think
/// that they will fill slots (which could cause the load of a LSU reject to
/// sneak into a d-group with a store).
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
defm FMR : XForm_26r<63, 72, (outs f4rc:$frD), (ins f4rc:$frB),
"fmr", "$frD, $frB", IIC_FPGeneral,
[]>, // (set f32:$frD, f32:$frB)
PPC970_Unit_Pseudo;
-let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations.
+let PPC970_Unit = 3, hasSideEffects = 0 in { // FPU Operations.
// These are artificially split into two different forms, for 4/8 byte FP.
defm FABSS : XForm_26r<63, 264, (outs f4rc:$frD), (ins f4rc:$frB),
"fabs", "$frD, $frB", IIC_FPGeneral,
@@ -1994,11 +2042,20 @@ defm FRSQRTES : XForm_26r<59, 26, (outs f4rc:$frD), (ins f4rc:$frB),
// XL-Form instructions. condition register logical ops.
//
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA),
"mcrf $BF, $BFA", IIC_BrMCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
+// FIXME: According to the ISA (section 2.5.1 of version 2.06), the
+// condition-register logical instructions have preferred forms. Specifically,
+// it is preferred that the bit specified by the BT field be in the same
+// condition register as the bit specified by the BB field. We might want to
+// account for this by hinting the register allocator and anti-dependence
+// breakers, or we could constrain the register class to enforce this and
+// then relax it during register allocation via convertToThreeAddress or some
+// similar mechanism.
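+// For example, "crand 4*cr5+lt, 4*cr5+gt, 4*cr5+eq" is in preferred form
+// (BT and BB both name bits in cr5), whereas "crand 4*cr5+lt, 4*cr5+gt,
+// 4*cr6+eq" is not. (Illustrative operands only, using the usual CR-bit
+// spelling.)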
+
let isCommutable = 1 in {
def CRAND : XLForm_1<19, 257, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
@@ -2072,6 +2129,12 @@ def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT),
def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR),
"mftb $RT, $SPR", IIC_SprMFTB>, Deprecated<DeprecatedMFTB>;
+// A pseudo-instruction used to implement the read of the 64-bit cycle counter
+// on a 32-bit target.
+let hasSideEffects = 1, usesCustomInserter = 1 in
+def ReadTB : Pseudo<(outs gprc:$lo, gprc:$hi), (ins),
+ "#ReadTB", []>;
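+// A sketch of the classic expansion (the actual sequence is produced by the
+// custom inserter):
+//   1: mftbu $hi        // read upper half of the time base
+//      mftb  $lo        // read lower half
+//      mftbu $scratch   // re-read upper half
+//      cmpw  $scratch, $hi
+//      bne   1b         // retry if the upper half ticked over mid-read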
+
let Uses = [CTR] in {
def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins),
"mfctr $rT", IIC_SprMFSPR>,
@@ -2133,7 +2196,7 @@ let mayLoad = 1 in
def RESTORE_VRSAVE : Pseudo<(outs VRSAVERC:$vrsave), (ins memri:$F),
"#RESTORE_VRSAVE", []>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MTOCRF: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins gprc:$ST),
"mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
@@ -2150,7 +2213,7 @@ def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM),
def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins),
"mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
// Pseudo instruction to perform FADD in round-to-zero mode.
let usesCustomInserter = 1, Uses = [RM] in {
@@ -2167,19 +2230,24 @@ let Uses = [RM], Defs = [RM] in {
def MTFSB1 : XForm_43<63, 38, (outs), (ins u5imm:$FM),
"mtfsb1 $FM", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
- def MTFSF : XFLForm<63, 711, (outs), (ins i32imm:$FM, f8rc:$rT),
- "mtfsf $FM, $rT", IIC_IntMTFSB0, []>,
- PPC970_DGroup_Single, PPC970_Unit_FPU;
+ let isCodeGenOnly = 1 in
+ def MTFSFb : XFLForm<63, 711, (outs), (ins i32imm:$FM, f8rc:$rT),
+ "mtfsf $FM, $rT", IIC_IntMTFSB0, []>,
+ PPC970_DGroup_Single, PPC970_Unit_FPU;
}
let Uses = [RM] in {
def MFFS : XForm_42<63, 583, (outs f8rc:$rT), (ins),
"mffs $rT", IIC_IntMFFS,
[(set f64:$rT, (PPCmffs))]>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
+
+ let Defs = [CR1] in
+ def MFFSo : XForm_42<63, 583, (outs f8rc:$rT), (ins),
+ "mffs. $rT", IIC_IntMFFS, []>, isDOT;
}
-let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations.
+let PPC970_Unit = 1, hasSideEffects = 0 in { // FXU Operations.
// XO-Form instructions. Arithmetic instructions that can set overflow bit
let isCommutable = 1 in
defm ADD4 : XOForm_1r<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
@@ -2250,7 +2318,7 @@ defm SUBFZE : XOForm_3rc<31, 200, 0, (outs gprc:$rT), (ins gprc:$rA),
// A-Form instructions. Most of the instructions executed in the FPU are of
// this type.
//
-let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations.
+let PPC970_Unit = 3, hasSideEffects = 0 in { // FPU Operations.
let Uses = [RM] in {
let isCommutable = 1 in {
defm FMADD : AForm_1r<63, 29,
@@ -2346,12 +2414,12 @@ let Uses = [RM] in {
}
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let PPC970_Unit = 1 in { // FXU Operations.
let isSelect = 1 in
def ISEL : AForm_4<31, 15,
(outs gprc:$rT), (ins gprc_nor0:$rA, gprc:$rB, crbitrc:$cond),
- "isel $rT, $rA, $rB, $cond", IIC_IntGeneral,
+ "isel $rT, $rA, $rB, $cond", IIC_IntISEL,
[]>;
}
@@ -2382,7 +2450,7 @@ defm RLWNM : MForm_2r<23, (outs gprc:$rA),
"rlwnm", "$rA, $rS, $rB, $MB, $ME", IIC_IntGeneral,
[]>;
}
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
//===----------------------------------------------------------------------===//
// PowerPC Instruction Patterns
@@ -2433,9 +2501,6 @@ def : Pat<(PPCcall (i32 tglobaladdr:$dst)),
def : Pat<(PPCcall (i32 texternalsym:$dst)),
(BL texternalsym:$dst)>;
-def : Pat<(PPCcall_tls texternalsym:$func, tglobaltlsaddr:$sym),
- (BL_TLS texternalsym:$func, tglobaltlsaddr:$sym)>;
-
def : Pat<(PPCtc_return (i32 tglobaladdr:$dst), imm:$imm),
(TCRETURNdi tglobaladdr:$dst, imm:$imm)>;
@@ -2490,10 +2555,49 @@ def ADDItlsgdL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDItlsgdL32",
[(set i32:$rD,
(PPCaddiTlsgdL i32:$reg, tglobaltlsaddr:$disp))]>;
+// LR is a true define, while the rest of the Defs are clobbers. R3 is
+// explicitly defined when this op is created, so not mentioned here.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [R0,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
+def GETtlsADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
+ "GETtlsADDR32",
+ [(set i32:$rD,
+ (PPCgetTlsAddr i32:$reg, tglobaltlsaddr:$sym))]>;
+// Combined op for ADDItlsgdL32 and GETtlsADDR32, late expanded. R3 and LR
+// are true defines while the rest of the Defs are clobbers.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
+def ADDItlsgdLADDR32 : Pseudo<(outs gprc:$rD),
+ (ins gprc_nor0:$reg, s16imm:$disp, tlsgd32:$sym),
+ "#ADDItlsgdLADDR32",
+ [(set i32:$rD,
+ (PPCaddiTlsgdLAddr i32:$reg,
+ tglobaltlsaddr:$disp,
+ tglobaltlsaddr:$sym))]>;
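+// (The combined form is split back into the addi + GETtlsADDR32 pair by a
+// later expansion step; carrying it as one node until then keeps the TLS
+// call sequence contiguous.)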
def ADDItlsldL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDItlsldL32",
[(set i32:$rD,
(PPCaddiTlsldL i32:$reg, tglobaltlsaddr:$disp))]>;
+// LR is a true define, while the rest of the Defs are clobbers. R3 is
+// explicitly defined when this op is created, so not mentioned here.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [R0,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
+def GETtlsldADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
+ "GETtlsldADDR32",
+ [(set i32:$rD,
+ (PPCgetTlsldAddr i32:$reg,
+ tglobaltlsaddr:$sym))]>;
+// Combined op for ADDItlsldL32 and GETtlsldADDR32, late expanded. R3 and LR
+// are true defines while the rest of the Defs are clobbers.
+let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
+ Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,LR,CTR,CR0,CR1,CR5,CR6,CR7] in
+def ADDItlsldLADDR32 : Pseudo<(outs gprc:$rD),
+ (ins gprc_nor0:$reg, s16imm:$disp, tlsgd32:$sym),
+ "#ADDItlsldLADDR32",
+ [(set i32:$rD,
+ (PPCaddiTlsldLAddr i32:$reg,
+ tglobaltlsaddr:$disp,
+ tglobaltlsaddr:$sym))]>;
def ADDIdtprelL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDIdtprelL32",
[(set i32:$rD,
@@ -2578,6 +2682,7 @@ include "PPCInstrAltivec.td"
include "PPCInstrSPE.td"
include "PPCInstr64Bit.td"
include "PPCInstrVSX.td"
+include "PPCInstrQPX.td"
def crnot : OutPatFrag<(ops node:$in),
(CRNOR $in, $in)>;
@@ -3108,7 +3213,8 @@ def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins),
def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src),
"icbi $src", IIC_LdStICBI, []>;
-def EIEIO : XForm_24_eieio<31, 854, (outs), (ins),
+// This was formerly named EIEIO, but identifiers matching E[0-9A-Z]... are
+// reserved for <errno.h> macros (some systems even define EIEIO), so the
+// record is named EnforceIEIO instead.
+def EnforceIEIO : XForm_24_eieio<31, 854, (outs), (ins),
"eieio", IIC_LdStLoad, []>;
def WAIT : XForm_24_sync<31, 62, (outs), (ins i32imm:$L),
@@ -3161,6 +3267,28 @@ def MFMSR : XForm_rs<31, 83, (outs gprc:$RT), (ins),
def MTMSRD : XForm_mtmsr<31, 178, (outs), (ins gprc:$RS, i32imm:$L),
"mtmsrd $RS, $L", IIC_SprMTMSRD>;
+def MCRFS : XLForm_3<63, 64, (outs crrc:$BF), (ins crrc:$BFA),
+ "mcrfs $BF, $BFA", IIC_BrMCR>;
+
+def MTFSFI : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W),
+ "mtfsfi $BF, $U, $W", IIC_IntMFFS>;
+
+def MTFSFIo : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W),
+ "mtfsfi. $BF, $U, $W", IIC_IntMFFS>, isDOT;
+
+def : InstAlias<"mtfsfi $BF, $U", (MTFSFI crrc:$BF, i32imm:$U, 0)>;
+def : InstAlias<"mtfsfi. $BF, $U", (MTFSFIo crrc:$BF, i32imm:$U, 0)>;
+
+def MTFSF : XFLForm_1<63, 711, (outs),
+ (ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W),
+ "mtfsf $FLM, $FRB, $L, $W", IIC_IntMFFS, []>;
+def MTFSFo : XFLForm_1<63, 711, (outs),
+ (ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W),
+ "mtfsf. $FLM, $FRB, $L, $W", IIC_IntMFFS, []>, isDOT;
+
+def : InstAlias<"mtfsf $FLM, $FRB", (MTFSF i32imm:$FLM, f8rc:$FRB, 0, 0)>;
+def : InstAlias<"mtfsf. $FLM, $FRB", (MTFSFo i32imm:$FLM, f8rc:$FRB, 0, 0)>;
+
def SLBIE : XForm_16b<31, 434, (outs), (ins gprc:$RB),
"slbie $RB", IIC_SprSLBIE, []>;
@@ -3232,6 +3360,26 @@ def MFDCR : XFXForm_1<31, 323, (outs gprc:$RT), (ins i32imm:$SPR),
def MTDCR : XFXForm_1<31, 451, (outs), (ins gprc:$RT, i32imm:$SPR),
"mtdcr $SPR, $RT", IIC_SprMTSPR>, Requires<[IsPPC4xx]>;
+def ATTN : XForm_attn<0, 256, (outs), (ins), "attn", IIC_BrB>;
+
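+// Cache-inhibited ("cix") loads and stores, i.e. Load/Store ... Caching
+// Inhibited Indexed; these are privileged real-address-mode operations.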
+def LBZCIX : XForm_base_r3xo<31, 853, (outs gprc:$RST), (ins gprc:$A, gprc:$B),
+ "lbzcix $RST, $A, $B", IIC_LdStLoad, []>;
+def LHZCIX : XForm_base_r3xo<31, 821, (outs gprc:$RST), (ins gprc:$A, gprc:$B),
+ "lhzcix $RST, $A, $B", IIC_LdStLoad, []>;
+def LWZCIX : XForm_base_r3xo<31, 789, (outs gprc:$RST), (ins gprc:$A, gprc:$B),
+ "lwzcix $RST, $A, $B", IIC_LdStLoad, []>;
+def LDCIX : XForm_base_r3xo<31, 885, (outs gprc:$RST), (ins gprc:$A, gprc:$B),
+ "ldcix $RST, $A, $B", IIC_LdStLoad, []>;
+
+def STBCIX : XForm_base_r3xo<31, 981, (outs), (ins gprc:$RST, gprc:$A, gprc:$B),
+ "stbcix $RST, $A, $B", IIC_LdStLoad, []>;
+def STHCIX : XForm_base_r3xo<31, 949, (outs), (ins gprc:$RST, gprc:$A, gprc:$B),
+ "sthcix $RST, $A, $B", IIC_LdStLoad, []>;
+def STWCIX : XForm_base_r3xo<31, 917, (outs), (ins gprc:$RST, gprc:$A, gprc:$B),
+ "stwcix $RST, $A, $B", IIC_LdStLoad, []>;
+def STDCIX : XForm_base_r3xo<31, 1013, (outs), (ins gprc:$RST, gprc:$A, gprc:$B),
+ "stdcix $RST, $A, $B", IIC_LdStLoad, []>;
+
//===----------------------------------------------------------------------===//
// PowerPC Assembler Instruction Aliases
//
@@ -3497,6 +3645,9 @@ def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNMo gprc:$rA, gprc:$rS, gprc:$rB, 0,
def : InstAlias<"clrlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>;
+def : InstAlias<"cntlz $rA, $rS", (CNTLZW gprc:$rA, gprc:$rS)>;
+def : InstAlias<"cntlz. $rA, $rS", (CNTLZWo gprc:$rA, gprc:$rS)>;
+
def EXTLDI : PPCAsmPseudo<"extldi $rA, $rS, $n, $b",
(ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>;
def EXTLDIo : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b",
diff --git a/lib/Target/PowerPC/PPCInstrQPX.td b/lib/Target/PowerPC/PPCInstrQPX.td
new file mode 100644
index 0000000..c984d46
--- /dev/null
+++ b/lib/Target/PowerPC/PPCInstrQPX.td
@@ -0,0 +1,1192 @@
+//===- PPCInstrQPX.td - The PowerPC QPX Extension --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the QPX extension to the PowerPC instruction set.
+// Reference:
+// Book Q: QPX Architecture Definition. IBM, 2011 (as updated).
+//
+//===----------------------------------------------------------------------===//
+
+def PPCRegQFRCAsmOperand : AsmOperandClass {
+ let Name = "RegQFRC"; let PredicateMethod = "isRegNumber";
+}
+def qfrc : RegisterOperand<QFRC> {
+ let ParserMatchClass = PPCRegQFRCAsmOperand;
+}
+def PPCRegQSRCAsmOperand : AsmOperandClass {
+ let Name = "RegQSRC"; let PredicateMethod = "isRegNumber";
+}
+def qsrc : RegisterOperand<QSRC> {
+ let ParserMatchClass = PPCRegQSRCAsmOperand;
+}
+def PPCRegQBRCAsmOperand : AsmOperandClass {
+ let Name = "RegQBRC"; let PredicateMethod = "isRegNumber";
+}
+def qbrc : RegisterOperand<QBRC> {
+ let ParserMatchClass = PPCRegQBRCAsmOperand;
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers for defining instructions that directly correspond to intrinsics.
+
+// QPXA1_Int - A AForm_1 intrinsic definition.
+class QPXA1_Int<bits<6> opcode, bits<5> xo, string opc, Intrinsic IntID>
+ : AForm_1<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ !strconcat(opc, " $FRT, $FRA, $FRC, $FRB"), IIC_FPFused,
+ [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
+// QPXA1s_Int - A AForm_1 intrinsic definition (simple instructions).
+class QPXA1s_Int<bits<6> opcode, bits<5> xo, string opc, Intrinsic IntID>
+ : AForm_1<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ !strconcat(opc, " $FRT, $FRA, $FRC, $FRB"), IIC_VecPerm,
+ [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
+// QPXA2_Int - A AForm_2 intrinsic definition.
+class QPXA2_Int<bits<6> opcode, bits<5> xo, string opc, Intrinsic IntID>
+ : AForm_2<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ !strconcat(opc, " $FRT, $FRA, $FRB"), IIC_FPGeneral,
+ [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
+// QPXA3_Int - A AForm_3 intrinsic definition.
+class QPXA3_Int<bits<6> opcode, bits<5> xo, string opc, Intrinsic IntID>
+ : AForm_3<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRC),
+ !strconcat(opc, " $FRT, $FRA, $FRC"), IIC_FPGeneral,
+ [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRC))]>;
+// QPXA4_Int - A AForm_4a intrinsic definition.
+class QPXA4_Int<bits<6> opcode, bits<5> xo, string opc, Intrinsic IntID>
+ : AForm_4a<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ !strconcat(opc, " $FRT, $FRB"), IIC_FPGeneral,
+ [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
+// QPXX18_Int - A XForm_18 intrinsic definition.
+class QPXX18_Int<bits<6> opcode, bits<10> xo, string opc, Intrinsic IntID>
+ : XForm_18<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ !strconcat(opc, " $FRT, $FRA, $FRB"), IIC_FPCompare,
+ [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
+// QPXX19_Int - A XForm_19 intrinsic definition.
+class QPXX19_Int<bits<6> opcode, bits<10> xo, string opc, Intrinsic IntID>
+ : XForm_19<opcode, xo, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ !strconcat(opc, " $FRT, $FRB"), IIC_FPGeneral,
+ [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
+
+//===----------------------------------------------------------------------===//
+// Pattern Frags.
+
+def extloadv4f32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4f32;
+}]>;
+
+def truncstorev4f32 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
+}]>;
+def pre_truncstv4f32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val,
+ node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
+}]>;
+
+def fround_inexact : PatFrag<(ops node:$val), (fround node:$val), [{
+ return cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() == 0;
+}]>;
+
+def fround_exact : PatFrag<(ops node:$val), (fround node:$val), [{
+ return cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() == 1;
+}]>;
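+// The second operand of fround (ISD::FP_ROUND) is its "exact" flag: 0 means
+// the truncation may discard significant bits (a real round is required),
+// while 1 means the value is known to be exactly representable in the
+// narrower type, so the conversion can be folded into a register-class copy
+// (see the fround_exact pattern near the end of this file).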
+
+let FastIselShouldIgnore = 1 in // FastIsel should ignore all u12 instrs.
+ def u12 : ImmLeaf<i32, [{ return (Imm & 0xFFF) == Imm; }]>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Definitions.
+
+def HasQPX : Predicate<"PPCSubTarget->hasQPX()">;
+let Predicates = [HasQPX] in {
+let DecoderNamespace = "QPX" in {
+let hasSideEffects = 0 in { // QPX instructions don't have side effects.
+let Uses = [RM] in {
+ // Add Instructions
+ let isCommutable = 1 in {
+ def QVFADD : AForm_2<4, 21,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfadd $FRT, $FRA, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (fadd v4f64:$FRA, v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFADDS : QPXA2_Int<0, 21, "qvfadds", int_ppc_qpx_qvfadds>;
+ def QVFADDSs : AForm_2<0, 21,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfadds $FRT, $FRA, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (fadd v4f32:$FRA, v4f32:$FRB))]>;
+ }
+ def QVFSUB : AForm_2<4, 20,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfsub $FRT, $FRA, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (fsub v4f64:$FRA, v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFSUBS : QPXA2_Int<0, 20, "qvfsubs", int_ppc_qpx_qvfsubs>;
+ def QVFSUBSs : AForm_2<0, 20,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfsubs $FRT, $FRA, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (fsub v4f32:$FRA, v4f32:$FRB))]>;
+
+ // Estimate Instructions
+ def QVFRE : AForm_4a<4, 24, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfre $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (PPCfre v4f64:$FRB))]>;
+ def QVFRES : QPXA4_Int<0, 24, "qvfres", int_ppc_qpx_qvfres>;
+ let isCodeGenOnly = 1 in
+ def QVFRESs : AForm_4a<0, 24, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfres $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (PPCfre v4f32:$FRB))]>;
+
+ def QVFRSQRTE : AForm_4a<4, 26, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfrsqrte $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (PPCfrsqrte v4f64:$FRB))]>;
+ def QVFRSQRTES : QPXA4_Int<0, 26, "qvfrsqrtes", int_ppc_qpx_qvfrsqrtes>;
+ let isCodeGenOnly = 1 in
+ def QVFRSQRTESs : AForm_4a<0, 26, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfrsqrtes $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (PPCfrsqrte v4f32:$FRB))]>;
+
+ // Multiply Instructions
+ let isCommutable = 1 in {
+ def QVFMUL : AForm_3<4, 25,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRC),
+ "qvfmul $FRT, $FRA, $FRC", IIC_FPGeneral,
+ [(set v4f64:$FRT, (fmul v4f64:$FRA, v4f64:$FRC))]>;
+ let isCodeGenOnly = 1 in
+ def QVFMULS : QPXA3_Int<0, 25, "qvfmuls", int_ppc_qpx_qvfmuls>;
+ def QVFMULSs : AForm_3<0, 25,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRC),
+ "qvfmuls $FRT, $FRA, $FRC", IIC_FPGeneral,
+ [(set v4f32:$FRT, (fmul v4f32:$FRA, v4f32:$FRC))]>;
+ }
+ def QVFXMUL : QPXA3_Int<4, 17, "qvfxmul", int_ppc_qpx_qvfxmul>;
+ def QVFXMULS : QPXA3_Int<0, 17, "qvfxmuls", int_ppc_qpx_qvfxmuls>;
+
+ // Multiply-add instructions
+ def QVFMADD : AForm_1<4, 29,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfmadd $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f64:$FRT, (fma v4f64:$FRA, v4f64:$FRC, v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFMADDS : QPXA1_Int<0, 29, "qvfmadds", int_ppc_qpx_qvfmadds>;
+ def QVFMADDSs : AForm_1<0, 29,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, qsrc:$FRC),
+ "qvfmadds $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f32:$FRT, (fma v4f32:$FRA, v4f32:$FRC, v4f32:$FRB))]>;
+ def QVFNMADD : AForm_1<4, 31,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfnmadd $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f64:$FRT, (fneg (fma v4f64:$FRA, v4f64:$FRC,
+ v4f64:$FRB)))]>;
+ let isCodeGenOnly = 1 in
+ def QVFNMADDS : QPXA1_Int<0, 31, "qvfnmadds", int_ppc_qpx_qvfnmadds>;
+ def QVFNMADDSs : AForm_1<0, 31,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, qsrc:$FRC),
+ "qvfnmadds $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f32:$FRT, (fneg (fma v4f32:$FRA, v4f32:$FRC,
+ v4f32:$FRB)))]>;
+ def QVFMSUB : AForm_1<4, 28,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfmsub $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f64:$FRT, (fma v4f64:$FRA, v4f64:$FRC,
+ (fneg v4f64:$FRB)))]>;
+ let isCodeGenOnly = 1 in
+ def QVFMSUBS : QPXA1_Int<0, 28, "qvfmsubs", int_ppc_qpx_qvfmsubs>;
+ def QVFMSUBSs : AForm_1<0, 28,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, qsrc:$FRC),
+ "qvfmsubs $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f32:$FRT, (fma v4f32:$FRA, v4f32:$FRC,
+ (fneg v4f32:$FRB)))]>;
+ def QVFNMSUB : AForm_1<4, 30,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfnmsub $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f64:$FRT, (fneg (fma v4f64:$FRA, v4f64:$FRC,
+ (fneg v4f64:$FRB))))]>;
+ let isCodeGenOnly = 1 in
+ def QVFNMSUBS : QPXA1_Int<0, 30, "qvfnmsubs", int_ppc_qpx_qvfnmsubs>;
+ def QVFNMSUBSs : AForm_1<0, 30,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, qsrc:$FRC),
+ "qvfnmsubs $FRT, $FRA, $FRC, $FRB", IIC_FPFused,
+ [(set v4f32:$FRT, (fneg (fma v4f32:$FRA, v4f32:$FRC,
+ (fneg v4f32:$FRB))))]>;
+ def QVFXMADD : QPXA1_Int<4, 9, "qvfxmadd", int_ppc_qpx_qvfxmadd>;
+ def QVFXMADDS : QPXA1_Int<0, 9, "qvfxmadds", int_ppc_qpx_qvfxmadds>;
+ def QVFXXNPMADD : QPXA1_Int<4, 11, "qvfxxnpmadd", int_ppc_qpx_qvfxxnpmadd>;
+ def QVFXXNPMADDS : QPXA1_Int<0, 11, "qvfxxnpmadds", int_ppc_qpx_qvfxxnpmadds>;
+ def QVFXXCPNMADD : QPXA1_Int<4, 3, "qvfxxcpnmadd", int_ppc_qpx_qvfxxcpnmadd>;
+ def QVFXXCPNMADDS : QPXA1_Int<0, 3, "qvfxxcpnmadds", int_ppc_qpx_qvfxxcpnmadds>;
+ def QVFXXMADD : QPXA1_Int<4, 1, "qvfxxmadd", int_ppc_qpx_qvfxxmadd>;
+ def QVFXXMADDS : QPXA1_Int<0, 1, "qvfxxmadds", int_ppc_qpx_qvfxxmadds>;
+
+ // Select Instruction
+ let isCodeGenOnly = 1 in
+ def QVFSEL : QPXA1s_Int<4, 23, "qvfsel", int_ppc_qpx_qvfsel>;
+ def QVFSELb : AForm_1<4, 23, (outs qfrc:$FRT),
+ (ins qbrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfsel $FRT, $FRA, $FRC, $FRB", IIC_VecPerm,
+ [(set v4f64:$FRT, (vselect v4i1:$FRA,
+ v4f64:$FRC, v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFSELbs : AForm_1<4, 23, (outs qsrc:$FRT),
+ (ins qbrc:$FRA, qsrc:$FRB, qsrc:$FRC),
+ "qvfsel $FRT, $FRA, $FRC, $FRB", IIC_VecPerm,
+ [(set v4f32:$FRT, (vselect v4i1:$FRA,
+ v4f32:$FRC, v4f32:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFSELbb: AForm_1<4, 23, (outs qbrc:$FRT),
+ (ins qbrc:$FRA, qbrc:$FRB, qbrc:$FRC),
+ "qvfsel $FRT, $FRA, $FRC, $FRB", IIC_VecPerm,
+ [(set v4i1:$FRT, (vselect v4i1:$FRA,
+ v4i1:$FRC, v4i1:$FRB))]>;
+
+ // SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
+ // instruction selection into a branch sequence.
+ let usesCustomInserter = 1 in {
+ def SELECT_CC_QFRC: Pseudo<(outs qfrc:$dst), (ins crrc:$cond, qfrc:$T, qfrc:$F,
+ i32imm:$BROPC), "#SELECT_CC_QFRC",
+ []>;
+ def SELECT_CC_QSRC: Pseudo<(outs qsrc:$dst), (ins crrc:$cond, qsrc:$T, qsrc:$F,
+ i32imm:$BROPC), "#SELECT_CC_QSRC",
+ []>;
+ def SELECT_CC_QBRC: Pseudo<(outs qbrc:$dst), (ins crrc:$cond, qbrc:$T, qbrc:$F,
+ i32imm:$BROPC), "#SELECT_CC_QBRC",
+ []>;
+
+ // SELECT_* pseudo instructions, like SELECT_CC_* but taking condition
+ // register bit directly.
+ def SELECT_QFRC: Pseudo<(outs qfrc:$dst), (ins crbitrc:$cond,
+ qfrc:$T, qfrc:$F), "#SELECT_QFRC",
+ [(set v4f64:$dst,
+ (select i1:$cond, v4f64:$T, v4f64:$F))]>;
+ def SELECT_QSRC: Pseudo<(outs qsrc:$dst), (ins crbitrc:$cond,
+ qsrc:$T, qsrc:$F), "#SELECT_QSRC",
+ [(set v4f32:$dst,
+ (select i1:$cond, v4f32:$T, v4f32:$F))]>;
+ def SELECT_QBRC: Pseudo<(outs qbrc:$dst), (ins crbitrc:$cond,
+ qbrc:$T, qbrc:$F), "#SELECT_QBRC",
+ [(set v4i1:$dst,
+ (select i1:$cond, v4i1:$T, v4i1:$F))]>;
+ }
+
+ // Convert and Round Instructions
+ def QVFCTID : QPXX19_Int<4, 814, "qvfctid", int_ppc_qpx_qvfctid>;
+ let isCodeGenOnly = 1 in
+ def QVFCTIDb : XForm_19<4, 814, (outs qbrc:$FRT), (ins qbrc:$FRB),
+ "qvfctid $FRT, $FRB", IIC_FPGeneral, []>;
+
+ def QVFCTIDU : QPXX19_Int<4, 942, "qvfctidu", int_ppc_qpx_qvfctidu>;
+ def QVFCTIDZ : QPXX19_Int<4, 815, "qvfctidz", int_ppc_qpx_qvfctidz>;
+ def QVFCTIDUZ : QPXX19_Int<4, 943, "qvfctiduz", int_ppc_qpx_qvfctiduz>;
+ def QVFCTIW : QPXX19_Int<4, 14, "qvfctiw", int_ppc_qpx_qvfctiw>;
+ def QVFCTIWU : QPXX19_Int<4, 142, "qvfctiwu", int_ppc_qpx_qvfctiwu>;
+ def QVFCTIWZ : QPXX19_Int<4, 15, "qvfctiwz", int_ppc_qpx_qvfctiwz>;
+ def QVFCTIWUZ : QPXX19_Int<4, 143, "qvfctiwuz", int_ppc_qpx_qvfctiwuz>;
+ def QVFCFID : QPXX19_Int<4, 846, "qvfcfid", int_ppc_qpx_qvfcfid>;
+ let isCodeGenOnly = 1 in
+ def QVFCFIDb : XForm_19<4, 846, (outs qbrc:$FRT), (ins qbrc:$FRB),
+ "qvfcfid $FRT, $FRB", IIC_FPGeneral, []>;
+
+ def QVFCFIDU : QPXX19_Int<4, 974, "qvfcfidu", int_ppc_qpx_qvfcfidu>;
+ def QVFCFIDS : QPXX19_Int<0, 846, "qvfcfids", int_ppc_qpx_qvfcfids>;
+ def QVFCFIDUS : QPXX19_Int<0, 974, "qvfcfidus", int_ppc_qpx_qvfcfidus>;
+
+ let isCodeGenOnly = 1 in
+ def QVFRSP : QPXX19_Int<4, 12, "qvfrsp", int_ppc_qpx_qvfrsp>;
+ def QVFRSPs : XForm_19<4, 12,
+ (outs qsrc:$FRT), (ins qfrc:$FRB),
+ "qvfrsp $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (fround_inexact v4f64:$FRB))]>;
+
+ def QVFRIZ : XForm_19<4, 424, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfriz $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (ftrunc v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFRIZs : XForm_19<4, 424, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfriz $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (ftrunc v4f32:$FRB))]>;
+
+ def QVFRIN : XForm_19<4, 392, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfrin $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (frnd v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFRINs : XForm_19<4, 392, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfrin $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (frnd v4f32:$FRB))]>;
+
+ def QVFRIP : XForm_19<4, 456, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfrip $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (fceil v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFRIPs : XForm_19<4, 456, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfrip $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (fceil v4f32:$FRB))]>;
+
+ def QVFRIM : XForm_19<4, 488, (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfrim $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f64:$FRT, (ffloor v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFRIMs : XForm_19<4, 488, (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfrim $FRT, $FRB", IIC_FPGeneral,
+ [(set v4f32:$FRT, (ffloor v4f32:$FRB))]>;
+
+ // Move Instructions
+ def QVFMR : XForm_19<4, 72,
+ (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfmr $FRT, $FRB", IIC_VecPerm,
+ [/* (set v4f64:$FRT, v4f64:$FRB) */]>;
+ let isCodeGenOnly = 1 in {
+ def QVFMRs : XForm_19<4, 72,
+ (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfmr $FRT, $FRB", IIC_VecPerm,
+ [/* (set v4f32:$FRT, v4f32:$FRB) */]>;
+ def QVFMRb : XForm_19<4, 72,
+ (outs qbrc:$FRT), (ins qbrc:$FRB),
+ "qvfmr $FRT, $FRB", IIC_VecPerm,
+ [/* (set v4i1:$FRT, v4i1:$FRB) */]>;
+ }
+ def QVFNEG : XForm_19<4, 40,
+ (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfneg $FRT, $FRB", IIC_VecPerm,
+ [(set v4f64:$FRT, (fneg v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFNEGs : XForm_19<4, 40,
+ (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfneg $FRT, $FRB", IIC_VecPerm,
+ [(set v4f32:$FRT, (fneg v4f32:$FRB))]>;
+ def QVFABS : XForm_19<4, 264,
+ (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfabs $FRT, $FRB", IIC_VecPerm,
+ [(set v4f64:$FRT, (fabs v4f64:$FRB))]>;
+ let isCodeGenOnly = 1 in
+ def QVFABSs : XForm_19<4, 264,
+ (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfabs $FRT, $FRB", IIC_VecPerm,
+ [(set v4f32:$FRT, (fabs v4f32:$FRB))]>;
+ def QVFNABS : XForm_19<4, 136,
+ (outs qfrc:$FRT), (ins qfrc:$FRB),
+ "qvfnabs $FRT, $FRB", IIC_VecPerm,
+ [(set v4f64:$FRT, (fneg (fabs v4f64:$FRB)))]>;
+ let isCodeGenOnly = 1 in
+ def QVFNABSs : XForm_19<4, 136,
+ (outs qsrc:$FRT), (ins qsrc:$FRB),
+ "qvfnabs $FRT, $FRB", IIC_VecPerm,
+ [(set v4f32:$FRT, (fneg (fabs v4f32:$FRB)))]>;
+ def QVFCPSGN : XForm_18<4, 8,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfcpsgn $FRT, $FRA, $FRB", IIC_VecPerm,
+ [(set v4f64:$FRT, (fcopysign v4f64:$FRB, v4f64:$FRA))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCPSGNs : XForm_18<4, 8,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfcpsgn $FRT, $FRA, $FRB", IIC_VecPerm,
+ [(set v4f32:$FRT, (fcopysign v4f32:$FRB, v4f32:$FRA))]>;
+
+ def QVALIGNI : Z23Form_1<4, 5,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, u2imm:$idx),
+ "qvaligni $FRT, $FRA, $FRB, $idx", IIC_VecPerm,
+ [(set v4f64:$FRT,
+ (PPCqvaligni v4f64:$FRA, v4f64:$FRB,
+ (i32 imm:$idx)))]>;
+ let isCodeGenOnly = 1 in
+ def QVALIGNIs : Z23Form_1<4, 5,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, u2imm:$idx),
+ "qvaligni $FRT, $FRA, $FRB, $idx", IIC_VecPerm,
+ [(set v4f32:$FRT,
+ (PPCqvaligni v4f32:$FRA, v4f32:$FRB,
+ (i32 imm:$idx)))]>;
+ let isCodeGenOnly = 1 in
+ def QVALIGNIb : Z23Form_1<4, 5,
+ (outs qbrc:$FRT), (ins qbrc:$FRA, qbrc:$FRB, u2imm:$idx),
+ "qvaligni $FRT, $FRA, $FRB, $idx", IIC_VecPerm,
+ [(set v4i1:$FRT,
+ (PPCqvaligni v4i1:$FRA, v4i1:$FRB,
+ (i32 imm:$idx)))]>;
+
+ def QVESPLATI : Z23Form_2<4, 37,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, u2imm:$idx),
+ "qvesplati $FRT, $FRA, $idx", IIC_VecPerm,
+ [(set v4f64:$FRT,
+ (PPCqvesplati v4f64:$FRA, (i32 imm:$idx)))]>;
+ let isCodeGenOnly = 1 in
+ def QVESPLATIs : Z23Form_2<4, 37,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, u2imm:$idx),
+ "qvesplati $FRT, $FRA, $idx", IIC_VecPerm,
+ [(set v4f32:$FRT,
+ (PPCqvesplati v4f32:$FRA, (i32 imm:$idx)))]>;
+ let isCodeGenOnly = 1 in
+ def QVESPLATIb : Z23Form_2<4, 37,
+ (outs qbrc:$FRT), (ins qbrc:$FRA, u2imm:$idx),
+ "qvesplati $FRT, $FRA, $idx", IIC_VecPerm,
+ [(set v4i1:$FRT,
+ (PPCqvesplati v4i1:$FRA, (i32 imm:$idx)))]>;
+
+ def QVFPERM : AForm_1<4, 6,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, qfrc:$FRC),
+ "qvfperm $FRT, $FRA, $FRB, $FRC", IIC_VecPerm,
+ [(set v4f64:$FRT,
+ (PPCqvfperm v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
+ let isCodeGenOnly = 1 in
+ def QVFPERMs : AForm_1<4, 6,
+ (outs qsrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB, qfrc:$FRC),
+ "qvfperm $FRT, $FRA, $FRB, $FRC", IIC_VecPerm,
+ [(set v4f32:$FRT,
+ (PPCqvfperm v4f32:$FRA, v4f32:$FRB, v4f64:$FRC))]>;
+
+ let isReMaterializable = 1, isAsCheapAsAMove = 1 in
+ def QVGPCI : Z23Form_3<4, 133,
+ (outs qfrc:$FRT), (ins u12imm:$idx),
+ "qvgpci $FRT, $idx", IIC_VecPerm,
+ [(set v4f64:$FRT, (PPCqvgpci (u12:$idx)))]>;
+
+ // Compare Instruction
+ let isCodeGenOnly = 1 in
+ def QVFTSTNAN : QPXX18_Int<4, 64, "qvftstnan", int_ppc_qpx_qvftstnan>;
+ def QVFTSTNANb : XForm_18<4, 64, (outs qbrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvftstnan $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f64:$FRA, v4f64:$FRB, SETUO))]>;
+ let isCodeGenOnly = 1 in
+ def QVFTSTNANbs : XForm_18<4, 64, (outs qbrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvftstnan $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f32:$FRA, v4f32:$FRB, SETUO))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPLT : QPXX18_Int<4, 96, "qvfcmplt", int_ppc_qpx_qvfcmplt>;
+ def QVFCMPLTb : XForm_18<4, 96, (outs qbrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfcmplt $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f64:$FRA, v4f64:$FRB, SETOLT))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPLTbs : XForm_18<4, 96, (outs qbrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfcmplt $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f32:$FRA, v4f32:$FRB, SETOLT))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPGT : QPXX18_Int<4, 32, "qvfcmpgt", int_ppc_qpx_qvfcmpgt>;
+ def QVFCMPGTb : XForm_18<4, 32, (outs qbrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfcmpgt $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f64:$FRA, v4f64:$FRB, SETOGT))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPGTbs : XForm_18<4, 32, (outs qbrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfcmpgt $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f32:$FRA, v4f32:$FRB, SETOGT))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPEQ : QPXX18_Int<4, 0, "qvfcmpeq", int_ppc_qpx_qvfcmpeq>;
+ def QVFCMPEQb : XForm_18<4, 0, (outs qbrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB),
+ "qvfcmpeq $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f64:$FRA, v4f64:$FRB, SETOEQ))]>;
+ let isCodeGenOnly = 1 in
+ def QVFCMPEQbs : XForm_18<4, 0, (outs qbrc:$FRT), (ins qsrc:$FRA, qsrc:$FRB),
+ "qvfcmpeq $FRT, $FRA, $FRB", IIC_FPCompare,
+ [(set v4i1:$FRT,
+ (setcc v4f32:$FRA, v4f32:$FRB, SETOEQ))]>;
+
+ let isCodeGenOnly = 1 in
+ def QVFLOGICAL : XForm_20<4, 4,
+ (outs qfrc:$FRT), (ins qfrc:$FRA, qfrc:$FRB, u12imm:$tttt),
+ "qvflogical $FRT, $FRA, $FRB, $tttt", IIC_VecPerm, []>;
+ def QVFLOGICALb : XForm_20<4, 4,
+ (outs qbrc:$FRT), (ins qbrc:$FRA, qbrc:$FRB, u12imm:$tttt),
+ "qvflogical $FRT, $FRA, $FRB, $tttt", IIC_VecPerm, []>;
+ let isCodeGenOnly = 1 in
+ def QVFLOGICALs : XForm_20<4, 4,
+ (outs qbrc:$FRT), (ins qbrc:$FRA, qbrc:$FRB, u12imm:$tttt),
+ "qvflogical $FRT, $FRA, $FRB, $tttt", IIC_VecPerm, []>;
+
+ // Load indexed instructions
+ let mayLoad = 1, canFoldAsLoad = 1 in {
+ def QVLFDX : XForm_1<31, 583,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfdx $FRT, $src", IIC_LdStLFD,
+ [(set v4f64:$FRT, (load xoaddr:$src))]>;
+ let isCodeGenOnly = 1 in
+ def QVLFDXb : XForm_1<31, 583,
+ (outs qbrc:$FRT), (ins memrr:$src),
+ "qvlfdx $FRT, $src", IIC_LdStLFD, []>;
+
+ let RC = 1 in
+ def QVLFDXA : XForm_1<31, 583,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfdxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFDUX : XForm_1<31, 615,
+ (outs qfrc:$FRT, ptr_rc_nor0:$ea_result),
+ (ins memrr:$src),
+ "qvlfdux $FRT, $src", IIC_LdStLFDU, []>,
+ RegConstraint<"$src.ptrreg = $ea_result">,
+ NoEncode<"$ea_result">;
+ let RC = 1 in
+ def QVLFDUXA : XForm_1<31, 615,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfduxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFSX : XForm_1<31, 519,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfsx $FRT, $src", IIC_LdStLFD,
+ [(set v4f64:$FRT, (extloadv4f32 xoaddr:$src))]>;
+
+ let isCodeGenOnly = 1 in
+ def QVLFSXb : XForm_1<31, 519,
+ (outs qbrc:$FRT), (ins memrr:$src),
+ "qvlfsx $FRT, $src", IIC_LdStLFD,
+ [(set v4i1:$FRT, (PPCqvlfsb xoaddr:$src))]>;
+ let isCodeGenOnly = 1 in
+ def QVLFSXs : XForm_1<31, 519,
+ (outs qsrc:$FRT), (ins memrr:$src),
+ "qvlfsx $FRT, $src", IIC_LdStLFD,
+ [(set v4f32:$FRT, (load xoaddr:$src))]>;
+
+ let RC = 1 in
+ def QVLFSXA : XForm_1<31, 519,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfsxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFSUX : XForm_1<31, 551,
+ (outs qsrc:$FRT, ptr_rc_nor0:$ea_result),
+ (ins memrr:$src),
+ "qvlfsux $FRT, $src", IIC_LdStLFDU, []>,
+ RegConstraint<"$src.ptrreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+ let RC = 1 in
+ def QVLFSUXA : XForm_1<31, 551,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfsuxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFCDX : XForm_1<31, 71,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcdx $FRT, $src", IIC_LdStLFD, []>;
+ let RC = 1 in
+ def QVLFCDXA : XForm_1<31, 71,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcdxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFCDUX : XForm_1<31, 103,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcdux $FRT, $src", IIC_LdStLFD, []>;
+ let RC = 1 in
+ def QVLFCDUXA : XForm_1<31, 103,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcduxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFCSX : XForm_1<31, 7,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcsx $FRT, $src", IIC_LdStLFD, []>;
+ let isCodeGenOnly = 1 in
+ def QVLFCSXs : XForm_1<31, 7,
+ (outs qsrc:$FRT), (ins memrr:$src),
+ "qvlfcsx $FRT, $src", IIC_LdStLFD, []>;
+
+ let RC = 1 in
+ def QVLFCSXA : XForm_1<31, 7,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcsxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFCSUX : XForm_1<31, 39,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcsux $FRT, $src", IIC_LdStLFD, []>;
+ let RC = 1 in
+ def QVLFCSUXA : XForm_1<31, 39,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfcsuxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFIWAX : XForm_1<31, 871,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfiwax $FRT, $src", IIC_LdStLFD, []>;
+ let RC = 1 in
+ def QVLFIWAXA : XForm_1<31, 871,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfiwaxa $FRT, $src", IIC_LdStLFD, []>;
+
+ def QVLFIWZX : XForm_1<31, 839,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfiwzx $FRT, $src", IIC_LdStLFD, []>;
+ let RC = 1 in
+ def QVLFIWZXA : XForm_1<31, 839,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlfiwzxa $FRT, $src", IIC_LdStLFD, []>;
+ }
+
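+  // Permute-control loads: compute a permute-control vector from the
+  // effective address (in the spirit of Altivec's lvsl/lvsr) for use with
+  // qvfperm when handling unaligned or variable-indexed accesses.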
+ def QVLPCLDX : XForm_1<31, 582,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlpcldx $FRT, $src", IIC_LdStLFD, []>;
+ def QVLPCLSX : XForm_1<31, 518,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlpclsx $FRT, $src", IIC_LdStLFD, []>;
+ let isCodeGenOnly = 1 in
+ def QVLPCLSXint : XForm_11<31, 518,
+ (outs qfrc:$FRT), (ins G8RC:$src),
+ "qvlpclsx $FRT, 0, $src", IIC_LdStLFD, []>;
+ def QVLPCRDX : XForm_1<31, 70,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlpcrdx $FRT, $src", IIC_LdStLFD, []>;
+ def QVLPCRSX : XForm_1<31, 6,
+ (outs qfrc:$FRT), (ins memrr:$src),
+ "qvlpcrsx $FRT, $src", IIC_LdStLFD, []>;
+
+ // Store indexed instructions
+ let mayStore = 1 in {
+ def QVSTFDX : XForm_8<31, 711,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfdx $FRT, $dst", IIC_LdStSTFD,
+ [(store qfrc:$FRT, xoaddr:$dst)]>;
+ let isCodeGenOnly = 1 in
+ def QVSTFDXb : XForm_8<31, 711,
+ (outs), (ins qbrc:$FRT, memrr:$dst),
+ "qvstfdx $FRT, $dst", IIC_LdStSTFD, []>;
+
+ let RC = 1 in
+ def QVSTFDXA : XForm_8<31, 711,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfdxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFDUX : XForm_8<31, 743, (outs ptr_rc_nor0:$ea_res),
+ (ins qfrc:$FRT, memrr:$dst),
+ "qvstfdux $FRT, $dst", IIC_LdStSTFDU, []>,
+ RegConstraint<"$dst.ptrreg = $ea_res">,
+ NoEncode<"$ea_res">;
+
+ let RC = 1 in
+ def QVSTFDUXA : XForm_8<31, 743,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfduxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFDXI : XForm_8<31, 709,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfdxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFDXIA : XForm_8<31, 709,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfdxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFDUXI : XForm_8<31, 741,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfduxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFDUXIA : XForm_8<31, 741,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfduxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFSX : XForm_8<31, 647,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsx $FRT, $dst", IIC_LdStSTFD,
+ [(truncstorev4f32 qfrc:$FRT, xoaddr:$dst)]>;
+ let isCodeGenOnly = 1 in
+ def QVSTFSXs : XForm_8<31, 647,
+ (outs), (ins qsrc:$FRT, memrr:$dst),
+ "qvstfsx $FRT, $dst", IIC_LdStSTFD,
+ [(store qsrc:$FRT, xoaddr:$dst)]>;
+
+ let RC = 1 in
+ def QVSTFSXA : XForm_8<31, 647,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFSUX : XForm_8<31, 679, (outs ptr_rc_nor0:$ea_res),
+ (ins qsrc:$FRT, memrr:$dst),
+ "qvstfsux $FRT, $dst", IIC_LdStSTFDU, []>,
+ RegConstraint<"$dst.ptrreg = $ea_res">,
+ NoEncode<"$ea_res">;
+ let isCodeGenOnly = 1 in
+ def QVSTFSUXs: XForm_8<31, 679, (outs ptr_rc_nor0:$ea_res),
+ (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsux $FRT, $dst", IIC_LdStSTFDU, []>,
+ RegConstraint<"$dst.ptrreg = $ea_res">,
+ NoEncode<"$ea_res">;
+
+ let RC = 1 in
+ def QVSTFSUXA : XForm_8<31, 679,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsuxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFSXI : XForm_8<31, 645,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFSXIA : XForm_8<31, 645,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFSUXI : XForm_8<31, 677,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsuxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFSUXIA : XForm_8<31, 677,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfsuxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCDX : XForm_8<31, 199,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcdx $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCDXA : XForm_8<31, 199,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcdxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCSX : XForm_8<31, 135,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsx $FRT, $dst", IIC_LdStSTFD, []>;
+ let isCodeGenOnly = 1 in
+ def QVSTFCSXs : XForm_8<31, 135,
+ (outs), (ins qsrc:$FRT, memrr:$dst),
+ "qvstfcsx $FRT, $dst", IIC_LdStSTFD, []>;
+
+ let RC = 1 in
+ def QVSTFCSXA : XForm_8<31, 135,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCDUX : XForm_8<31, 231,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcdux $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCDUXA : XForm_8<31, 231,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcduxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCSUX : XForm_8<31, 167,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsux $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCSUXA : XForm_8<31, 167,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsuxa $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCDXI : XForm_8<31, 197,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcdxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCDXIA : XForm_8<31, 197,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcdxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCSXI : XForm_8<31, 133,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCSXIA : XForm_8<31, 133,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCDUXI : XForm_8<31, 229,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcduxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCDUXIA : XForm_8<31, 229,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcduxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFCSUXI : XForm_8<31, 165,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsuxi $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFCSUXIA : XForm_8<31, 165,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfcsuxia $FRT, $dst", IIC_LdStSTFD, []>;
+
+ def QVSTFIWX : XForm_8<31, 967,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfiwx $FRT, $dst", IIC_LdStSTFD, []>;
+ let RC = 1 in
+ def QVSTFIWXA : XForm_8<31, 967,
+ (outs), (ins qfrc:$FRT, memrr:$dst),
+ "qvstfiwxa $FRT, $dst", IIC_LdStSTFD, []>;
+ }
+}
+
+} // hasSideEffects = 0
+}
+
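+// The qvflogical immediate is a 4-bit truth table t0 t1 t2 t3 (t0 the MSB):
+// the result for source lanes (a, b) is t_(2a+b). Hence 1 = AND, 4 = ANDC,
+// 6 = XOR, 7 = OR, 8 = NOR, 9 = EQV, 13 = ORC, 14 = NAND, and 0/15 clear/set
+// the destination. (Our reading of the encodings used by the aliases below.)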
+def : InstAlias<"qvfclr $FRT",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRT, qbrc:$FRT, 0)>;
+def : InstAlias<"qvfand $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 1)>;
+def : InstAlias<"qvfandc $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 4)>;
+def : InstAlias<"qvfctfb $FRT, $FRA",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRA, 5)>;
+def : InstAlias<"qvfxor $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 6)>;
+def : InstAlias<"qvfor $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 7)>;
+def : InstAlias<"qvfnor $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 8)>;
+def : InstAlias<"qvfequ $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 9)>;
+def : InstAlias<"qvfnot $FRT, $FRA",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRA, 10)>;
+def : InstAlias<"qvforc $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 13)>;
+def : InstAlias<"qvfnand $FRT, $FRA, $FRB",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRA, qbrc:$FRB, 14)>;
+def : InstAlias<"qvfset $FRT",
+ (QVFLOGICALb qbrc:$FRT, qbrc:$FRT, qbrc:$FRT, 15)>;
+
+//===----------------------------------------------------------------------===//
+// Additional QPX Patterns
+//
+
+def : Pat<(v4f64 (scalar_to_vector f64:$A)),
+ (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), $A, sub_64)>;
+def : Pat<(v4f32 (scalar_to_vector f32:$A)),
+ (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), $A, sub_64)>;
+
+def : Pat<(f64 (vector_extract v4f64:$S, 0)),
+ (EXTRACT_SUBREG $S, sub_64)>;
+def : Pat<(f32 (vector_extract v4f32:$S, 0)),
+ (EXTRACT_SUBREG $S, sub_64)>;
+
+def : Pat<(f64 (vector_extract v4f64:$S, 1)),
+ (EXTRACT_SUBREG (QVESPLATI $S, 1), sub_64)>;
+def : Pat<(f64 (vector_extract v4f64:$S, 2)),
+ (EXTRACT_SUBREG (QVESPLATI $S, 2), sub_64)>;
+def : Pat<(f64 (vector_extract v4f64:$S, 3)),
+ (EXTRACT_SUBREG (QVESPLATI $S, 3), sub_64)>;
+
+def : Pat<(f32 (vector_extract v4f32:$S, 1)),
+ (EXTRACT_SUBREG (QVESPLATIs $S, 1), sub_64)>;
+def : Pat<(f32 (vector_extract v4f32:$S, 2)),
+ (EXTRACT_SUBREG (QVESPLATIs $S, 2), sub_64)>;
+def : Pat<(f32 (vector_extract v4f32:$S, 3)),
+ (EXTRACT_SUBREG (QVESPLATIs $S, 3), sub_64)>;
+
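+// Variable-index extracts: scale the index, materialize a permute control
+// from it, rotate the selected element into the first lane with qvfperm, and
+// then take the subregister.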
+def : Pat<(f64 (vector_extract v4f64:$S, i64:$F)),
+ (EXTRACT_SUBREG (QVFPERM $S, $S,
+ (QVLPCLSXint (RLDICR $F, 2,
+ /* 63-2 = */ 61))),
+ sub_64)>;
+def : Pat<(f32 (vector_extract v4f32:$S, i64:$F)),
+ (EXTRACT_SUBREG (QVFPERMs $S, $S,
+ (QVLPCLSXint (RLDICR $F, 2,
+ /* 63-2 = */ 61))),
+ sub_64)>;
+
+def : Pat<(int_ppc_qpx_qvfperm v4f64:$A, v4f64:$B, v4f64:$C),
+ (QVFPERM $A, $B, $C)>;
+
+def : Pat<(int_ppc_qpx_qvfcpsgn v4f64:$A, v4f64:$B),
+ (QVFCPSGN $A, $B)>;
+
+// FCOPYSIGN's operand types need not agree.
+def : Pat<(fcopysign v4f64:$frB, v4f32:$frA),
+ (QVFCPSGN (COPY_TO_REGCLASS $frA, QFRC), $frB)>;
+def : Pat<(fcopysign QSRC:$frB, QFRC:$frA),
+ (QVFCPSGNs (COPY_TO_REGCLASS $frA, QSRC), $frB)>;
+
+def : Pat<(int_ppc_qpx_qvfneg v4f64:$A), (QVFNEG $A)>;
+def : Pat<(int_ppc_qpx_qvfabs v4f64:$A), (QVFABS $A)>;
+def : Pat<(int_ppc_qpx_qvfnabs v4f64:$A), (QVFNABS $A)>;
+
+def : Pat<(int_ppc_qpx_qvfriz v4f64:$A), (QVFRIZ $A)>;
+def : Pat<(int_ppc_qpx_qvfrin v4f64:$A), (QVFRIN $A)>;
+def : Pat<(int_ppc_qpx_qvfrip v4f64:$A), (QVFRIP $A)>;
+def : Pat<(int_ppc_qpx_qvfrim v4f64:$A), (QVFRIM $A)>;
+
+def : Pat<(int_ppc_qpx_qvfre v4f64:$A), (QVFRE $A)>;
+def : Pat<(int_ppc_qpx_qvfrsqrte v4f64:$A), (QVFRSQRTE $A)>;
+
+def : Pat<(int_ppc_qpx_qvfadd v4f64:$A, v4f64:$B),
+ (QVFADD $A, $B)>;
+def : Pat<(int_ppc_qpx_qvfsub v4f64:$A, v4f64:$B),
+ (QVFSUB $A, $B)>;
+def : Pat<(int_ppc_qpx_qvfmul v4f64:$A, v4f64:$B),
+ (QVFMUL $A, $B)>;
+
+// Additional QVFNMSUB patterns: -a*c + b == -(a*c - b) and, symmetrically,
+// a*(-c) + b == -(a*c - b), so both negated-multiplicand forms map to
+// qvfnmsub.
+def : Pat<(fma (fneg v4f64:$A), v4f64:$C, v4f64:$B),
+ (QVFNMSUB $A, $B, $C)>;
+def : Pat<(fma v4f64:$A, (fneg v4f64:$C), v4f64:$B),
+ (QVFNMSUB $A, $B, $C)>;
+def : Pat<(fma (fneg v4f32:$A), v4f32:$C, v4f32:$B),
+ (QVFNMSUBSs $A, $B, $C)>;
+def : Pat<(fma v4f32:$A, (fneg v4f32:$C), v4f32:$B),
+ (QVFNMSUBSs $A, $B, $C)>;
+
+def : Pat<(int_ppc_qpx_qvfmadd v4f64:$A, v4f64:$B, v4f64:$C),
+ (QVFMADD $A, $B, $C)>;
+def : Pat<(int_ppc_qpx_qvfnmadd v4f64:$A, v4f64:$B, v4f64:$C),
+ (QVFNMADD $A, $B, $C)>;
+def : Pat<(int_ppc_qpx_qvfmsub v4f64:$A, v4f64:$B, v4f64:$C),
+ (QVFMSUB $A, $B, $C)>;
+def : Pat<(int_ppc_qpx_qvfnmsub v4f64:$A, v4f64:$B, v4f64:$C),
+ (QVFNMSUB $A, $B, $C)>;
+
+def : Pat<(int_ppc_qpx_qvlfd xoaddr:$src),
+ (QVLFDX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfda xoaddr:$src),
+ (QVLFDXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfs xoaddr:$src),
+ (QVLFSX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfsa xoaddr:$src),
+ (QVLFSXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfcda xoaddr:$src),
+ (QVLFCDXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfcd xoaddr:$src),
+ (QVLFCDX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfcsa xoaddr:$src),
+ (QVLFCSXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfcs xoaddr:$src),
+ (QVLFCSX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfiwaa xoaddr:$src),
+ (QVLFIWAXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfiwa xoaddr:$src),
+ (QVLFIWAX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfiwza xoaddr:$src),
+ (QVLFIWZXA xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlfiwz xoaddr:$src),
+ (QVLFIWZX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlpcld xoaddr:$src),
+ (QVLPCLDX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlpcls xoaddr:$src),
+ (QVLPCLSX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlpcrd xoaddr:$src),
+ (QVLPCRDX xoaddr:$src)>;
+def : Pat<(int_ppc_qpx_qvlpcrs xoaddr:$src),
+ (QVLPCRSX xoaddr:$src)>;
+
+def : Pat<(int_ppc_qpx_qvstfd v4f64:$T, xoaddr:$dst),
+ (QVSTFDX $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfs v4f64:$T, xoaddr:$dst),
+ (QVSTFSX $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfcda v4f64:$T, xoaddr:$dst),
+ (QVSTFCDXA $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfcd v4f64:$T, xoaddr:$dst),
+ (QVSTFCDX $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfcsa v4f64:$T, xoaddr:$dst),
+ (QVSTFCSXA $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfcs v4f64:$T, xoaddr:$dst),
+ (QVSTFCSX $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfda v4f64:$T, xoaddr:$dst),
+ (QVSTFDXA $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfiwa v4f64:$T, xoaddr:$dst),
+ (QVSTFIWXA $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfiw v4f64:$T, xoaddr:$dst),
+ (QVSTFIWX $T, xoaddr:$dst)>;
+def : Pat<(int_ppc_qpx_qvstfsa v4f64:$T, xoaddr:$dst),
+ (QVSTFSXA $T, xoaddr:$dst)>;
+
+def : Pat<(pre_store v4f64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
+ (QVSTFDUX $rS, $ptrreg, $ptroff)>;
+def : Pat<(pre_store v4f32:$rS, iPTR:$ptrreg, iPTR:$ptroff),
+ (QVSTFSUX $rS, $ptrreg, $ptroff)>;
+def : Pat<(pre_truncstv4f32 v4f64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
+ (QVSTFSUXs $rS, $ptrreg, $ptroff)>;
+
+def : Pat<(int_ppc_qpx_qvflogical v4f64:$A, v4f64:$B, (i32 imm:$idx)),
+ (QVFLOGICAL $A, $B, imm:$idx)>;
+def : Pat<(int_ppc_qpx_qvgpci (u12:$idx)),
+ (QVGPCI imm:$idx)>;
+
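+// Comparisons with no direct instruction are synthesized from qvfcmp* and
+// qvftstnan results combined with a qvflogical truth-table op: e.g. SETOGE
+// is NOR(lt, unordered) (code 8), the SETU* forms OR in the NaN mask
+// (code 7) or use ORC (code 13), and code 10 gives NOT. (Assumed reading of
+// the truth-table immediates; see the aliases above.)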
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETOGE),
+ (QVFLOGICALb (QVFCMPLTb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETOLE),
+ (QVFLOGICALb (QVFCMPGTb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETONE),
+ (QVFLOGICALb (QVFCMPEQb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETO),
+ (QVFLOGICALb (QVFTSTNANb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETUEQ),
+ (QVFLOGICALb (QVFCMPEQb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETUGT),
+ (QVFLOGICALb (QVFCMPGTb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETUGE),
+ (QVFLOGICALb (QVFTSTNANb $FRA, $FRB),
+ (QVFCMPLTb $FRA, $FRB), (i32 13))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETULT),
+ (QVFLOGICALb (QVFCMPLTb $FRA, $FRB),
+ (QVFTSTNANb $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETULE),
+ (QVFLOGICALb (QVFTSTNANb $FRA, $FRB),
+ (QVFCMPGTb $FRA, $FRB), (i32 13))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETUNE),
+ (QVFLOGICALb (QVFTSTNANb $FRA, $FRB),
+ (QVFCMPEQb $FRA, $FRB), (i32 13))>;
+
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETEQ),
+ (QVFCMPEQb $FRA, $FRB)>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETGT),
+ (QVFCMPGTb $FRA, $FRB)>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETGE),
+ (QVFLOGICALb (QVFCMPLTb $FRA, $FRB),
+ (QVFCMPLTb $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETLT),
+ (QVFCMPLTb $FRA, $FRB)>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETLE),
+ (QVFLOGICALb (QVFCMPGTb $FRA, $FRB),
+ (QVFCMPGTb $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f64:$FRA, v4f64:$FRB, SETNE),
+ (QVFLOGICALb (QVFCMPEQb $FRA, $FRB),
+ (QVFCMPEQb $FRA, $FRB), (i32 10))>;
+
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETOGE),
+ (QVFLOGICALb (QVFCMPLTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETOLE),
+ (QVFLOGICALb (QVFCMPGTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETONE),
+ (QVFLOGICALb (QVFCMPEQbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 8))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETO),
+ (QVFLOGICALb (QVFTSTNANbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETUEQ),
+ (QVFLOGICALb (QVFCMPEQbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETUGT),
+ (QVFLOGICALb (QVFCMPGTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETUGE),
+ (QVFLOGICALb (QVFTSTNANbs $FRA, $FRB),
+ (QVFCMPLTbs $FRA, $FRB), (i32 13))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETULT),
+ (QVFLOGICALb (QVFCMPLTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRA, $FRB), (i32 7))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETULE),
+ (QVFLOGICALb (QVFTSTNANbs $FRA, $FRB),
+ (QVFCMPGTbs $FRA, $FRB), (i32 13))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETUNE),
+ (QVFLOGICALb (QVFTSTNANbs $FRA, $FRB),
+ (QVFCMPEQbs $FRA, $FRB), (i32 13))>;
+
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETEQ),
+ (QVFCMPEQbs $FRA, $FRB)>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETGT),
+ (QVFCMPGTbs $FRA, $FRB)>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETGE),
+ (QVFLOGICALb (QVFCMPLTbs $FRA, $FRB),
+ (QVFCMPLTbs $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETLT),
+ (QVFCMPLTbs $FRA, $FRB)>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETLE),
+ (QVFLOGICALb (QVFCMPGTbs $FRA, $FRB),
+ (QVFCMPGTbs $FRA, $FRB), (i32 10))>;
+def : Pat<(setcc v4f32:$FRA, v4f32:$FRB, SETNE),
+ (QVFLOGICALb (QVFCMPEQbs $FRA, $FRB),
+ (QVFCMPEQbs $FRA, $FRB), (i32 10))>;
+
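+// Logical operations on the v4i1 boolean registers. The QVFLOGICAL immediate
+// encodes a four-bit truth table over the inputs (A, B): bit 0 is the output
+// for (A=1,B=1), bit 1 for (A=0,B=1), bit 2 for (A=1,B=0), and bit 3 for
+// (A=0,B=0). Hence andc = 4, xor = 6, or = 7, nor = 8, eqv = 9, orc = 13,
+// nand = 14, and not(A) = QVFLOGICAL(A, A, 10).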
+def : Pat<(and v4i1:$FRA, (not v4i1:$FRB)),
+ (QVFLOGICALb $FRA, $FRB, (i32 4))>;
+def : Pat<(not (or v4i1:$FRA, v4i1:$FRB)),
+ (QVFLOGICALb $FRA, $FRB, (i32 8))>;
+def : Pat<(not (xor v4i1:$FRA, v4i1:$FRB)),
+ (QVFLOGICALb $FRA, $FRB, (i32 9))>;
+def : Pat<(or v4i1:$FRA, (not v4i1:$FRB)),
+ (QVFLOGICALb $FRA, $FRB, (i32 13))>;
+def : Pat<(not (and v4i1:$FRA, v4i1:$FRB)),
+ (QVFLOGICALb $FRA, $FRB, (i32 14))>;
+
+def : Pat<(and v4i1:$FRA, v4i1:$FRB),
+ (QVFLOGICALb $FRA, $FRB, (i32 1))>;
+def : Pat<(or v4i1:$FRA, v4i1:$FRB),
+ (QVFLOGICALb $FRA, $FRB, (i32 7))>;
+def : Pat<(xor v4i1:$FRA, v4i1:$FRB),
+ (QVFLOGICALb $FRA, $FRB, (i32 6))>;
+def : Pat<(not v4i1:$FRA),
+ (QVFLOGICALb $FRA, $FRA, (i32 10))>;
+
+def : Pat<(v4f64 (fextend v4f32:$src)),
+ (COPY_TO_REGCLASS $src, QFRC)>;
+
+def : Pat<(v4f32 (fround_exact v4f64:$src)),
+ (COPY_TO_REGCLASS $src, QSRC)>;
+
+// Extract the underlying floating-point values from the
+// QPX (-1.0, 1.0) boolean representation.
+def : Pat<(v4f64 (PPCqbflt v4i1:$src)),
+ (COPY_TO_REGCLASS $src, QFRC)>;
+
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETLT)),
+ (SELECT_QFRC (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETLE)),
+ (SELECT_QFRC (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETEQ)),
+ (SELECT_QFRC (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETGE)),
+ (SELECT_QFRC (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETGT)),
+ (SELECT_QFRC (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f64 (selectcc i1:$lhs, i1:$rhs, v4f64:$tval, v4f64:$fval, SETNE)),
+ (SELECT_QFRC (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETLT)),
+ (SELECT_QSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETLE)),
+ (SELECT_QSRC (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETEQ)),
+ (SELECT_QSRC (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETGE)),
+ (SELECT_QSRC (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETGT)),
+ (SELECT_QSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4f32 (selectcc i1:$lhs, i1:$rhs, v4f32:$tval, v4f32:$fval, SETNE)),
+ (SELECT_QSRC (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETLT)),
+ (SELECT_QBRC (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETLE)),
+ (SELECT_QBRC (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETEQ)),
+ (SELECT_QBRC (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETGE)),
+ (SELECT_QBRC (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETGT)),
+ (SELECT_QBRC (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i1 (selectcc i1:$lhs, i1:$rhs, v4i1:$tval, v4i1:$fval, SETNE)),
+ (SELECT_QBRC (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+} // end HasQPX
+
+let Predicates = [HasQPX, NoNaNsFPMath] in {
+def : Pat<(fminnum v4f64:$FRA, v4f64:$FRB),
+ (QVFSELb (QVFCMPLTb $FRA, $FRB), $FRB, $FRA)>;
+def : Pat<(fmaxnum v4f64:$FRA, v4f64:$FRB),
+ (QVFSELb (QVFCMPGTb $FRA, $FRB), $FRB, $FRA)>;
+
+def : Pat<(fminnum v4f32:$FRA, v4f32:$FRB),
+ (QVFSELbs (QVFCMPLTbs $FRA, $FRB), $FRB, $FRA)>;
+def : Pat<(fmaxnum v4f32:$FRA, v4f32:$FRB),
+ (QVFSELbs (QVFCMPGTbs $FRA, $FRB), $FRB, $FRA)>;
+}
+
+let Predicates = [HasQPX, NaNsFPMath] in {
+// When either operand is NaN, we should return the other operand.
+// QVFCMPLT/QVFCMPGT return false if either operand is NaN, so we need to
+// explicitly OR in a NaN test on the second operand.
+def : Pat<(fminnum v4f64:$FRA, v4f64:$FRB),
+ (QVFSELb (QVFLOGICALb (QVFCMPLTb $FRA, $FRB),
+ (QVFTSTNANb $FRB, $FRB), (i32 7)),
+ $FRB, $FRA)>;
+def : Pat<(fmaxnum v4f64:$FRA, v4f64:$FRB),
+ (QVFSELb (QVFLOGICALb (QVFCMPGTb $FRA, $FRB),
+ (QVFTSTNANb $FRB, $FRB), (i32 7)),
+ $FRB, $FRA)>;
+
+def : Pat<(fminnum v4f32:$FRA, v4f32:$FRB),
+ (QVFSELbs (QVFLOGICALb (QVFCMPLTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRB, $FRB), (i32 7)),
+ $FRB, $FRA)>;
+def : Pat<(fmaxnum v4f32:$FRA, v4f32:$FRB),
+ (QVFSELbs (QVFLOGICALb (QVFCMPGTbs $FRA, $FRB),
+ (QVFTSTNANbs $FRB, $FRB), (i32 7)),
+ $FRB, $FRA)>;
+}
+
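The magic immediates in the setcc and fminnum/fmaxnum patterns above all
follow from that four-bit truth-table encoding. A minimal stand-alone check
(a sketch only; qvflogicalImm is a hypothetical helper, not part of this
patch):

static unsigned qvflogicalImm(bool TT, bool FT, bool TF, bool FF) {
  // One output bit per (A, B) input pair: TT = (1,1), FT = (0,1),
  // TF = (1,0), FF = (0,0).
  return (unsigned)TT | ((unsigned)FT << 1) | ((unsigned)TF << 2) |
         ((unsigned)FF << 3);
}
// qvflogicalImm(1,0,0,0) == 1 (and)    qvflogicalImm(0,1,1,0) == 6 (xor)
// qvflogicalImm(1,1,1,0) == 7 (or)     qvflogicalImm(0,0,0,1) == 8 (nor)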
diff --git a/lib/Target/PowerPC/PPCInstrVSX.td b/lib/Target/PowerPC/PPCInstrVSX.td
index 2c8f998..d6cb3a0 100644
--- a/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/lib/Target/PowerPC/PPCInstrVSX.td
@@ -25,6 +25,23 @@ def vsfrc : RegisterOperand<VSFRC> {
let ParserMatchClass = PPCRegVSFRCAsmOperand;
}
+// Little-endian-specific nodes.
+def SDT_PPClxvd2x : SDTypeProfile<1, 1, [
+ SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
+]>;
+def SDT_PPCstxvd2x : SDTypeProfile<0, 2, [
+ SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
+]>;
+def SDT_PPCxxswapd : SDTypeProfile<1, 1, [
+ SDTCisSameAs<0, 1>
+]>;
+
+def PPClxvd2x : SDNode<"PPCISD::LXVD2X", SDT_PPClxvd2x,
+ [SDNPHasChain, SDNPMayLoad]>;
+def PPCstxvd2x : SDNode<"PPCISD::STXVD2X", SDT_PPCstxvd2x,
+ [SDNPHasChain, SDNPMayStore]>;
+def PPCxxswapd : SDNode<"PPCISD::XXSWAPD", SDT_PPCxxswapd, [SDNPHasChain]>;
+
multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
@@ -40,9 +57,12 @@ multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, dag OOL, dag IOL,
}
def HasVSX : Predicate<"PPCSubTarget->hasVSX()">;
+def IsLittleEndian : Predicate<"PPCSubTarget->isLittleEndian()">;
+def IsBigEndian : Predicate<"!PPCSubTarget->isLittleEndian()">;
+
let Predicates = [HasVSX] in {
let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
-let neverHasSideEffects = 1 in { // VSX instructions don't have side effects.
+let hasSideEffects = 0 in { // VSX instructions don't have side effects.
let Uses = [RM] in {
// Load indexed instructions
@@ -77,12 +97,12 @@ let Uses = [RM] in {
def STXVD2X : XX1Form<31, 972,
(outs), (ins vsrc:$XT, memrr:$dst),
"stxvd2x $XT, $dst", IIC_LdStSTFD,
- [(int_ppc_vsx_stxvd2x v2f64:$XT, xoaddr:$dst)]>;
+ [(store v2f64:$XT, xoaddr:$dst)]>;
def STXVW4X : XX1Form<31, 908,
(outs), (ins vsrc:$XT, memrr:$dst),
"stxvw4x $XT, $dst", IIC_LdStSTFD,
- [(int_ppc_vsx_stxvw4x v4i32:$XT, xoaddr:$dst)]>;
+ [(store v4i32:$XT, xoaddr:$dst)]>;
}
// Add/Mul Instructions
@@ -728,7 +748,7 @@ let Uses = [RM] in {
def XXSPLTW : XX2Form_2<60, 164,
(outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
"xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
-} // neverHasSideEffects
+} // hasSideEffects
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
// instruction selection into a branch sequence.
@@ -773,6 +793,8 @@ def : InstAlias<"xxswapd $XT, $XB",
(XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 2)>;
let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
+
+let Predicates = [IsBigEndian] in {
def : Pat<(v2f64 (scalar_to_vector f64:$A)),
(v2f64 (SUBREG_TO_REG (i64 1), $A, sub_64))>;
@@ -780,6 +802,18 @@ def : Pat<(f64 (vector_extract v2f64:$S, 0)),
(f64 (EXTRACT_SUBREG $S, sub_64))>;
def : Pat<(f64 (vector_extract v2f64:$S, 1)),
(f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
+}
+
+let Predicates = [IsLittleEndian] in {
+def : Pat<(v2f64 (scalar_to_vector f64:$A)),
+ (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), $A, sub_64),
+ (SUBREG_TO_REG (i64 1), $A, sub_64), 0))>;
+
+def : Pat<(f64 (vector_extract v2f64:$S, 0)),
+ (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
+def : Pat<(f64 (vector_extract v2f64:$S, 1)),
+ (f64 (EXTRACT_SUBREG $S, sub_64))>;
+}
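+// On a little-endian subtarget, lane 0 of a v2f64 lives in the second
+// doubleword of the VSX register and lane 1 in the first (sub_64), so
+// extracting lane 0 requires the xxpermdi swap while lane 1 maps directly;
+// this is the mirror image of the big-endian patterns above.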
// Additional fnmsub patterns: -a*c + b == -(a*c - b)
def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B),
@@ -854,11 +888,21 @@ def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
def : Pat<(v2f64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
def : Pat<(v2i64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
def : Pat<(v4i32 (load xoaddr:$src)), (LXVW4X xoaddr:$src)>;
+def : Pat<(v2f64 (PPClxvd2x xoaddr:$src)), (LXVD2X xoaddr:$src)>;
// Stores.
-def : Pat<(store v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
+def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, xoaddr:$dst),
+ (STXVD2X $rS, xoaddr:$dst)>;
def : Pat<(store v2i64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
-def : Pat<(store v4i32:$rS, xoaddr:$dst), (STXVW4X $rS, xoaddr:$dst)>;
+def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, xoaddr:$dst),
+ (STXVW4X $rS, xoaddr:$dst)>;
+def : Pat<(PPCstxvd2x v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
+
+// Permutes.
+def : Pat<(v2f64 (PPCxxswapd v2f64:$src)), (XXPERMDI $src, $src, 2)>;
+def : Pat<(v2i64 (PPCxxswapd v2i64:$src)), (XXPERMDI $src, $src, 2)>;
+def : Pat<(v4f32 (PPCxxswapd v4f32:$src)), (XXPERMDI $src, $src, 2)>;
+def : Pat<(v4i32 (PPCxxswapd v4i32:$src)), (XXPERMDI $src, $src, 2)>;
// Selects.
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLT)),
@@ -896,3 +940,28 @@ def : Pat<(int_ppc_vsx_xvdivdp v2f64:$A, v2f64:$B),
} // AddedComplexity
} // HasVSX
+// The following VSX instructions were introduced in Power ISA 2.07
+/* FIXME: If the operands are v2i64, these patterns will not match.
+   We should define new patterns, or otherwise arrange to match the same
+   patterns, when the elements are larger than i32.
+*/
+def HasP8Vector : Predicate<"PPCSubTarget->hasP8Vector()">;
+let Predicates = [HasP8Vector] in {
+let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
+let isCommutable = 1 in {
+ def XXLEQV : XX3Form<60, 186,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxleqv $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (vnot_ppc (xor v4i32:$XA, v4i32:$XB)))]>;
+ def XXLNAND : XX3Form<60, 178,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlnand $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (vnot_ppc (and v4i32:$XA,
+ v4i32:$XB)))]>;
+ } // isCommutable
+def XXLORC : XX3Form<60, 170,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlorc $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (or v4i32:$XA, (vnot_ppc v4i32:$XB)))]>;
+} // AddedComplexity = 400
+} // HasP8Vector
diff --git a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
new file mode 100644
index 0000000..efd2d92
--- /dev/null
+++ b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
@@ -0,0 +1,231 @@
+//===-------- PPCLoopDataPrefetch.cpp - Loop Data Prefetching Pass --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a Loop Data Prefetching Pass.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ppc-loop-data-prefetch"
+#include "PPC.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/CodeMetrics.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+using namespace llvm;
+
+// By default, we prefetch only the addresses of loads; this flag also enables
+// prefetching the addresses that stores will write to.
+static cl::opt<bool>
+PrefetchWrites("ppc-loop-prefetch-writes", cl::Hidden, cl::init(false),
+ cl::desc("Prefetch write addresses"));
+
+// This seems like a reasonable default for the BG/Q (this pass is enabled, by
+// default, only on the BG/Q).
+static cl::opt<unsigned>
+PrefDist("ppc-loop-prefetch-distance", cl::Hidden, cl::init(300),
+ cl::desc("The loop prefetch distance"));
+
+static cl::opt<unsigned>
+CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
+ cl::desc("The loop prefetch cache line size"));
+
+namespace llvm {
+ void initializePPCLoopDataPrefetchPass(PassRegistry&);
+}
+
+namespace {
+
+ class PPCLoopDataPrefetch : public FunctionPass {
+ public:
+ static char ID; // Pass ID, replacement for typeid
+ PPCLoopDataPrefetch() : FunctionPass(ID) {
+ initializePPCLoopDataPrefetchPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AssumptionCacheTracker>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
+ AU.addRequired<ScalarEvolution>();
+ // FIXME: For some reason, preserving SE here breaks LSR (even if
+ // this pass changes nothing).
+ // AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
+
+ bool runOnFunction(Function &F) override;
+ bool runOnLoop(Loop *L);
+
+ private:
+ AssumptionCache *AC;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
+ const TargetTransformInfo *TTI;
+ const DataLayout *DL;
+ };
+}
+
+char PPCLoopDataPrefetch::ID = 0;
+INITIALIZE_PASS_BEGIN(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
+ "PPC Loop Data Prefetch", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
+ "PPC Loop Data Prefetch", false, false)
+
+FunctionPass *llvm::createPPCLoopDataPrefetchPass() {
+  return new PPCLoopDataPrefetch();
+}
+
+bool PPCLoopDataPrefetch::runOnFunction(Function &F) {
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ SE = &getAnalysis<ScalarEvolution>();
+ DL = F.getParent()->getDataLayout();
+ AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+
+ bool MadeChange = false;
+
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end();
+ I != E; ++I) {
+ Loop *L = *I;
+ MadeChange |= runOnLoop(L);
+ }
+
+ return MadeChange;
+}
+
+bool PPCLoopDataPrefetch::runOnLoop(Loop *L) {
+ bool MadeChange = false;
+
+  // Only prefetch in the innermost loop.
+ if (!L->empty())
+ return MadeChange;
+
+ SmallPtrSet<const Value *, 32> EphValues;
+ CodeMetrics::collectEphemeralValues(L, AC, EphValues);
+
+ // Calculate the number of iterations ahead to prefetch
+ CodeMetrics Metrics;
+ for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
+ I != IE; ++I) {
+
+ // If the loop already has prefetches, then assume that the user knows
+ // what he or she is doing and don't add any more.
+ for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
+ J != JE; ++J)
+ if (CallInst *CI = dyn_cast<CallInst>(J))
+ if (Function *F = CI->getCalledFunction())
+ if (F->getIntrinsicID() == Intrinsic::prefetch)
+ return MadeChange;
+
+ Metrics.analyzeBasicBlock(*I, *TTI, EphValues);
+ }
+ unsigned LoopSize = Metrics.NumInsts;
+ if (!LoopSize)
+ LoopSize = 1;
+
+ unsigned ItersAhead = PrefDist/LoopSize;
+ if (!ItersAhead)
+ ItersAhead = 1;
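+  // For example, with the default prefetch distance of 300 and a loop body of
+  // 25 instructions, we prefetch 300 / 25 = 12 iterations ahead.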
+
+ SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
+ for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
+ I != IE; ++I) {
+ for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
+ J != JE; ++J) {
+ Value *PtrValue;
+ Instruction *MemI;
+
+ if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
+ MemI = LMemI;
+ PtrValue = LMemI->getPointerOperand();
+ } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
+ if (!PrefetchWrites) continue;
+ MemI = SMemI;
+ PtrValue = SMemI->getPointerOperand();
+ } else continue;
+
+ unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
+ if (PtrAddrSpace)
+ continue;
+
+ if (L->isLoopInvariant(PtrValue))
+ continue;
+
+ const SCEV *LSCEV = SE->getSCEV(PtrValue);
+ const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
+ if (!LSCEVAddRec)
+ continue;
+
+ // We don't want to double prefetch individual cache lines. If this load
+ // is known to be within one cache line of some other load that has
+ // already been prefetched, then don't prefetch this one as well.
+ bool DupPref = false;
+      for (const auto &PrefLoad : PrefLoads) {
+        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, PrefLoad.second);
+ if (const SCEVConstant *ConstPtrDiff =
+ dyn_cast<SCEVConstant>(PtrDiff)) {
+ int64_t PD = abs64(ConstPtrDiff->getValue()->getSExtValue());
+ if (PD < (int64_t) CacheLineSize) {
+ DupPref = true;
+ break;
+ }
+ }
+ }
+ if (DupPref)
+ continue;
+
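+      // Compute the address this access will touch ItersAhead iterations from
+      // now: NextLSCEV = LSCEVAddRec + ItersAhead * Step.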
+ const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr(
+ SE->getConstant(LSCEVAddRec->getType(), ItersAhead),
+ LSCEVAddRec->getStepRecurrence(*SE)));
+ if (!isSafeToExpand(NextLSCEV, *SE))
+ continue;
+
+ PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));
+
+ Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
+ SCEVExpander SCEVE(*SE, "prefaddr");
+ Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);
+
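+      // Emit @llvm.prefetch(addr, rw, locality, cache-type): rw is 0 for a
+      // read and 1 for a write, locality 3 requests maximal temporal locality,
+      // and cache-type 1 selects the data cache.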
+ IRBuilder<> Builder(MemI);
+ Module *M = (*I)->getParent()->getParent();
+ Type *I32 = Type::getInt32Ty((*I)->getContext());
+ Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch);
+ Builder.CreateCall4(PrefetchFunc, PrefPtrValue,
+ ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
+ ConstantInt::get(I32, 3), ConstantInt::get(I32, 1));
+
+ MadeChange = true;
+ }
+ }
+
+ return MadeChange;
+}
+
diff --git a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
new file mode 100644
index 0000000..df65227
--- /dev/null
+++ b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -0,0 +1,382 @@
+//===------ PPCLoopPreIncPrep.cpp - Loop Pre-Inc. AM Prep. Pass -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass to prepare loops for pre-increment addressing
+// modes. Additional PHIs are created for loop induction variables used by
+// load/store instructions so that the pre-increment forms can be used.
+// Generically, this means transforming loops like this:
+// for (int i = 0; i < n; ++i)
+// array[i] = c;
+// to look like this:
+// T *p = array[-1];
+// for (int i = 0; i < n; ++i)
+// *++p = c;
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ppc-loop-preinc-prep"
+#include "PPC.h"
+#include "PPCTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/CodeMetrics.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+using namespace llvm;
+
+// By default, we limit this to creating 16 PHIs (which is a little over half
+// of the allocatable register set).
+static cl::opt<unsigned> MaxVars("ppc-preinc-prep-max-vars",
+ cl::Hidden, cl::init(16),
+ cl::desc("Potential PHI threshold for PPC preinc loop prep"));
+
+namespace llvm {
+ void initializePPCLoopPreIncPrepPass(PassRegistry&);
+}
+
+namespace {
+
+ class PPCLoopPreIncPrep : public FunctionPass {
+ public:
+ static char ID; // Pass ID, replacement for typeid
+ PPCLoopPreIncPrep() : FunctionPass(ID), TM(nullptr) {
+ initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
+ }
+ PPCLoopPreIncPrep(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
+ initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
+ AU.addRequired<ScalarEvolution>();
+ }
+
+ bool runOnFunction(Function &F) override;
+
+ bool runOnLoop(Loop *L);
+ void simplifyLoopLatch(Loop *L);
+ bool rotateLoop(Loop *L);
+
+ private:
+ PPCTargetMachine *TM;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
+ const DataLayout *DL;
+ };
+}
+
+char PPCLoopPreIncPrep::ID = 0;
+static const char *name = "Prepare loop for pre-inc. addressing modes";
+INITIALIZE_PASS_BEGIN(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
+
+FunctionPass *llvm::createPPCLoopPreIncPrepPass(PPCTargetMachine &TM) {
+ return new PPCLoopPreIncPrep(TM);
+}
+
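+// Orders SCEVs whose pairwise difference is a SCEVConstant. Buckets are only
+// ever populated with SCEVs satisfying that property (see the
+// isa<SCEVConstant> check in runOnLoop below), so the cast in operator() is
+// safe.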
+namespace {
+ struct SCEVLess : std::binary_function<const SCEV *, const SCEV *, bool>
+ {
+ SCEVLess(ScalarEvolution *SE) : SE(SE) {}
+
+ bool operator() (const SCEV *X, const SCEV *Y) const {
+ const SCEV *Diff = SE->getMinusSCEV(X, Y);
+ return cast<SCEVConstant>(Diff)->getValue()->getSExtValue() < 0;
+ }
+
+ protected:
+ ScalarEvolution *SE;
+ };
+}
+
+static bool IsPtrInBounds(Value *BasePtr) {
+ Value *StrippedBasePtr = BasePtr;
+ while (BitCastInst *BC = dyn_cast<BitCastInst>(StrippedBasePtr))
+ StrippedBasePtr = BC->getOperand(0);
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(StrippedBasePtr))
+ return GEP->isInBounds();
+
+ return false;
+}
+
+static Value *GetPointerOperand(Value *MemI) {
+ if (LoadInst *LMemI = dyn_cast<LoadInst>(MemI)) {
+ return LMemI->getPointerOperand();
+ } else if (StoreInst *SMemI = dyn_cast<StoreInst>(MemI)) {
+ return SMemI->getPointerOperand();
+ } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(MemI)) {
+ if (IMemI->getIntrinsicID() == Intrinsic::prefetch)
+ return IMemI->getArgOperand(0);
+ }
+
+  return nullptr;
+}
+
+bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ SE = &getAnalysis<ScalarEvolution>();
+
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  DL = DLP ? &DLP->getDataLayout() : nullptr;
+
+ bool MadeChange = false;
+
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end();
+ I != E; ++I) {
+ Loop *L = *I;
+ MadeChange |= runOnLoop(L);
+ }
+
+ return MadeChange;
+}
+
+bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
+ bool MadeChange = false;
+
+ if (!DL)
+ return MadeChange;
+
+  // Only prepare the innermost loop.
+ if (!L->empty())
+ return MadeChange;
+
+ BasicBlock *Header = L->getHeader();
+
+ const PPCSubtarget *ST =
+ TM ? TM->getSubtargetImpl(*Header->getParent()) : nullptr;
+
+  unsigned HeaderLoopPredCount =
+    std::distance(pred_begin(Header), pred_end(Header));
+
+ // Collect buckets of comparable addresses used by loads and stores.
+ typedef std::multimap<const SCEV *, Instruction *, SCEVLess> Bucket;
+ SmallVector<Bucket, 16> Buckets;
+ for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
+ I != IE; ++I) {
+ for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
+ J != JE; ++J) {
+ Value *PtrValue;
+ Instruction *MemI;
+
+ if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
+ MemI = LMemI;
+ PtrValue = LMemI->getPointerOperand();
+ } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
+ MemI = SMemI;
+ PtrValue = SMemI->getPointerOperand();
+ } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(J)) {
+ if (IMemI->getIntrinsicID() == Intrinsic::prefetch) {
+ MemI = IMemI;
+ PtrValue = IMemI->getArgOperand(0);
+ } else continue;
+ } else continue;
+
+ unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
+ if (PtrAddrSpace)
+ continue;
+
+ // There are no update forms for Altivec vector load/stores.
+ if (ST && ST->hasAltivec() &&
+ PtrValue->getType()->getPointerElementType()->isVectorTy())
+ continue;
+
+ if (L->isLoopInvariant(PtrValue))
+ continue;
+
+ const SCEV *LSCEV = SE->getSCEV(PtrValue);
+ if (!isa<SCEVAddRecExpr>(LSCEV))
+ continue;
+
+ bool FoundBucket = false;
+ for (unsigned i = 0, e = Buckets.size(); i != e; ++i)
+ for (Bucket::iterator K = Buckets[i].begin(), KE = Buckets[i].end();
+ K != KE; ++K) {
+ const SCEV *Diff = SE->getMinusSCEV(K->first, LSCEV);
+ if (isa<SCEVConstant>(Diff)) {
+ Buckets[i].insert(std::make_pair(LSCEV, MemI));
+ FoundBucket = true;
+ break;
+ }
+ }
+
+ if (!FoundBucket) {
+ Buckets.push_back(Bucket(SCEVLess(SE)));
+      Buckets.back().insert(std::make_pair(LSCEV, MemI));
+ }
+ }
+ }
+
+ if (Buckets.empty() || Buckets.size() > MaxVars)
+ return MadeChange;
+
+ BasicBlock *LoopPredecessor = L->getLoopPredecessor();
+ // If there is no loop predecessor, or the loop predecessor's terminator
+ // returns a value (which might contribute to determining the loop's
+ // iteration space), insert a new preheader for the loop.
+ if (!LoopPredecessor ||
+ !LoopPredecessor->getTerminator()->getType()->isVoidTy())
+ LoopPredecessor = InsertPreheaderForLoop(L, this);
+ if (!LoopPredecessor)
+ return MadeChange;
+
+ SmallSet<BasicBlock *, 16> BBChanged;
+ for (unsigned i = 0, e = Buckets.size(); i != e; ++i) {
+ // The base address of each bucket is transformed into a phi and the others
+ // are rewritten as offsets of that variable.
+
+ const SCEVAddRecExpr *BasePtrSCEV =
+ cast<SCEVAddRecExpr>(Buckets[i].begin()->first);
+ if (!BasePtrSCEV->isAffine())
+ continue;
+
+ Instruction *MemI = Buckets[i].begin()->second;
+ Value *BasePtr = GetPointerOperand(MemI);
+ assert(BasePtr && "No pointer operand");
+
+ Type *I8PtrTy = Type::getInt8PtrTy(MemI->getParent()->getContext(),
+ BasePtr->getType()->getPointerAddressSpace());
+
+ const SCEV *BasePtrStartSCEV = BasePtrSCEV->getStart();
+ if (!SE->isLoopInvariant(BasePtrStartSCEV, L))
+ continue;
+
+ const SCEVConstant *BasePtrIncSCEV =
+ dyn_cast<SCEVConstant>(BasePtrSCEV->getStepRecurrence(*SE));
+ if (!BasePtrIncSCEV)
+ continue;
+ BasePtrStartSCEV = SE->getMinusSCEV(BasePtrStartSCEV, BasePtrIncSCEV);
+ if (!isSafeToExpand(BasePtrStartSCEV, *SE))
+ continue;
+
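+    // The new induction PHI starts one step before the first access
+    // (BasePtrStartSCEV = Start - Step, computed above), so the pre-increment
+    // update advances it to the correct address on the first iteration.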
+ PHINode *NewPHI = PHINode::Create(I8PtrTy, HeaderLoopPredCount,
+ MemI->hasName() ? MemI->getName() + ".phi" : "",
+ Header->getFirstNonPHI());
+
+ SCEVExpander SCEVE(*SE, "pistart");
+ Value *BasePtrStart = SCEVE.expandCodeFor(BasePtrStartSCEV, I8PtrTy,
+ LoopPredecessor->getTerminator());
+
+ // Note that LoopPredecessor might occur in the predecessor list multiple
+ // times, and we need to add it the right number of times.
+ for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
+ PI != PE; ++PI) {
+ if (*PI != LoopPredecessor)
+ continue;
+
+ NewPHI->addIncoming(BasePtrStart, LoopPredecessor);
+ }
+
+ Instruction *InsPoint = Header->getFirstInsertionPt();
+ GetElementPtrInst *PtrInc =
+ GetElementPtrInst::Create(NewPHI, BasePtrIncSCEV->getValue(),
+ MemI->hasName() ? MemI->getName() + ".inc" : "", InsPoint);
+ PtrInc->setIsInBounds(IsPtrInBounds(BasePtr));
+ for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
+ PI != PE; ++PI) {
+ if (*PI == LoopPredecessor)
+ continue;
+
+ NewPHI->addIncoming(PtrInc, *PI);
+ }
+
+ Instruction *NewBasePtr;
+ if (PtrInc->getType() != BasePtr->getType())
+ NewBasePtr = new BitCastInst(PtrInc, BasePtr->getType(),
+ PtrInc->hasName() ? PtrInc->getName() + ".cast" : "", InsPoint);
+ else
+ NewBasePtr = PtrInc;
+
+ if (Instruction *IDel = dyn_cast<Instruction>(BasePtr))
+ BBChanged.insert(IDel->getParent());
+ BasePtr->replaceAllUsesWith(NewBasePtr);
+ RecursivelyDeleteTriviallyDeadInstructions(BasePtr);
+
+ Value *LastNewPtr = NewBasePtr;
+ for (Bucket::iterator I = std::next(Buckets[i].begin()),
+ IE = Buckets[i].end(); I != IE; ++I) {
+ Value *Ptr = GetPointerOperand(I->second);
+ assert(Ptr && "No pointer operand");
+ if (Ptr == LastNewPtr)
+ continue;
+
+ Instruction *RealNewPtr;
+ const SCEVConstant *Diff =
+ cast<SCEVConstant>(SE->getMinusSCEV(I->first, BasePtrSCEV));
+ if (Diff->isZero()) {
+ RealNewPtr = NewBasePtr;
+ } else {
+ Instruction *PtrIP = dyn_cast<Instruction>(Ptr);
+        // isa<> must not be called on a null pointer, so handle the
+        // not-an-instruction case first: insert just before the bucket's
+        // memory instruction.
+        if (!PtrIP)
+          PtrIP = I->second;
+        else if (isa<Instruction>(NewBasePtr) &&
+            cast<Instruction>(NewBasePtr)->getParent() == PtrIP->getParent())
+          PtrIP = nullptr;
+        else if (isa<PHINode>(PtrIP))
+          PtrIP = PtrIP->getParent()->getFirstInsertionPt();
+
+ GetElementPtrInst *NewPtr =
+ GetElementPtrInst::Create(PtrInc, Diff->getValue(),
+ I->second->hasName() ? I->second->getName() + ".off" : "", PtrIP);
+ if (!PtrIP)
+ NewPtr->insertAfter(cast<Instruction>(PtrInc));
+ NewPtr->setIsInBounds(IsPtrInBounds(Ptr));
+ RealNewPtr = NewPtr;
+ }
+
+ if (Instruction *IDel = dyn_cast<Instruction>(Ptr))
+ BBChanged.insert(IDel->getParent());
+
+ Instruction *ReplNewPtr;
+ if (Ptr->getType() != RealNewPtr->getType()) {
+ ReplNewPtr = new BitCastInst(RealNewPtr, Ptr->getType(),
+ Ptr->hasName() ? Ptr->getName() + ".cast" : "");
+ ReplNewPtr->insertAfter(RealNewPtr);
+ } else
+ ReplNewPtr = RealNewPtr;
+
+ Ptr->replaceAllUsesWith(ReplNewPtr);
+ RecursivelyDeleteTriviallyDeadInstructions(Ptr);
+
+ LastNewPtr = RealNewPtr;
+ }
+
+ MadeChange = true;
+ }
+
+ for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
+ I != IE; ++I) {
+ if (BBChanged.count(*I))
+ DeleteDeadPHIs(*I);
+ }
+
+ return MadeChange;
+}
+
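In source terms, the rewrite expresses every access in a bucket as a constant
offset from one pre-incremented pointer. A hedged C++ restatement of the
header comment's example (hypothetical names and offsets; the pass itself
rewrites LLVM IR, not source code):

void preIncExample(char *base, long n, long step) {
  char *p = base - step;   // BasePtrStart = Start - Step
  for (long i = 0; i < n; ++i) {
    p += step;             // PtrInc: the single induction update
    p[0] = 1;              // the bucket's base access
    p[8] = 2;              // a second member at a constant offset (Diff = 8)
  }
}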
diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp
index 880b520..819738b 100644
--- a/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "PPC.h"
-#include "PPCSubtarget.h"
#include "MCTargetDesc/PPCMCExpr.h"
+#include "PPCSubtarget.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -38,7 +38,7 @@ static MachineModuleInfoMachO &getMachOMMI(AsmPrinter &AP) {
static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
const TargetMachine &TM = AP.TM;
Mangler *Mang = AP.Mang;
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
MCContext &Ctx = AP.OutContext;
bool isDarwin = Triple(TM.getTargetTriple()).isOSDarwin();
@@ -137,12 +137,6 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
case PPCII::MO_TLS:
RefKind = MCSymbolRefExpr::VK_PPC_TLS;
break;
- case PPCII::MO_TLSGD:
- RefKind = MCSymbolRefExpr::VK_PPC_TLSGD;
- break;
- case PPCII::MO_TLSLD:
- RefKind = MCSymbolRefExpr::VK_PPC_TLSLD;
- break;
}
if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && !isDarwin)
diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp b/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
index 4aff95a..dd896a9 100644
--- a/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
+++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
@@ -18,7 +18,8 @@ using namespace llvm;
void PPCFunctionInfo::anchor() { }
MCSymbol *PPCFunctionInfo::getPICOffsetSymbol() const {
- const DataLayout *DL = MF.getSubtarget().getDataLayout();
- return MF.getContext().GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
- Twine(MF.getFunctionNumber())+"$poff");
+ const DataLayout *DL = MF.getTarget().getDataLayout();
+ return MF.getContext().GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix()) +
+ Twine(MF.getFunctionNumber()) +
+ "$poff");
}
diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index 83de799..607cdf6 100644
--- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -35,6 +35,9 @@ class PPCFunctionInfo : public MachineFunctionInfo {
/// Frame index where the old base pointer is stored.
int BasePointerSaveIndex;
+ /// Frame index where the old PIC base pointer is stored.
+ int PICBasePointerSaveIndex;
+
/// MustSaveLR - Indicates whether LR is defined (or clobbered) in the current
/// function. This is only valid after the initial scan of the function by
/// PEI.
@@ -59,6 +62,9 @@ class PPCFunctionInfo : public MachineFunctionInfo {
/// entry, even though LR may otherwise apparently not be used.
bool LRStoreRequired;
+ /// This function makes use of the PPC64 ELF TOC base pointer (register r2).
+ bool UsesTOCBasePtr;
+
/// MinReservedArea - This is the frame size that is at least reserved in a
/// potential caller (parameter+linkage area).
unsigned MinReservedArea;
@@ -103,11 +109,13 @@ public:
: FramePointerSaveIndex(0),
ReturnAddrSaveIndex(0),
BasePointerSaveIndex(0),
+ PICBasePointerSaveIndex(0),
HasSpills(false),
HasNonRISpills(false),
SpillsCR(false),
SpillsVRSAVE(false),
LRStoreRequired(false),
+ UsesTOCBasePtr(false),
MinReservedArea(0),
TailCallSPDelta(0),
HasFastCall(false),
@@ -128,6 +136,9 @@ public:
int getBasePointerSaveIndex() const { return BasePointerSaveIndex; }
void setBasePointerSaveIndex(int Idx) { BasePointerSaveIndex = Idx; }
+ int getPICBasePointerSaveIndex() const { return PICBasePointerSaveIndex; }
+ void setPICBasePointerSaveIndex(int Idx) { PICBasePointerSaveIndex = Idx; }
+
unsigned getMinReservedArea() const { return MinReservedArea; }
void setMinReservedArea(unsigned size) { MinReservedArea = size; }
@@ -157,6 +168,9 @@ public:
void setLRStoreRequired() { LRStoreRequired = true; }
bool isLRStoreRequired() const { return LRStoreRequired; }
+ void setUsesTOCBasePtr() { UsesTOCBasePtr = true; }
+ bool usesTOCBasePtr() const { return UsesTOCBasePtr; }
+
void setHasFastCall() { HasFastCall = true; }
bool hasFastCall() const { return HasFastCall;}
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 9b9966f..c9a9684 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -99,6 +99,14 @@ PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) {
+ if (Subtarget.hasVSX())
+ return CSR_64_AllRegs_VSX_SaveList;
+ if (Subtarget.hasAltivec())
+ return CSR_64_AllRegs_Altivec_SaveList;
+ return CSR_64_AllRegs_SaveList;
+ }
+
if (Subtarget.isDarwinABI())
return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
CSR_Darwin64_Altivec_SaveList :
@@ -107,9 +115,14 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
CSR_Darwin32_Altivec_SaveList :
CSR_Darwin32_SaveList);
+ // On PPC64, we might need to save r2 (but only if it is not reserved).
+ bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2);
+
return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
- CSR_SVR464_Altivec_SaveList :
- CSR_SVR464_SaveList) :
+ (SaveR2 ? CSR_SVR464_R2_Altivec_SaveList :
+ CSR_SVR464_Altivec_SaveList) :
+ (SaveR2 ? CSR_SVR464_R2_SaveList :
+ CSR_SVR464_SaveList)) :
(Subtarget.hasAltivec() ?
CSR_SVR432_Altivec_SaveList :
CSR_SVR432_SaveList);
@@ -117,6 +130,14 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t*
PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ if (CC == CallingConv::AnyReg) {
+ if (Subtarget.hasVSX())
+ return CSR_64_AllRegs_VSX_RegMask;
+ if (Subtarget.hasAltivec())
+ return CSR_64_AllRegs_Altivec_RegMask;
+ return CSR_64_AllRegs_RegMask;
+ }
+
if (Subtarget.isDarwinABI())
return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
CSR_Darwin64_Altivec_RegMask :
@@ -138,10 +159,18 @@ PPCRegisterInfo::getNoPreservedMask() const {
return CSR_NoRegs_RegMask;
}
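+// ZERO/ZERO8 (the representations of r0 as the constant 0) and RM (the
+// rounding-mode pseudo-register) do not name machine state that a stackmap
+// consumer could usefully read, so remove them from the live-out mask.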
+void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
+ unsigned PseudoRegs[] = { PPC::ZERO, PPC::ZERO8, PPC::RM };
+ for (unsigned i = 0, ie = array_lengthof(PseudoRegs); i != ie; ++i) {
+ unsigned Reg = PseudoRegs[i];
+ Mask[Reg / 32] &= ~(1u << (Reg % 32));
+ }
+}
+
BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
- const PPCFrameLowering *PPCFI = static_cast<const PPCFrameLowering *>(
- MF.getSubtarget().getFrameLowering());
+ const PPCFrameLowering *PPCFI =
+ static_cast<const PPCFrameLowering *>(Subtarget.getFrameLowering());
// The ZERO register is not really a register, but the representation of r0
// when used in instructions that treat r0 as the constant 0.
@@ -192,7 +221,16 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// The 64-bit SVR4 ABI reserves r2 for the TOC pointer.
if (Subtarget.isSVR4ABI()) {
- Reserved.set(PPC::X2);
+ // We only reserve r2 if we need to use the TOC pointer. If we have no
+ // explicit uses of the TOC pointer (meaning we're a leaf function with
+ // no constant-pool loads, etc.) and we have no potential uses inside an
+  // inline asm block, then we can treat r2 as an ordinary callee-saved
+ // register.
+ const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ if (FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
+ Reserved.set(PPC::X2);
+ else
+ Reserved.reset(PPC::R2);
}
}
@@ -220,10 +258,9 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-unsigned
-PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
const unsigned DefaultSafety = 1;
switch (RC->getID()) {
@@ -238,6 +275,9 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
case PPC::F8RCRegClassID:
case PPC::F4RCRegClassID:
+ case PPC::QFRCRegClassID:
+ case PPC::QSRCRegClassID:
+ case PPC::QBRCRegClassID:
case PPC::VRRCRegClassID:
case PPC::VFRCRegClassID:
case PPC::VSLRCRegClassID:
@@ -251,8 +291,8 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}
-const TargetRegisterClass*
-PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)const {
+const TargetRegisterClass *PPCRegisterInfo::getLargestLegalSuperClass(
+ const TargetRegisterClass *RC) const {
if (Subtarget.hasVSX()) {
// With VSX, we can inflate various sub-register classes to the full VSX
// register set.
@@ -287,7 +327,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
// Get the frame info.
MachineFrameInfo *MFI = MF.getFrameInfo();
// Get the instruction info.
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
// Determine whether 64-bit pointers are used.
bool LP64 = Subtarget.isPPC64();
DebugLoc dl = MI.getDebugLoc();
@@ -298,10 +338,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
unsigned FrameSize = MFI->getStackSize();
// Get stack alignments.
- unsigned TargetAlign = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
+ unsigned TargetAlign = Subtarget.getFrameLowering()->getStackAlignment();
unsigned MaxAlign = MFI->getMaxAlignment();
assert((maxCallFrameSize & (MaxAlign-1)) == 0 &&
"Maximum call-frame size not sufficiently aligned");
@@ -406,7 +443,7 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -450,7 +487,7 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -523,7 +560,7 @@ void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -566,7 +603,7 @@ void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
bool LP64 = Subtarget.isPPC64();
@@ -613,7 +650,7 @@ void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
@@ -638,7 +675,7 @@ void PPCRegisterInfo::lowerVRSAVERestore(MachineBasicBlock::iterator II,
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
@@ -700,7 +737,10 @@ static unsigned getOffsetONFromFION(const MachineInstr &MI,
// Take into account whether it's an add or mem instruction
unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
if (MI.isInlineAsm())
- OffsetOperandNo = FIOperandNum-1;
+ OffsetOperandNo = FIOperandNum - 1;
+ else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
+ MI.getOpcode() == TargetOpcode::PATCHPOINT)
+ OffsetOperandNo = FIOperandNum + 1;
return OffsetOperandNo;
}
@@ -718,7 +758,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Get the basic block's function.
MachineFunction &MF = *MBB.getParent();
// Get the instruction info.
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
// Get the frame info.
MachineFrameInfo *MFI = MF.getFrameInfo();
DebugLoc dl = MI.getDebugLoc();
@@ -772,7 +812,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// If the instruction is not present in ImmToIdxMap, then it has no immediate
// form (and must be r+r).
- bool noImmForm = !MI.isInlineAsm() && !ImmToIdxMap.count(OpC);
+ bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
+ OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);
// Now add the frame object offset to the offset from r1.
int Offset = MFI->getObjectOffset(FrameIndex);
@@ -783,8 +824,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// to Offset to get the correct offset.
// Naked functions have stack size 0, although getStackSize may not reflect that
// because we didn't call all the pieces that compute it for naked functions.
- if (!MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked)) {
+ if (!MF.getFunction()->hasFnAttribute(Attribute::Naked)) {
if (!(hasBasePointer(MF) && FrameIndex < 0))
Offset += MFI->getStackSize();
}
@@ -796,8 +836,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// only "std" to a stack slot that is at least 4-byte aligned, but it can
// happen in invalid code.
assert(OpC != PPC::DBG_VALUE &&
- "This should be handle in a target independent way");
- if (!noImmForm && isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
+ "This should be handled in a target-independent way");
+ if (!noImmForm && ((isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) ||
+ OpC == TargetOpcode::STACKMAP ||
+ OpC == TargetOpcode::PATCHPOINT)) {
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
return;
}
@@ -843,7 +885,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
if (!Subtarget.isPPC64())
return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
@@ -887,14 +929,9 @@ bool PPCRegisterInfo::canRealignStack(const MachineFunction &MF) const {
bool PPCRegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
- unsigned StackAlign = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
- bool requiresRealignment =
- ((MFI->getMaxAlignment() > StackAlign) ||
- F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::StackAlignment));
+ unsigned StackAlign = Subtarget.getFrameLowering()->getStackAlignment();
+ bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+ F->hasFnAttribute(Attribute::StackAlignment));
return requiresRealignment && canRealignStack(MF);
}
@@ -928,8 +965,8 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
MachineBasicBlock &MBB = *MI->getParent();
MachineFunction &MF = *MBB.getParent();
- const PPCFrameLowering *PPCFI = static_cast<const PPCFrameLowering *>(
- MF.getSubtarget().getFrameLowering());
+ const PPCFrameLowering *PPCFI =
+ static_cast<const PPCFrameLowering *>(Subtarget.getFrameLowering());
unsigned StackEst =
PPCFI->determineFrameLayout(MF, false, true);
@@ -963,7 +1000,7 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
DL = Ins->getDebugLoc();
const MachineFunction &MF = *MBB->getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
const MCInstrDesc &MCID = TII.get(ADDriOpc);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
@@ -988,7 +1025,7 @@ void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
const MCInstrDesc &MCID = MI.getDesc();
MachineRegisterInfo &MRI = MF.getRegInfo();
MRI.constrainRegClass(BaseReg,
@@ -1008,6 +1045,8 @@ bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
Offset += MI->getOperand(OffsetOperandNo).getImm();
return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
+ MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT ||
(isInt<16>(Offset) && (!usesIXAddr(*MI) || (Offset & 3) == 0));
}
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index c182f95..4c2ef90 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -49,6 +49,8 @@ public:
const uint32_t *getCallPreservedMask(CallingConv::ID CC) const override;
const uint32_t *getNoPreservedMask() const;
+ void adjustStackMapLiveOutMask(uint32_t *Mask) const override;
+
BitVector getReservedRegs(const MachineFunction &MF) const override;
/// We require the register scavenger.
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td
index b3d145b..9a7df96 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -49,6 +49,13 @@ class FPR<bits<5> num, string n> : PPCReg<n> {
let HWEncoding{4-0} = num;
}
+// QFPR - One of the 32 256-bit floating-point vector registers (used for QPX)
+class QFPR<FPR SubReg, string n> : PPCReg<n> {
+ let HWEncoding = SubReg.HWEncoding;
+ let SubRegs = [SubReg];
+ let SubRegIndices = [sub_64];
+}
+
// VF - One of the 32 64-bit floating-point subregisters of the vector
// registers (used by VSX).
class VF<bits<5> num, string n> : PPCReg<n> {
@@ -114,6 +121,12 @@ foreach Index = 0-31 in {
def VF#Index : VF<Index, "vs" # !add(Index, 32)>;
}
+// QPX Floating-point registers
+foreach Index = 0-31 in {
+ def QF#Index : QFPR<!cast<FPR>("F"#Index), "q"#Index>,
+ DwarfRegNum<[!add(Index, 32), !add(Index, 32)]>;
+}
+
// Vector registers
foreach Index = 0-31 in {
def V#Index : VR<!cast<VF>("VF"#Index), "v"#Index>,
@@ -131,8 +144,8 @@ foreach Index = 0-31 in {
}
// The representation of r0 when treated as the constant 0.
-def ZERO : GPR<0, "0">;
-def ZERO8 : GP8<ZERO, "0">;
+def ZERO : GPR<0, "0">, DwarfRegAlias<R0>;
+def ZERO8 : GP8<ZERO, "0">, DwarfRegAlias<X0>;
// Representations of the frame pointer used by ISD::FRAMEADDR.
def FP : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
@@ -188,13 +201,6 @@ def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74, 74]>;
def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
}
-// The full condition-code register. This is not modeled fully, but defined
-// here primarily, for compatibility with gcc, to allow the inline asm "cc"
-// clobber specification to work.
-def CC : PPCReg<"cc">, DwarfRegAlias<CR0> {
- let Aliases = [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7];
-}
-
// Link register
def LR : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
//let Aliases = [LR] in
@@ -210,7 +216,7 @@ def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;
// Carry bit. In the architecture this is really bit 0 of the XER register
// (which really is SPR register 1); this is the only bit interesting to a
// compiler.
-def CARRY: SPR<1, "ca">;
+def CARRY: SPR<1, "ca">, DwarfRegNum<[76]>;
// FP rounding mode: bits 30 and 31 of the FP status and control register
// This is not allocated as a normal register; it appears only in
@@ -219,25 +225,57 @@ def CARRY: SPR<1, "ca">;
// most registers, it has to be done in code; to make this work all the
// return and call instructions are described as Uses of RM, so instructions
// that do nothing but change RM will not get deleted.
-// Also, in the architecture it is not really a SPR; 512 is arbitrary.
-def RM: SPR<512, "**ROUNDING MODE**">;
+def RM: PPCReg<"**ROUNDING MODE**">;
/// Register classes
// Allocate volatiles first
// then nonvolatiles in reverse order since stmw/lmw save from rN to r31
def GPRC : RegisterClass<"PPC", [i32], 32, (add (sequence "R%u", 2, 12),
(sequence "R%u", 30, 13),
- R31, R0, R1, FP, BP)>;
+ R31, R0, R1, FP, BP)> {
+ // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
+ // put it at the end of the list.
+ let AltOrders = [(add (sub GPRC, R2), R2)];
+ let AltOrderSelect = [{
+ const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
+ return S.isPPC64() && S.isSVR4ABI();
+ }];
+}
def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
(sequence "X%u", 30, 14),
- X31, X13, X0, X1, FP8, BP8)>;
+ X31, X13, X0, X1, FP8, BP8)> {
+ // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
+ // put it at the end of the list.
+ let AltOrders = [(add (sub G8RC, X2), X2)];
+ let AltOrderSelect = [{
+ const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
+ return S.isPPC64() && S.isSVR4ABI();
+ }];
+}
// For some instructions r0 is special (representing the value 0 instead of
// the value in the r0 register), and we use these register subclasses to
// prevent r0 from being allocated for use by those instructions.
-def GPRC_NOR0 : RegisterClass<"PPC", [i32], 32, (add (sub GPRC, R0), ZERO)>;
-def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)>;
+def GPRC_NOR0 : RegisterClass<"PPC", [i32], 32, (add (sub GPRC, R0), ZERO)> {
+ // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
+ // put it at the end of the list.
+ let AltOrders = [(add (sub GPRC_NOR0, R2), R2)];
+ let AltOrderSelect = [{
+ const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
+ return S.isPPC64() && S.isSVR4ABI();
+ }];
+}
+
+def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)> {
+ // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
+ // put it at the end of the list.
+ let AltOrders = [(add (sub G8RC_NOX0, X2), X2)];
+ let AltOrderSelect = [{
+ const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
+ return S.isPPC64() && S.isSVR4ABI();
+ }];
+}
// Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
// ABI the size of the Floating-point register save area is determined by the
@@ -250,7 +288,7 @@ def F8RC : RegisterClass<"PPC", [f64], 64, (add (sequence "F%u", 0, 13),
(sequence "F%u", 31, 14))>;
def F4RC : RegisterClass<"PPC", [f32], 32, (add F8RC)>;
-def VRRC : RegisterClass<"PPC", [v16i8,v8i16,v4i32,v4f32], 128,
+def VRRC : RegisterClass<"PPC", [v16i8,v8i16,v4i32,v2i64,v4f32], 128,
(add V2, V3, V4, V5, V0, V1, V6, V7, V8, V9, V10, V11,
V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
V29, V28, V27, V26, V25, V24, V23, V22, V21, V20)>;
@@ -278,6 +316,16 @@ def VFRC : RegisterClass<"PPC", [f64], 64,
VF22, VF21, VF20)>;
def VSFRC : RegisterClass<"PPC", [f64], 64, (add F8RC, VFRC)>;
+// For QPX
+def QFRC : RegisterClass<"PPC", [v4f64], 256, (add (sequence "QF%u", 0, 13),
+ (sequence "QF%u", 31, 14))>;
+def QSRC : RegisterClass<"PPC", [v4f32], 128, (add QFRC)>;
+def QBRC : RegisterClass<"PPC", [v4i1], 256, (add QFRC)> {
+ // These are actually stored as floating-point values where a positive
+ // number is true and anything else (including NaN) is false.
+ let Size = 256;
+}
+
def CRBITRC : RegisterClass<"PPC", [i1], 32,
(add CR2LT, CR2GT, CR2EQ, CR2UN,
CR3LT, CR3GT, CR3EQ, CR3UN,
@@ -308,7 +356,3 @@ def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY)> {
let CopyCost = -1;
}
-def CCRC : RegisterClass<"PPC", [i32], 32, (add CC)> {
- let isAllocatable = 0;
-}
-
diff --git a/lib/Target/PowerPC/PPCSchedule.td b/lib/Target/PowerPC/PPCSchedule.td
index 7f80121..2f3a1f9 100644
--- a/lib/Target/PowerPC/PPCSchedule.td
+++ b/lib/Target/PowerPC/PPCSchedule.td
@@ -13,6 +13,7 @@
def IIC_IntSimple : InstrItinClass;
def IIC_IntGeneral : InstrItinClass;
def IIC_IntCompare : InstrItinClass;
+def IIC_IntISEL : InstrItinClass;
def IIC_IntDivD : InstrItinClass;
def IIC_IntDivW : InstrItinClass;
def IIC_IntMFFS : InstrItinClass;
@@ -119,6 +120,7 @@ include "PPCScheduleG4.td"
include "PPCScheduleG4Plus.td"
include "PPCScheduleG5.td"
include "PPCScheduleP7.td"
+include "PPCScheduleP8.td"
include "PPCScheduleA2.td"
include "PPCScheduleE500mc.td"
include "PPCScheduleE5500.td"
@@ -216,6 +218,7 @@ include "PPCScheduleE5500.td"
// fsub IIC_FPAddSub
// fsubs IIC_FPGeneral
// icbi IIC_LdStICBI
+// isel IIC_IntISEL
// isync IIC_SprISYNC
// lbz IIC_LdStLoad
// lbzu IIC_LdStLoadUpd
diff --git a/lib/Target/PowerPC/PPCSchedule440.td b/lib/Target/PowerPC/PPCSchedule440.td
index 218fed2..04a43bc 100644
--- a/lib/Target/PowerPC/PPCSchedule440.td
+++ b/lib/Target/PowerPC/PPCSchedule440.td
@@ -121,6 +121,14 @@ def PPC440Itineraries : ProcessorItineraries<
[2, 0, 0],
[P440_GPR_Bypass,
P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass, NoBypass]>,
InstrItinData<IIC_IntCompare, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
InstrStage<1, [P440_IRACC, P440_LRACC]>,
InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
diff --git a/lib/Target/PowerPC/PPCScheduleA2.td b/lib/Target/PowerPC/PPCScheduleA2.td
index 1447696..21a357a 100644
--- a/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/lib/Target/PowerPC/PPCScheduleA2.td
@@ -29,6 +29,8 @@ def PPCA2Itineraries : ProcessorItineraries<
[1, 0, 0]>,
InstrItinData<IIC_IntGeneral, [InstrStage<1, [A2_XU]>],
[2, 0, 0]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0, 0]>,
InstrItinData<IIC_IntCompare, [InstrStage<1, [A2_XU]>],
[2, 0, 0]>,
InstrItinData<IIC_IntDivW, [InstrStage<1, [A2_XU]>],
diff --git a/lib/Target/PowerPC/PPCScheduleE500mc.td b/lib/Target/PowerPC/PPCScheduleE500mc.td
index dab89e3..36b8517 100644
--- a/lib/Target/PowerPC/PPCScheduleE500mc.td
+++ b/lib/Target/PowerPC/PPCScheduleE500mc.td
@@ -54,6 +54,12 @@ def PPCE500mcItineraries : ProcessorItineraries<
[4, 1, 1], // Latency = 1
[E500_GPR_Bypass,
E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1, 1], // Latency = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass,
+ E500_CR_Bypass]>,
InstrItinData<IIC_IntCompare, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
InstrStage<1, [E500_SFX0, E500_SFX1]>],
[5, 1, 1], // Latency = 1 or 2
diff --git a/lib/Target/PowerPC/PPCScheduleE5500.td b/lib/Target/PowerPC/PPCScheduleE5500.td
index de097d9..7c2693e 100644
--- a/lib/Target/PowerPC/PPCScheduleE5500.td
+++ b/lib/Target/PowerPC/PPCScheduleE5500.td
@@ -58,6 +58,12 @@ def PPCE5500Itineraries : ProcessorItineraries<
[5, 2, 2], // Latency = 1
[E5500_GPR_Bypass,
E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5, 2, 2, 2], // Latency = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass,
+ E5500_CR_Bypass]>,
InstrItinData<IIC_IntCompare, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
[6, 2, 2], // Latency = 1 or 2
diff --git a/lib/Target/PowerPC/PPCScheduleP7.td b/lib/Target/PowerPC/PPCScheduleP7.td
index d3e4269..635d154 100644
--- a/lib/Target/PowerPC/PPCScheduleP7.td
+++ b/lib/Target/PowerPC/PPCScheduleP7.td
@@ -89,6 +89,10 @@ def P7Itineraries : ProcessorItineraries<
P7_DU3, P7_DU4], 0>,
InstrStage<1, [P7_FX1, P7_FX2]>],
[1, 1, 1]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2], 0>,
+ InstrStage<1, [P7_BRU]>],
+ [1, 1, 1, 1]>,
InstrItinData<IIC_IntCompare , [InstrStage<1, [P7_DU1, P7_DU2,
P7_DU3, P7_DU4], 0>,
InstrStage<1, [P7_FX1, P7_FX2]>],
@@ -380,6 +384,9 @@ def P7Model : SchedMachineModel {
// Itineraries are queried instead.
let MispredictPenalty = 16;
+ // Try to make sure we have at least 10 dispatch groups in a loop.
+ let LoopMicroOpBufferSize = 40;
+
let Itineraries = P7Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleP8.td b/lib/Target/PowerPC/PPCScheduleP8.td
new file mode 100644
index 0000000..020739b
--- /dev/null
+++ b/lib/Target/PowerPC/PPCScheduleP8.td
@@ -0,0 +1,401 @@
+//===-- PPCScheduleP8.td - PPC P8 Scheduling Definitions ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the POWER8 processor.
+//
+//===----------------------------------------------------------------------===//
+
+// Scheduling for the P8 involves tracking two types of resources:
+// 1. The dispatch bundle slots
+// 2. The functional unit resources
+
+// Dispatch units:
+def P8_DU1 : FuncUnit;
+def P8_DU2 : FuncUnit;
+def P8_DU3 : FuncUnit;
+def P8_DU4 : FuncUnit;
+def P8_DU5 : FuncUnit;
+def P8_DU6 : FuncUnit;
+def P8_DU7 : FuncUnit; // Only branch instructions will use DU7 and DU8
+def P8_DU8 : FuncUnit;
+
+// 10 insns per cycle (2-LU, 2-LSU, 2-FXU, 2-FPU, 1-CRU, 1-BRU).
+
+def P8_LU1 : FuncUnit; // Loads or fixed-point operations 1
+def P8_LU2 : FuncUnit; // Loads or fixed-point operations 2
+
+// Load/Store pipelines can handle stores, fixed-point loads, and simple
+// fixed-point operations.
+def P8_LSU1 : FuncUnit; // Load/Store pipeline 1
+def P8_LSU2 : FuncUnit; // Load/Store pipeline 2
+
+// Fixed Point unit
+def P8_FXU1 : FuncUnit; // FX pipeline 1
+def P8_FXU2 : FuncUnit; // FX pipeline 2
+
+// The Floating-Point Unit (FPU) and Vector Media Extension (VMX) units
+// are combined on P7 and newer into a Vector Scalar Unit (VSU).
+// The P8 instruction-latency documentation still refers to the unit as the
+// FPU, so keep in mind that FPU == VSU.
+// In contrast to the P7, the VMX units on the P8 are symmetric, so there is
+// no need to tie vector integer ops or 128-bit load/store/permute operations
+// to specific units.
+def P8_FPU1 : FuncUnit; // VS pipeline 1
+def P8_FPU2 : FuncUnit; // VS pipeline 2
+
+def P8_CRU : FuncUnit; // CR unit (CR logicals and move-from-SPRs)
+def P8_BRU : FuncUnit; // BR unit
+
+def P8Itineraries : ProcessorItineraries<
+ [P8_DU1, P8_DU2, P8_DU3, P8_DU4, P8_DU5, P8_DU6, P8_DU7, P8_DU8,
+ P8_LU1, P8_LU2, P8_LSU1, P8_LSU2, P8_FXU1, P8_FXU2,
+ P8_FPU1, P8_FPU2, P8_CRU, P8_BRU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2,
+ P8_LU1, P8_LU2,
+ P8_LSU1, P8_LSU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2, P8_LU1,
+ P8_LU2, P8_LSU1, P8_LSU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntISEL, [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2], 0>,
+ InstrStage<1, [P8_BRU]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntDivW , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<15, [P8_FXU1, P8_FXU2]>],
+ [15, 1, 1]>,
+ InstrItinData<IIC_IntDivD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<23, [P8_FXU1, P8_FXU2]>],
+ [23, 1, 1]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntRotateD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntShift , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1]>,
+ InstrItinData<IIC_IntTrapD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [1, 1]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [P8_DU7, P8_DU8], 0>,
+ InstrStage<1, [P8_BRU]>],
+ [3, 1, 1]>,
+  // FIXME: The Br* groups below are not branch related, so they should
+  // probably be renamed.
+  // IIC_BrCR consists of the cr* instructions (crand, crnor, creqv, etc.)
+  // and should be first in the dispatch group.
+ InstrItinData<IIC_BrCR , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_CRU]>],
+ [3, 1, 1]>,
+ // IIC_BrMCR consists of the mcrf instruction.
+ InstrItinData<IIC_BrMCR , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_CRU]>],
+ [3, 1, 1]>,
+ // IIC_BrMCRX consists of mcrxr (obsolete instruction) and mtcrf, which
+ // should be first in the dispatch group.
+ InstrItinData<IIC_BrMCRX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2 ], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [2, 2, 1, 1]>,
+  // Update-Indexed form loads/stores are no longer first and last in the
+  // dispatch group. They are simply cracked, so they require DU1 and DU2.
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_LdStLDU , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [2, 2, 1, 1]>,
+ InstrItinData<IIC_LdStLDUX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLVecX , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2,
+ P8_LU1, P8_LU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 4, 1, 1]>,
+ // first+last in dispatch group.
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_DU5], 0>,
+ InstrStage<1, [P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 4, 1, 1]>,
+ InstrItinData<IIC_LdStLWA , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>],
+ [3, 1, 1]>,
+ // first+last
+ InstrItinData<IIC_LdStLDARX, [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_DU5], 0>,
+ InstrStage<1, [P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2,
+ P8_LU1, P8_LU2]>],
+ [2, 1, 1]>,
+// Stores are dual-issued from the issue queue, so they may take up only one
+// dispatch slot. The instruction is broken into two IOPs: the agen op is
+// issued to the LSU, and the data op (register fetch) is issued to either
+// the LU (GPR store) or the VSU (FPR store).
+ InstrItinData<IIC_LdStStore , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2]>,
+ InstrStage<1, [P8_LU1, P8_LU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2,
+                                               P8_LSU1, P8_LSU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTDU , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2,
+ P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [2, 1, 1, 1]>,
+ // First+last
+ InstrItinData<IIC_LdStSTDUX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_DU5], 0>,
+ InstrStage<1, [P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_LdStSTVEBX , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTDCX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_DU5], 0>,
+ InstrStage<1, [P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_DU2], 0>,
+ InstrStage<1, [P8_DU3], 0>,
+ InstrStage<1, [P8_DU4], 0>,
+ InstrStage<1, [P8_DU5], 0>,
+ InstrStage<1, [P8_DU6], 0>,
+ InstrStage<1, [P8_LSU1, P8_LSU2], 0>,
+ InstrStage<1, [P8_LU1, P8_LU2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_SprMFCR , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_CRU]>],
+ [6, 1]>,
+ InstrItinData<IIC_SprMFCRF , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_CRU]>],
+ [3, 1]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FXU1, P8_FXU2]>],
+ [4, 1]>, // mtctr
+ InstrItinData<IIC_FPGeneral , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [5, 1, 1]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [8, 1, 1]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [33, 1, 1]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [27, 1, 1]>,
+ InstrItinData<IIC_FPSqrtD , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [44, 1, 1]>,
+ InstrItinData<IIC_FPSqrtS , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [32, 1, 1]>,
+ InstrItinData<IIC_FPFused , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [5, 1, 1, 1]>,
+ InstrItinData<IIC_FPRes , [InstrStage<1, [P8_DU1, P8_DU2, P8_DU3,
+ P8_DU4, P8_DU5, P8_DU6], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [5, 1, 1]>,
+ InstrItinData<IIC_VecGeneral , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecVSL , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecVSR , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecFP , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecFPCompare, [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecFPRound , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecComplex , [InstrStage<1, [P8_DU1], 0>,
+ InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [7, 1, 1]>,
+ InstrItinData<IIC_VecPerm , [InstrStage<1, [P8_DU1, P8_DU2], 0>,
+                                  InstrStage<1, [P8_FPU1, P8_FPU2]>],
+ [3, 1, 1]>
+]>;
+
+//===----------------------------------------------------------------------===//
+// P8 machine model for scheduling and other instruction cost heuristics.
+// P8 has an 8 insn dispatch group (6 non-branch, 2 branch) and can issue up
+// to 10 insns per cycle (2-LU, 2-LSU, 2-FXU, 2-FPU, 1-CRU, 1-BRU).
+
+def P8Model : SchedMachineModel {
+ let IssueWidth = 8; // up to 8 instructions dispatched per cycle.
+ // up to six non-branch instructions.
+ // up to two branches in a dispatch group.
+
+ let MinLatency = 0; // Out-of-order dispatch.
+ let LoadLatency = 3; // Optimistic load latency assuming bypass.
+                         // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+ let MispredictPenalty = 16;
+
+  // Try to make sure we have at least 10 dispatch groups (of up to six
+  // non-branch instructions each) in a loop.
+ let LoopMicroOpBufferSize = 60;
+
+ let Itineraries = P8Itineraries;
+}
+
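The itineraries above track both resource types named at the top of the file:
dispatch-slot stages (P8_DU*) and functional-unit stages. As a rough sketch of
how a consumer turns such a table into a latency, here is a C++ fragment that
mirrors what InstrItineraryData::getStageLatency does, assuming an already
initialized InstrItineraryData and a valid itinerary class index:

    #include "llvm/MC/MCInstrItineraries.h"

    // Sum the cycle counts of every InstrStage for one itinerary class.
    static unsigned stageLatency(const llvm::InstrItineraryData &Itins,
                                 unsigned ItinClassIdx) {
      unsigned Latency = 0;
      for (const llvm::InstrStage *IS = Itins.beginStage(ItinClassIdx),
                                  *E = Itins.endStage(ItinClassIdx);
           IS != E; ++IS)
        Latency += IS->getCycles();
      return Latency;
    }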
diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp
index 04e7ec6..c91428d 100644
--- a/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -14,11 +14,13 @@
#include "PPCSubtarget.h"
#include "PPC.h"
#include "PPCRegisterInfo.h"
+#include "PPCTargetMachine.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
@@ -32,39 +34,12 @@ using namespace llvm;
#define GET_SUBTARGETINFO_CTOR
#include "PPCGenSubtargetInfo.inc"
-/// Return the datalayout string of a subtarget.
-static std::string getDataLayoutString(const Triple &T) {
- bool is64Bit = T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le;
- std::string Ret;
-
- // Most PPC* platforms are big endian, PPC64LE is little endian.
- if (T.getArch() == Triple::ppc64le)
- Ret = "e";
- else
- Ret = "E";
-
- Ret += DataLayout::getManglingComponent(T);
-
- // PPC32 has 32 bit pointers. The PS3 (OS Lv2) is a PPC64 machine with 32 bit
- // pointers.
- if (!is64Bit || T.getOS() == Triple::Lv2)
- Ret += "-p:32:32";
-
- // Note, the alignment values for f64 and i64 on ppc64 in Darwin
- // documentation are wrong; these are correct (i.e. "what gcc does").
- if (is64Bit || !T.isOSDarwin())
- Ret += "-i64:64";
- else
- Ret += "-f64:32:64";
-
- // PPC64 has 32 and 64 bit registers, PPC32 has only 32 bit ones.
- if (is64Bit)
- Ret += "-n32:64";
- else
- Ret += "-n32";
-
- return Ret;
-}
+static cl::opt<bool>
+UseSubRegLiveness("ppc-track-subreg-liveness",
+                  cl::desc("Enable subregister liveness tracking for PPC"),
+                  cl::Hidden);
+
+static cl::opt<bool>
+QPXStackUnaligned("qpx-stack-unaligned",
+                  cl::desc("Even when QPX is enabled, the stack is not "
+                           "32-byte aligned"),
+                  cl::Hidden);
PPCSubtarget &PPCSubtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
@@ -76,12 +51,10 @@ PPCSubtarget &PPCSubtarget::initializeSubtargetDependencies(StringRef CPU,
PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, const PPCTargetMachine &TM)
: PPCGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT),
- DL(getDataLayoutString(TargetTriple)),
IsPPC64(TargetTriple.getArch() == Triple::ppc64 ||
TargetTriple.getArch() == Triple::ppc64le),
- TargetABI(PPC_ABI_UNKNOWN),
- FrameLowering(initializeSubtargetDependencies(CPU, FS)), InstrInfo(*this),
- TLInfo(TM), TSInfo(&DL) {}
+ TM(TM), FrameLowering(initializeSubtargetDependencies(CPU, FS)),
+ InstrInfo(*this), TLInfo(TM, *this), TSInfo(TM.getDataLayout()) {}
void PPCSubtarget::initializeEnvironment() {
StackAlignment = 16;
@@ -95,6 +68,7 @@ void PPCSubtarget::initializeEnvironment() {
HasQPX = false;
HasVSX = false;
HasP8Vector = false;
+ HasP8Altivec = false;
HasFCPSGN = false;
HasFSQRT = false;
HasFRE = false;
@@ -108,6 +82,7 @@ void PPCSubtarget::initializeEnvironment() {
HasFPCVT = false;
HasISEL = false;
HasPOPCNTD = false;
+ HasCMPB = false;
HasLDBRX = false;
IsBookE = false;
HasOnlyMSYNC = false;
@@ -117,13 +92,21 @@ void PPCSubtarget::initializeEnvironment() {
DeprecatedMFTB = false;
DeprecatedDST = false;
HasLazyResolverStubs = false;
+ HasICBT = false;
+ HasInvariantFunctionDescriptors = false;
+ IsQPXStackUnaligned = false;
}
void PPCSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// Determine default and user specified characteristics
std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "generic";
+ if (CPUName.empty()) {
+    // If cross-compiling with -march=ppc64le without -mcpu, default to the
+    // ppc64le CPU model rather than "generic".
+ if (TargetTriple.getArch() == Triple::ppc64le)
+ CPUName = "ppc64le";
+ else
+ CPUName = "generic";
+ }
#if (defined(__APPLE__) || defined(__linux__)) && \
(defined(__ppc__) || defined(__powerpc__))
if (CPUName == "generic")
@@ -148,35 +131,18 @@ void PPCSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// QPX requires a 32-byte aligned stack. Note that we need to do this if
// we're compiling for a BG/Q system regardless of whether or not QPX
// is enabled because external functions will assume this alignment.
- if (hasQPX() || isBGQ())
- StackAlignment = 32;
+ IsQPXStackUnaligned = QPXStackUnaligned;
+ StackAlignment = getPlatformStackAlignment();
// Determine endianness.
+ // FIXME: Part of the TargetMachine.
IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le);
-
- // FIXME: For now, we disable VSX in little-endian mode until endian
- // issues in those instructions can be addressed.
- if (IsLittleEndian) {
- HasVSX = false;
- HasP8Vector = false;
- }
-
- // Determine default ABI.
- if (TargetABI == PPC_ABI_UNKNOWN) {
- if (!isDarwin() && IsPPC64) {
- if (IsLittleEndian)
- TargetABI = PPC_ABI_ELFv2;
- else
- TargetABI = PPC_ABI_ELFv1;
- }
- }
}
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
-bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV,
- const TargetMachine &TM) const {
+bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV) const {
// We never have stubs if HasLazyResolverStubs=false or if in static mode.
if (!HasLazyResolverStubs || TM.getRelocationModel() == Reloc::Static)
return false;
@@ -240,3 +206,9 @@ bool PPCSubtarget::useAA() const {
return needsAggressiveScheduling(DarwinDirective);
}
+bool PPCSubtarget::enableSubRegLiveness() const {
+ return UseSubRegLiveness;
+}
+
+bool PPCSubtarget::isELFv2ABI() const { return TM.isELFv2ABI(); }
+bool PPCSubtarget::isPPC64() const { return TM.isPPC64(); }
diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h
index 1df19c3..247a96d 100644
--- a/lib/Target/PowerPC/PPCSubtarget.h
+++ b/lib/Target/PowerPC/PPCSubtarget.h
@@ -15,8 +15,8 @@
#define LLVM_LIB_TARGET_POWERPC_PPCSUBTARGET_H
#include "PPCFrameLowering.h"
-#include "PPCInstrInfo.h"
#include "PPCISelLowering.h"
+#include "PPCInstrInfo.h"
#include "PPCSelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
@@ -68,9 +68,6 @@ protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
- // Calculates type size & alignment
- const DataLayout DL;
-
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned StackAlignment;
@@ -92,6 +89,7 @@ protected:
bool HasQPX;
bool HasVSX;
bool HasP8Vector;
+ bool HasP8Altivec;
bool HasFCPSGN;
bool HasFSQRT;
bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES;
@@ -102,6 +100,7 @@ protected:
bool HasFPCVT;
bool HasISEL;
bool HasPOPCNTD;
+ bool HasCMPB;
bool HasLDBRX;
bool IsBookE;
bool HasOnlyMSYNC;
@@ -112,13 +111,15 @@ protected:
bool DeprecatedDST;
bool HasLazyResolverStubs;
bool IsLittleEndian;
+ bool HasICBT;
+ bool HasInvariantFunctionDescriptors;
- enum {
- PPC_ABI_UNKNOWN,
- PPC_ABI_ELFv1,
- PPC_ABI_ELFv2
- } TargetABI;
+  /// When targeting QPX on a stock PPC64 Linux kernel, where the stack
+  /// alignment has not been raised to 32 bytes, we need to keep the default
+  /// 16-byte alignment of the stack.
+ bool IsQPXStackUnaligned;
+ const PPCTargetMachine &TM;
PPCFrameLowering FrameLowering;
PPCInstrInfo InstrInfo;
PPCTargetLowering TLInfo;
@@ -153,7 +154,6 @@ public:
const PPCFrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
- const DataLayout *getDataLayout() const override { return &DL; }
const PPCInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const PPCTargetLowering *getTargetLowering() const override {
return &TLInfo;
@@ -164,6 +164,7 @@ public:
const PPCRegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
+ const PPCTargetMachine &getTargetMachine() const { return TM; }
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
@@ -176,7 +177,7 @@ private:
public:
/// isPPC64 - Return true if we are generating code for 64-bit pointer mode.
///
- bool isPPC64() const { return IsPPC64; }
+ bool isPPC64() const;
/// has64BitSupport - Return true if the selected CPU supports 64-bit
/// instructions, regardless of whether we are in 32-bit or 64-bit mode.
@@ -194,8 +195,7 @@ public:
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
- bool hasLazyResolverStub(const GlobalValue *GV,
- const TargetMachine &TM) const;
+ bool hasLazyResolverStub(const GlobalValue *GV) const;
// isLittleEndian - True if generating little-endian code
bool isLittleEndian() const { return IsLittleEndian; }
@@ -217,9 +217,11 @@ public:
bool hasQPX() const { return HasQPX; }
bool hasVSX() const { return HasVSX; }
bool hasP8Vector() const { return HasP8Vector; }
+ bool hasP8Altivec() const { return HasP8Altivec; }
bool hasMFOCRF() const { return HasMFOCRF; }
bool hasISEL() const { return HasISEL; }
bool hasPOPCNTD() const { return HasPOPCNTD; }
+ bool hasCMPB() const { return HasCMPB; }
bool hasLDBRX() const { return HasLDBRX; }
bool isBookE() const { return IsBookE; }
bool hasOnlyMSYNC() const { return HasOnlyMSYNC; }
@@ -228,6 +230,18 @@ public:
bool isE500() const { return IsE500; }
bool isDeprecatedMFTB() const { return DeprecatedMFTB; }
bool isDeprecatedDST() const { return DeprecatedDST; }
+ bool hasICBT() const { return HasICBT; }
+ bool hasInvariantFunctionDescriptors() const {
+ return HasInvariantFunctionDescriptors;
+ }
+
+ bool isQPXStackUnaligned() const { return IsQPXStackUnaligned; }
+ unsigned getPlatformStackAlignment() const {
+ if ((hasQPX() || isBGQ()) && !isQPXStackUnaligned())
+ return 32;
+
+ return 16;
+ }
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -239,9 +253,9 @@ public:
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
- bool isDarwinABI() const { return isDarwin(); }
- bool isSVR4ABI() const { return !isDarwin(); }
- bool isELFv2ABI() const { return TargetABI == PPC_ABI_ELFv2; }
+ bool isDarwinABI() const { return isTargetMachO() || isDarwin(); }
+ bool isSVR4ABI() const { return !isDarwinABI(); }
+ bool isELFv2ABI() const;
bool enableEarlyIfConversion() const override { return hasISEL(); }
@@ -257,6 +271,8 @@ public:
MachineInstr *end,
unsigned NumRegionInstrs) const override;
bool useAA() const override;
+
+ bool enableSubRegLiveness() const override;
};
} // End llvm namespace
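The new IsQPXStackUnaligned flag interacts with the QPX/BG/Q detection in
getPlatformStackAlignment() above. The rule reduces to a one-liner, restated
here as a standalone C++ sketch (the booleans stand in for the subtarget
queries; this is a summary of the behavior above, not new behavior):

    // Mirrors PPCSubtarget::getPlatformStackAlignment().
    static unsigned platformStackAlignment(bool HasQPXOrIsBGQ,
                                           bool QPXStackUnaligned) {
      return (HasQPXOrIsBGQ && !QPXStackUnaligned) ? 32u : 16u;
    }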
diff --git a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
new file mode 100644
index 0000000..270fc71
--- /dev/null
+++ b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -0,0 +1,168 @@
+//===---------- PPCTLSDynamicCall.cpp - TLS Dynamic Call Fixup ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass expands ADDItls{ld,gd}LADDR[32] machine instructions into
+// separate ADDItls[gd]L[32] and GETtlsADDR[32] instructions, both of
+// which define GPR3. A copy is added from GPR3 to the target virtual
+// register of the original instruction. The GETtlsADDR[32] is really
+// a call instruction, so its target register is constrained to be GPR3.
+// This is not true of ADDItls[gd]L[32], but there is a legacy linker
+// optimization bug that requires the target register of the addi of
+// a local- or general-dynamic TLS access sequence to be GPR3.
+//
+// This is done in a late pass so that TLS variable accesses can be
+// fully commoned by MachineCSE.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCInstrInfo.h"
+#include "PPC.h"
+#include "PPCInstrBuilder.h"
+#include "PPCTargetMachine.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-tls-dynamic-call"
+
+namespace llvm {
+ void initializePPCTLSDynamicCallPass(PassRegistry&);
+}
+
+namespace {
+ struct PPCTLSDynamicCall : public MachineFunctionPass {
+ static char ID;
+ PPCTLSDynamicCall() : MachineFunctionPass(ID) {
+ initializePPCTLSDynamicCallPass(*PassRegistry::getPassRegistry());
+ }
+
+ const PPCInstrInfo *TII;
+ LiveIntervals *LIS;
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+ bool Is64Bit = MBB.getParent()->getSubtarget<PPCSubtarget>().isPPC64();
+
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+
+ if (MI->getOpcode() != PPC::ADDItlsgdLADDR &&
+ MI->getOpcode() != PPC::ADDItlsldLADDR &&
+ MI->getOpcode() != PPC::ADDItlsgdLADDR32 &&
+ MI->getOpcode() != PPC::ADDItlsldLADDR32)
+ continue;
+
+      DEBUG(dbgs() << "TLS Dynamic Call Fixup:\n " << *MI);
+
+ unsigned OutReg = MI->getOperand(0).getReg();
+ unsigned InReg = MI->getOperand(1).getReg();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned GPR3 = Is64Bit ? PPC::X3 : PPC::R3;
+ unsigned Opc1, Opc2;
+ SmallVector<unsigned, 4> OrigRegs;
+ OrigRegs.push_back(OutReg);
+ OrigRegs.push_back(InReg);
+ OrigRegs.push_back(GPR3);
+
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("Opcode inconsistency error");
+ case PPC::ADDItlsgdLADDR:
+ Opc1 = PPC::ADDItlsgdL;
+ Opc2 = PPC::GETtlsADDR;
+ break;
+ case PPC::ADDItlsldLADDR:
+ Opc1 = PPC::ADDItlsldL;
+ Opc2 = PPC::GETtlsldADDR;
+ break;
+ case PPC::ADDItlsgdLADDR32:
+ Opc1 = PPC::ADDItlsgdL32;
+ Opc2 = PPC::GETtlsADDR32;
+ break;
+ case PPC::ADDItlsldLADDR32:
+ Opc1 = PPC::ADDItlsldL32;
+ Opc2 = PPC::GETtlsldADDR32;
+ break;
+ }
+
+ // Expand into two ops built prior to the existing instruction.
+ MachineInstr *Addi = BuildMI(MBB, I, DL, TII->get(Opc1), GPR3)
+ .addReg(InReg);
+ Addi->addOperand(MI->getOperand(2));
+
+ // The ADDItls* instruction is the first instruction in the
+ // repair range.
+ MachineBasicBlock::iterator First = I;
+ --First;
+
+ MachineInstr *Call = (BuildMI(MBB, I, DL, TII->get(Opc2), GPR3)
+ .addReg(GPR3));
+ Call->addOperand(MI->getOperand(3));
+
+ BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), OutReg)
+ .addReg(GPR3);
+
+ // The COPY is the last instruction in the repair range.
+ MachineBasicBlock::iterator Last = I;
+ --Last;
+
+ // Move past the original instruction and remove it.
+ ++I;
+ MI->removeFromParent();
+
+ // Repair the live intervals.
+ LIS->repairIntervalsInRange(&MBB, First, Last, OrigRegs);
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TII = MF.getSubtarget<PPCSubtarget>().getInstrInfo();
+ LIS = &getAnalysis<LiveIntervals>();
+
+ bool Changed = false;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS_BEGIN(PPCTLSDynamicCall, DEBUG_TYPE,
+ "PowerPC TLS Dynamic Call Fixup", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(PPCTLSDynamicCall, DEBUG_TYPE,
+ "PowerPC TLS Dynamic Call Fixup", false, false)
+
+char PPCTLSDynamicCall::ID = 0;
+FunctionPass*
+llvm::createPPCTLSDynamicCallPass() { return new PPCTLSDynamicCall(); }
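For clarity, a sketch of the rewrite this pass performs for the 64-bit
general-dynamic case, written as MIR-style C++ comments (register and symbol
names are illustrative; the operand positions follow the getOperand(2) and
getOperand(3) uses in processBlock above):

    // Before:
    //   %vreg0 = ADDItlsgdLADDR %vreg1, @x, @x
    //
    // After:
    //   X3     = ADDItlsgdL %vreg1, @x
    //   X3     = GETtlsADDR X3, @x   // really a call; must target X3
    //   %vreg0 = COPY X3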
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index f15189c..b219e93 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -12,26 +12,42 @@
//===----------------------------------------------------------------------===//
#include "PPCTargetMachine.h"
-#include "PPCTargetObjectFile.h"
#include "PPC.h"
+#include "PPCTargetObjectFile.h"
+#include "PPCTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
static cl::
opt<bool> DisableCTRLoops("disable-ppc-ctrloops", cl::Hidden,
cl::desc("Disable CTR loops for PPC"));
+static cl::
+opt<bool> DisablePreIncPrep("disable-ppc-preinc-prep", cl::Hidden,
+ cl::desc("Disable PPC loop preinc prep"));
+
static cl::opt<bool>
VSXFMAMutateEarly("schedule-ppc-vsx-fma-mutation-early",
cl::Hidden, cl::desc("Schedule VSX FMA instruction mutation early"));
+static cl::opt<bool>
+EnableGEPOpt("ppc-gep-opt", cl::Hidden,
+ cl::desc("Enable optimizations on complex GEPs"),
+ cl::init(true));
+
+static cl::opt<bool>
+EnablePrefetch("enable-ppc-prefetching",
+ cl::desc("disable software prefetching on PPC"),
+ cl::init(false), cl::Hidden);
+
extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
@@ -39,6 +55,40 @@ extern "C" void LLVMInitializePowerPCTarget() {
RegisterTargetMachine<PPC64TargetMachine> C(ThePPC64LETarget);
}
+/// Return the datalayout string for the given target triple.
+static std::string getDataLayoutString(const Triple &T) {
+ bool is64Bit = T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le;
+ std::string Ret;
+
+ // Most PPC* platforms are big endian, PPC64LE is little endian.
+ if (T.getArch() == Triple::ppc64le)
+ Ret = "e";
+ else
+ Ret = "E";
+
+ Ret += DataLayout::getManglingComponent(T);
+
+ // PPC32 has 32 bit pointers. The PS3 (OS Lv2) is a PPC64 machine with 32 bit
+ // pointers.
+ if (!is64Bit || T.getOS() == Triple::Lv2)
+ Ret += "-p:32:32";
+
+ // Note, the alignment values for f64 and i64 on ppc64 in Darwin
+ // documentation are wrong; these are correct (i.e. "what gcc does").
+ if (is64Bit || !T.isOSDarwin())
+ Ret += "-i64:64";
+ else
+ Ret += "-f64:32:64";
+
+ // PPC64 has 32 and 64 bit registers, PPC32 has only 32 bit ones.
+ if (is64Bit)
+ Ret += "-n32:64";
+ else
+ Ret += "-n32";
+
+ return Ret;
+}
+
static std::string computeFSAdditions(StringRef FS, CodeGenOpt::Level OL, StringRef TT) {
std::string FullFS = FS;
Triple TargetTriple(TT);
@@ -58,6 +108,14 @@ static std::string computeFSAdditions(StringRef FS, CodeGenOpt::Level OL, String
else
FullFS = "+crbits";
}
+
+ if (OL != CodeGenOpt::None) {
+ if (!FullFS.empty())
+ FullFS = "+invariant-function-descriptors," + FullFS;
+ else
+ FullFS = "+invariant-function-descriptors";
+ }
+
return FullFS;
}
@@ -70,6 +128,30 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return make_unique<PPC64LinuxTargetObjectFile>();
}
+static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
+ const TargetOptions &Options) {
+ if (Options.MCOptions.getABIName().startswith("elfv1"))
+ return PPCTargetMachine::PPC_ABI_ELFv1;
+ else if (Options.MCOptions.getABIName().startswith("elfv2"))
+ return PPCTargetMachine::PPC_ABI_ELFv2;
+
+ assert(Options.MCOptions.getABIName().empty() &&
+ "Unknown target-abi option!");
+
+ if (!TT.isMacOSX()) {
+ switch (TT.getArch()) {
+ case Triple::ppc64le:
+ return PPCTargetMachine::PPC_ABI_ELFv2;
+ case Triple::ppc64:
+ return PPCTargetMachine::PPC_ABI_ELFv1;
+    default:
+      break;
+ }
+ }
+ return PPCTargetMachine::PPC_ABI_UNKNOWN;
+}
+
// The FeatureString here is a little subtle. We are modifying the feature string
// with what are (currently) non-function specific overrides as it goes into the
// LLVMTargetMachine constructor and then using the stored value in the
@@ -81,7 +163,8 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU,
: LLVMTargetMachine(T, TT, CPU, computeFSAdditions(FS, OL, TT), Options, RM,
CM, OL),
TLOF(createTLOF(Triple(getTargetTriple()))),
- Subtarget(TT, CPU, TargetFS, *this) {
+ TargetABI(computeTargetABI(Triple(TT), Options)),
+ DL(getDataLayoutString(Triple(TT))), Subtarget(TT, CPU, TargetFS, *this) {
initAsmInfo();
}
@@ -109,11 +192,8 @@ PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT,
const PPCSubtarget *
PPCTargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
@@ -148,17 +228,13 @@ public:
return getTM<PPCTargetMachine>();
}
- const PPCSubtarget &getPPCSubtarget() const {
- return *getPPCTargetMachine().getSubtargetImpl();
- }
-
void addIRPasses() override;
bool addPreISel() override;
bool addILPOpts() override;
bool addInstSelector() override;
- bool addPreRegAlloc() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -168,10 +244,37 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
void PPCPassConfig::addIRPasses() {
addPass(createAtomicExpandPass(&getPPCTargetMachine()));
+
+ // For the BG/Q (or if explicitly requested), add explicit data prefetch
+ // intrinsics.
+ bool UsePrefetching =
+ Triple(TM->getTargetTriple()).getVendor() == Triple::BGQ &&
+ getOptLevel() != CodeGenOpt::None;
+ if (EnablePrefetch.getNumOccurrences() > 0)
+ UsePrefetching = EnablePrefetch;
+ if (UsePrefetching)
+ addPass(createPPCLoopDataPrefetchPass());
+
+ if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
+ // Call SeparateConstOffsetFromGEP pass to extract constants within indices
+ // and lower a GEP with multiple indices to either arithmetic operations or
+ // multiple GEPs with single index.
+ addPass(createSeparateConstOffsetFromGEPPass(TM, true));
+ // Call EarlyCSE pass to find and remove subexpressions in the lowered
+ // result.
+ addPass(createEarlyCSEPass());
+ // Do loop invariant code motion in case part of the lowered result is
+ // invariant.
+ addPass(createLICMPass());
+ }
+
TargetPassConfig::addIRPasses();
}
bool PPCPassConfig::addPreISel() {
+ if (!DisablePreIncPrep && getOptLevel() != CodeGenOpt::None)
+ addPass(createPPCLoopPreIncPrepPass(getPPCTargetMachine()));
+
if (!DisableCTRLoops && getOptLevel() != CodeGenOpt::None)
addPass(createPPCCTRLoops(getPPCTargetMachine()));
@@ -196,35 +299,27 @@ bool PPCPassConfig::addInstSelector() {
return false;
}
-bool PPCPassConfig::addPreRegAlloc() {
+void PPCPassConfig::addPreRegAlloc() {
initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
insertPass(VSXFMAMutateEarly ? &RegisterCoalescerID : &MachineSchedulerID,
&PPCVSXFMAMutateID);
- return false;
+ if (getPPCTargetMachine().getRelocationModel() == Reloc::PIC_)
+ addPass(createPPCTLSDynamicCallPass());
}
-bool PPCPassConfig::addPreSched2() {
- addPass(createPPCVSXCopyCleanupPass());
-
+void PPCPassConfig::addPreSched2() {
if (getOptLevel() != CodeGenOpt::None)
addPass(&IfConverterID);
-
- return true;
}
-bool PPCPassConfig::addPreEmitPass() {
+void PPCPassConfig::addPreEmitPass() {
if (getOptLevel() != CodeGenOpt::None)
- addPass(createPPCEarlyReturnPass());
+ addPass(createPPCEarlyReturnPass(), false);
// Must run branch selection immediately preceding the asm printer.
- addPass(createPPCBranchSelectionPass());
- return false;
+ addPass(createPPCBranchSelectionPass(), false);
}
-void PPCTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our PPC pass. This
- // allows the PPC pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createPPCTargetTransformInfoPass(this));
+TargetIRAnalysis PPCTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &F) { return TargetTransformInfo(PPCTTIImpl(this, F)); });
}
-
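The getDataLayoutString() helper relocated into this file builds the layout
string piecewise from the triple. A standalone C++ mirror of that logic, for
illustration (plain booleans replace llvm::Triple, and the mangling component
is omitted since it depends on the object format):

    #include <string>

    // Hypothetical stand-in for getDataLayoutString(); the real function
    // above is authoritative.
    static std::string ppcDataLayoutSketch(bool Is64Bit, bool IsLittleEndian,
                                           bool IsDarwin, bool IsLv2) {
      std::string Ret = IsLittleEndian ? "e" : "E";
      if (!Is64Bit || IsLv2)
        Ret += "-p:32:32";   // 32-bit pointers (PPC32, or PS3/Lv2)
      if (Is64Bit || !IsDarwin)
        Ret += "-i64:64";
      else
        Ret += "-f64:32:64"; // 32-bit Darwin f64 alignment quirk
      Ret += Is64Bit ? "-n32:64" : "-n32";
      return Ret;            // e.g. "e-i64:64-n32:64" for ppc64le
    }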
diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h
index 5095d73..6508484 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/lib/Target/PowerPC/PPCTargetMachine.h
@@ -24,30 +24,41 @@ namespace llvm {
/// PPCTargetMachine - Common code between 32-bit and 64-bit PowerPC targets.
///
class PPCTargetMachine : public LLVMTargetMachine {
+public:
+ enum PPCABI { PPC_ABI_UNKNOWN, PPC_ABI_ELFv1, PPC_ABI_ELFv2 };
+private:
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ PPCABI TargetABI;
+ // Calculates type size & alignment
+ const DataLayout DL;
PPCSubtarget Subtarget;
mutable StringMap<std::unique_ptr<PPCSubtarget>> SubtargetMap;
public:
- PPCTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
+ PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
~PPCTargetMachine() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const PPCSubtarget *getSubtargetImpl() const override { return &Subtarget; }
const PPCSubtarget *getSubtargetImpl(const Function &F) const override;
// Pass Pipeline Configuration
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- /// \brief Register PPC analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
+
TargetLoweringObjectFile *getObjFileLowering() const override {
return TLOF.get();
}
+ bool isELFv2ABI() const { return TargetABI == PPC_ABI_ELFv2; }
+ bool isPPC64() const {
+ Triple TT(getTargetTriple());
+ return (TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le);
+  }
};
/// PPC32TargetMachine - PowerPC 32-bit target machine.
diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 37624ed..073bbb0 100644
--- a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -1,4 +1,4 @@
-//===-- PPCTargetTransformInfo.cpp - PPC specific TTI pass ----------------===//
+//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,17 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements a TargetTransformInfo analysis pass specific to the
-/// PPC target machine. It uses the target's detailed information to provide
-/// more precise answers to certain TTI queries, while letting the target
-/// independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-#include "PPC.h"
-#include "PPCTargetMachine.h"
+#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
@@ -28,115 +21,23 @@ using namespace llvm;
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializePPCTTIPass(PassRegistry &);
-}
-
-namespace {
-
-class PPCTTI final : public ImmutablePass, public TargetTransformInfo {
- const TargetMachine *TM;
- const PPCSubtarget *ST;
- const PPCTargetLowering *TLI;
-
-public:
- PPCTTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- PPCTTI(const PPCTargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
- TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializePPCTTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override {
- pushTTIStack(this);
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo*)this;
- return this;
- }
-
- /// \name Scalar TTI Implementations
- /// @{
- unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
-
- unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
- unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
-
- PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
- void getUnrollingPreferences(const Function *F, Loop *L,
- UnrollingPreferences &UP) const override;
-
- /// @}
-
- /// \name Vector TTI Implementations
- /// @{
-
- unsigned getNumberOfRegisters(bool Vector) const override;
- unsigned getRegisterBitWidth(bool Vector) const override;
- unsigned getMaxInterleaveFactor() const override;
- unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
- OperandValueKind, OperandValueProperties,
- OperandValueProperties) const override;
- unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const override;
- unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const override;
- unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const override;
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const override;
- unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const override;
-
- /// @}
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(PPCTTI, TargetTransformInfo, "ppctti",
- "PPC Target Transform Info", true, true, false)
-char PPCTTI::ID = 0;
-
-ImmutablePass *
-llvm::createPPCTargetTransformInfoPass(const PPCTargetMachine *TM) {
- return new PPCTTI(TM);
-}
-
-
//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//
-PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const {
+TargetTransformInfo::PopcntSupportKind
+PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
if (ST->hasPOPCNTD() && TyWidth <= 64)
- return PSK_FastHardware;
- return PSK_Software;
+ return TTI::PSK_FastHardware;
+ return TTI::PSK_Software;
}
-unsigned PPCTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+unsigned PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
if (DisablePPCConstHoist)
- return TargetTransformInfo::getIntImmCost(Imm, Ty);
+ return BaseT::getIntImmCost(Imm, Ty);
assert(Ty->isIntegerTy());
@@ -145,28 +46,28 @@ unsigned PPCTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
return ~0U;
if (Imm == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
if (Imm.getBitWidth() <= 64) {
if (isInt<16>(Imm.getSExtValue()))
- return TCC_Basic;
+ return TTI::TCC_Basic;
if (isInt<32>(Imm.getSExtValue())) {
// A constant that can be materialized using lis.
if ((Imm.getZExtValue() & 0xFFFF) == 0)
- return TCC_Basic;
+ return TTI::TCC_Basic;
- return 2 * TCC_Basic;
+ return 2 * TTI::TCC_Basic;
}
}
- return 4 * TCC_Basic;
+ return 4 * TTI::TCC_Basic;
}
-unsigned PPCTTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
- const APInt &Imm, Type *Ty) const {
+unsigned PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
if (DisablePPCConstHoist)
- return TargetTransformInfo::getIntImmCost(IID, Idx, Imm, Ty);
+ return BaseT::getIntImmCost(IID, Idx, Imm, Ty);
assert(Ty->isIntegerTy());
@@ -175,22 +76,32 @@ unsigned PPCTTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
return ~0U;
switch (IID) {
- default: return TCC_Free;
+ default:
+ return TTI::TCC_Free;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
- return TCC_Free;
+ return TTI::TCC_Free;
+ break;
+ case Intrinsic::experimental_stackmap:
+ if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TTI::TCC_Free;
+ break;
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TTI::TCC_Free;
break;
}
- return PPCTTI::getIntImmCost(Imm, Ty);
+ return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
-unsigned PPCTTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const {
+unsigned PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
if (DisablePPCConstHoist)
- return TargetTransformInfo::getIntImmCost(Opcode, Idx, Imm, Ty);
+ return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);
assert(Ty->isIntegerTy());
@@ -202,14 +113,15 @@ unsigned PPCTTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
ZeroFree = false;
switch (Opcode) {
- default: return TCC_Free;
+ default:
+ return TTI::TCC_Free;
case Instruction::GetElementPtr:
// Always hoist the base address of a GetElementPtr. This prevents the
// creation of new constants for every base constant that gets constant
// folded with the offset.
if (Idx == 0)
- return 2 * TCC_Basic;
- return TCC_Free;
+ return 2 * TTI::TCC_Basic;
+ return TTI::TCC_Free;
case Instruction::And:
RunFree = true; // (for the rotate-and-mask instructions)
// Fallthrough...
@@ -241,52 +153,54 @@ unsigned PPCTTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
}
if (ZeroFree && Imm == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
if (isInt<16>(Imm.getSExtValue()))
- return TCC_Free;
+ return TTI::TCC_Free;
if (RunFree) {
if (Imm.getBitWidth() <= 32 &&
(isShiftedMask_32(Imm.getZExtValue()) ||
isShiftedMask_32(~Imm.getZExtValue())))
- return TCC_Free;
-
+ return TTI::TCC_Free;
if (ST->isPPC64() &&
(isShiftedMask_64(Imm.getZExtValue()) ||
isShiftedMask_64(~Imm.getZExtValue())))
- return TCC_Free;
+ return TTI::TCC_Free;
}
if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
- return TCC_Free;
+ return TTI::TCC_Free;
if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
}
- return PPCTTI::getIntImmCost(Imm, Ty);
+ return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
-void PPCTTI::getUnrollingPreferences(const Function *F, Loop *L,
- UnrollingPreferences &UP) const {
- if (TM->getSubtarget<PPCSubtarget>(F).getDarwinDirective() == PPC::DIR_A2) {
+void PPCTTIImpl::getUnrollingPreferences(Loop *L,
+ TTI::UnrollingPreferences &UP) {
+ if (ST->getDarwinDirective() == PPC::DIR_A2) {
// The A2 is in-order with a deep pipeline, and concatenation unrolling
// helps expose latency-hiding opportunities to the instruction scheduler.
UP.Partial = UP.Runtime = true;
}
+
+ BaseT::getUnrollingPreferences(L, UP);
}
-unsigned PPCTTI::getNumberOfRegisters(bool Vector) const {
- if (Vector && !ST->hasAltivec())
+unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
+ if (Vector && !ST->hasAltivec() && !ST->hasQPX())
return 0;
return ST->hasVSX() ? 64 : 32;
}
-unsigned PPCTTI::getRegisterBitWidth(bool Vector) const {
+unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) {
if (Vector) {
+ if (ST->hasQPX()) return 256;
if (ST->hasAltivec()) return 128;
return 0;
}
@@ -297,7 +211,7 @@ unsigned PPCTTI::getRegisterBitWidth(bool Vector) const {
}
-unsigned PPCTTI::getMaxInterleaveFactor() const {
+unsigned PPCTTIImpl::getMaxInterleaveFactor() {
unsigned Directive = ST->getDarwinDirective();
// The 440 has no SIMD support, but floating-point instructions
// have a 5-cycle latency, so unroll by 5x for latency hiding.
@@ -313,40 +227,46 @@ unsigned PPCTTI::getMaxInterleaveFactor() const {
if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
return 1;
+ // For P7 and P8, floating-point instructions have a 6-cycle latency and
+ // there are two execution units, so unroll by 12x for latency hiding.
+ if (Directive == PPC::DIR_PWR7 ||
+ Directive == PPC::DIR_PWR8)
+ return 12;
+
// For most things, modern systems have two execution units (and
// out-of-order execution).
return 2;
}
-unsigned PPCTTI::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
- OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
- OperandValueProperties Opd2PropInfo) const {
+unsigned PPCTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
+ TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
// Fallback to the default implementation.
- return TargetTransformInfo::getArithmeticInstrCost(
- Opcode, Ty, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
+ Opd1PropInfo, Opd2PropInfo);
}
-unsigned PPCTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
- Type *SubTp) const {
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+unsigned PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) {
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
-unsigned PPCTTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
+unsigned PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
-unsigned PPCTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const {
- return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+unsigned PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) {
+ return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const {
+unsigned PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
int ISD = TLI->InstructionOpcodeToISD(Opcode);
@@ -357,7 +277,13 @@ unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
if (Index == 0)
return 0;
- return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ return BaseT::getVectorInstrCost(Opcode, Val, Index);
+ } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
+ // Floating point scalars are already located in index #0.
+ if (Index == 0)
+ return 0;
+
+ return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
// Estimated cost of a load-hit-store delay. This was obtained
@@ -374,21 +300,20 @@ unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
// these need to be estimated as very costly.
if (ISD == ISD::EXTRACT_VECTOR_ELT ||
ISD == ISD::INSERT_VECTOR_ELT)
- return LHSPenalty +
- TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);
- return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
-unsigned PPCTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const {
+unsigned PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
"Invalid Opcode");
- unsigned Cost =
- TargetTransformInfo::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
+ unsigned Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
// VSX loads/stores support unaligned access.
if (ST->hasVSX()) {
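The hunks above are part of the tree-wide TTI rework: the virtual-dispatch PPCTTI pass becomes PPCTTIImpl, and fallbacks now go through BaseT:: instead of TargetTransformInfo::. As a minimal sketch of the CRTP delegation this relies on (invented names, not the real LLVM classes):

    template <typename T> struct BasicTTIBaseSketch {
      unsigned getIntImmCost() { return 1; } // generic fallback cost
      unsigned query() {
        // Static dispatch to the most-derived class; no vtable involved.
        return static_cast<T *>(this)->getIntImmCost();
      }
    };
    struct PPCTTISketch : BasicTTIBaseSketch<PPCTTISketch> {
      unsigned getIntImmCost() { return 0; } // target: this immediate is free
    };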
diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.h b/lib/Target/PowerPC/PPCTargetTransformInfo.h
new file mode 100644
index 0000000..cef7079
--- /dev/null
+++ b/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -0,0 +1,103 @@
+//===-- PPCTargetTransformInfo.h - PPC specific TTI -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a TargetTransformInfo::Concept conforming object
+/// specific to the PPC target machine. It uses the target's detailed
+/// information to provide more precise answers to certain TTI queries,
+/// while letting the target-independent and default TTI implementations
+/// handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_POWERPC_PPCTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_POWERPC_PPCTARGETTRANSFORMINFO_H
+
+#include "PPC.h"
+#include "PPCTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
+ typedef BasicTTIImplBase<PPCTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const PPCSubtarget *ST;
+ const PPCTargetLowering *TLI;
+
+ const PPCSubtarget *getST() const { return ST; }
+ const PPCTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit PPCTTIImpl(const PPCTargetMachine *TM, Function &F)
+ : BaseT(TM), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ PPCTTIImpl(const PPCTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ PPCTTIImpl(PPCTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ PPCTTIImpl &operator=(const PPCTTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ PPCTTIImpl &operator=(PPCTTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ using BaseT::getIntImmCost;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty);
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+ void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector);
+ unsigned getRegisterBitWidth(bool Vector);
+ unsigned getMaxInterleaveFactor();
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp);
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace);
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
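As a usage sketch only, assuming an in-tree caller that already holds a PPCTargetMachine *TM and a Function &F (the real entry point is the target's IR analysis hook, which this diff does not show), the per-function constructor above means subtarget-dependent answers track the function being compiled:

    unsigned vectorRegCount(const PPCTargetMachine *TM, Function &F) {
      PPCTTIImpl TTI(TM, F); // ST and TLI are resolved from F's subtarget
      // Per the implementation earlier in this diff: 0 without Altivec/QPX,
      // 64 with VSX, otherwise 32.
      return TTI.getNumberOfRegisters(/*Vector=*/true);
    }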
diff --git a/lib/Target/PowerPC/PPCVSXCopy.cpp b/lib/Target/PowerPC/PPCVSXCopy.cpp
new file mode 100644
index 0000000..5e3ae2a
--- /dev/null
+++ b/lib/Target/PowerPC/PPCVSXCopy.cpp
@@ -0,0 +1,176 @@
+//===-------------- PPCVSXCopy.cpp - VSX Copy Legalization ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A pass which deals with the complexity of generating legal VSX register
+// copies to/from register classes which partially overlap with the VSX
+// register file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCInstrInfo.h"
+#include "MCTargetDesc/PPCPredicates.h"
+#include "PPC.h"
+#include "PPCHazardRecognizers.h"
+#include "PPCInstrBuilder.h"
+#include "PPCMachineFunctionInfo.h"
+#include "PPCTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-vsx-copy"
+
+namespace llvm {
+ void initializePPCVSXCopyPass(PassRegistry&);
+}
+
+namespace {
+ // PPCVSXCopy pass - For copies between VSX registers and non-VSX registers
+ // (Altivec and scalar floating-point registers), we need to transform the
+ // copies into subregister copies with other restrictions.
+ struct PPCVSXCopy : public MachineFunctionPass {
+ static char ID;
+ PPCVSXCopy() : MachineFunctionPass(ID) {
+ initializePPCVSXCopyPass(*PassRegistry::getPassRegistry());
+ }
+
+ const TargetInstrInfo *TII;
+
+ bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC,
+ MachineRegisterInfo &MRI) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ return RC->hasSubClassEq(MRI.getRegClass(Reg));
+ } else if (RC->contains(Reg)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool IsVSReg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::VSRCRegClass, MRI);
+ }
+
+ bool IsVRReg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::VRRCRegClass, MRI);
+ }
+
+ bool IsF8Reg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::F8RCRegClass, MRI);
+ }
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+ if (!MI->isFullCopy())
+ continue;
+
+ MachineOperand &DstMO = MI->getOperand(0);
+ MachineOperand &SrcMO = MI->getOperand(1);
+
+ if ( IsVSReg(DstMO.getReg(), MRI) &&
+ !IsVSReg(SrcMO.getReg(), MRI)) {
+ // This is a copy *to* a VSX register from a non-VSX register.
+ Changed = true;
+
+ const TargetRegisterClass *SrcRC =
+ IsVRReg(SrcMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
+ &PPC::VSLRCRegClass;
+ assert((IsF8Reg(SrcMO.getReg(), MRI) ||
+ IsVRReg(SrcMO.getReg(), MRI)) &&
+ "Unknown source for a VSX copy");
+
+ unsigned NewVReg = MRI.createVirtualRegister(SrcRC);
+ BuildMI(MBB, MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg)
+ .addImm(1) // add 1, not 0, because there is no implicit clearing
+ // of the high bits.
+ .addOperand(SrcMO)
+ .addImm(IsVRReg(SrcMO.getReg(), MRI) ? PPC::sub_128 :
+ PPC::sub_64);
+
+ // The source of the original copy is now the new virtual register.
+ SrcMO.setReg(NewVReg);
+ } else if (!IsVSReg(DstMO.getReg(), MRI) &&
+ IsVSReg(SrcMO.getReg(), MRI)) {
+ // This is a copy *from* a VSX register to a non-VSX register.
+ Changed = true;
+
+ const TargetRegisterClass *DstRC =
+ IsVRReg(DstMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
+ &PPC::VSLRCRegClass;
+ assert((IsF8Reg(DstMO.getReg(), MRI) ||
+ IsVRReg(DstMO.getReg(), MRI)) &&
+ "Unknown destination for a VSX copy");
+
+ // Copy the VSX value into a new VSX register of the correct subclass.
+ unsigned NewVReg = MRI.createVirtualRegister(DstRC);
+ BuildMI(MBB, MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVReg)
+ .addOperand(SrcMO);
+
+ // Transform the original copy into a subregister extraction copy.
+ SrcMO.setReg(NewVReg);
+ SrcMO.setSubReg(IsVRReg(DstMO.getReg(), MRI) ? PPC::sub_128 :
+ PPC::sub_64);
+ }
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ // If we don't have VSX on the subtarget, don't do anything.
+ const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
+ if (!STI.hasVSX())
+ return false;
+ TII = STI.getInstrInfo();
+
+ bool Changed = false;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS(PPCVSXCopy, DEBUG_TYPE,
+ "PowerPC VSX Copy Legalization", false, false)
+
+char PPCVSXCopy::ID = 0;
+FunctionPass*
+llvm::createPPCVSXCopyPass() { return new PPCVSXCopy(); }
+
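Schematically, with invented virtual-register names, the pass rewrites an illegal cross-class full copy in each direction roughly as follows:

    // Copy *to* VSX, before:   %vsx = COPY %f8            ; VSRC <- F8RC
    // after:                   %tmp = SUBREG_TO_REG 1, %f8, sub_64
    //                          %vsx = COPY %tmp
    // Copy *from* VSX, before: %f8  = COPY %vsx
    // after:                   %tmp = COPY %vsx           ; VSLRC subclass
    //                          %f8  = COPY %tmp:sub_64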
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
new file mode 100644
index 0000000..f352fa6
--- /dev/null
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -0,0 +1,335 @@
+//===--------------- PPCVSXFMAMutate.cpp - VSX FMA Mutation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass mutates the form of VSX FMA instructions to avoid unnecessary
+// copies.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCInstrInfo.h"
+#include "MCTargetDesc/PPCPredicates.h"
+#include "PPC.h"
+#include "PPCInstrBuilder.h"
+#include "PPCMachineFunctionInfo.h"
+#include "PPCTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableVSXFMAMutate("disable-ppc-vsx-fma-mutation",
+    cl::desc("Disable VSX FMA instruction mutation"), cl::Hidden);
+
+#define DEBUG_TYPE "ppc-vsx-fma-mutate"
+
+namespace llvm { namespace PPC {
+ int getAltVSXFMAOpcode(uint16_t Opcode);
+} }
+
+namespace {
+ // PPCVSXFMAMutate pass - Mutate A-type VSX FMA instructions (which take
+ // their addend from the destination register) into M-type forms when
+ // doing so lets the copy feeding the addend be eliminated.
+ struct PPCVSXFMAMutate : public MachineFunctionPass {
+ static char ID;
+ PPCVSXFMAMutate() : MachineFunctionPass(ID) {
+ initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
+ }
+
+ LiveIntervals *LIS;
+ const PPCInstrInfo *TII;
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+
+ // The default (A-type) VSX FMA form kills the addend (it is taken from
+ // the target register, which is then updated to reflect the result of
+ // the FMA). If the instruction, however, kills one of the registers
+ // used for the product, then we can use the M-form instruction (which
+ // will take that value from the to-be-defined register).
+
+ int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
+ if (AltOpc == -1)
+ continue;
+
+ // This pass is run after register coalescing, and so we're looking for
+ // a situation like this:
+ // ...
+ // %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+ // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
+ // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // ...
+ // %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
+ // %RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+ // ...
+ // Where we can eliminate the copy by changing from the A-type to the
+ // M-type instruction. Specifically, for this example, this means:
+ // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
+ // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // is replaced by:
+ // %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
+ // %RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
+ // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+
+ SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
+
+ VNInfo *AddendValNo =
+ LIS->getInterval(MI->getOperand(1).getReg()).Query(FMAIdx).valueIn();
+ MachineInstr *AddendMI = LIS->getInstructionFromIndex(AddendValNo->def);
+
+ // The addend and this instruction must be in the same block.
+
+ if (!AddendMI || AddendMI->getParent() != MI->getParent())
+ continue;
+
+ // The addend must be a full copy within the same register class.
+
+ if (!AddendMI->isFullCopy())
+ continue;
+
+ unsigned AddendSrcReg = AddendMI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg)) {
+ if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) !=
+ MRI.getRegClass(AddendSrcReg))
+ continue;
+ } else {
+ // If AddendSrcReg is a physical register, make sure the destination
+ // register class contains it.
+ if (!MRI.getRegClass(AddendMI->getOperand(0).getReg())
+ ->contains(AddendSrcReg))
+ continue;
+ }
+
+ // In theory, there could be other uses of the addend copy before this
+ // fma. We could deal with this, but that would require additional
+ // logic below and I suspect it will not occur in any relevant
+ // situations. Additionally, check whether the copy source is killed
+ // prior to the fma. In order to replace the addend here with the
+ // source of the copy, it must still be live here. We can't use
+ // interval testing for a physical register, so as long as we're
+ // walking the MIs we may as well test liveness here.
+ bool OtherUsers = false, KillsAddendSrc = false;
+ for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
+ J != JE; --J) {
+ if (J->readsVirtualRegister(AddendMI->getOperand(0).getReg())) {
+ OtherUsers = true;
+ break;
+ }
+ if (J->modifiesRegister(AddendSrcReg, TRI) ||
+ J->killsRegister(AddendSrcReg, TRI)) {
+ KillsAddendSrc = true;
+ break;
+ }
+ }
+
+ if (OtherUsers || KillsAddendSrc)
+ continue;
+
+ // Find one of the product operands that is killed by this instruction.
+
+ unsigned KilledProdOp = 0, OtherProdOp = 0;
+ if (LIS->getInterval(MI->getOperand(2).getReg())
+ .Query(FMAIdx).isKill()) {
+ KilledProdOp = 2;
+ OtherProdOp = 3;
+ } else if (LIS->getInterval(MI->getOperand(3).getReg())
+ .Query(FMAIdx).isKill()) {
+ KilledProdOp = 3;
+ OtherProdOp = 2;
+ }
+
+ // If there are no killed product operands, then this transformation is
+ // likely not profitable.
+ if (!KilledProdOp)
+ continue;
+
+ // For virtual registers, verify that the addend source register
+ // is live here (as should have been assured above).
+ assert((!TargetRegisterInfo::isVirtualRegister(AddendSrcReg) ||
+ LIS->getInterval(AddendSrcReg).liveAt(FMAIdx)) &&
+ "Addend source register is not live!");
+
+ // Transform: (O2 * O3) + O1 -> (O2 * O1) + O3.
+
+ unsigned AddReg = AddendMI->getOperand(1).getReg();
+ unsigned KilledProdReg = MI->getOperand(KilledProdOp).getReg();
+ unsigned OtherProdReg = MI->getOperand(OtherProdOp).getReg();
+
+ unsigned AddSubReg = AddendMI->getOperand(1).getSubReg();
+ unsigned KilledProdSubReg = MI->getOperand(KilledProdOp).getSubReg();
+ unsigned OtherProdSubReg = MI->getOperand(OtherProdOp).getSubReg();
+
+ bool AddRegKill = AddendMI->getOperand(1).isKill();
+ bool KilledProdRegKill = MI->getOperand(KilledProdOp).isKill();
+ bool OtherProdRegKill = MI->getOperand(OtherProdOp).isKill();
+
+ bool AddRegUndef = AddendMI->getOperand(1).isUndef();
+ bool KilledProdRegUndef = MI->getOperand(KilledProdOp).isUndef();
+ bool OtherProdRegUndef = MI->getOperand(OtherProdOp).isUndef();
+
+ unsigned OldFMAReg = MI->getOperand(0).getReg();
+
+ // The transformation doesn't work well with things like:
+ // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
+ // so leave such things alone.
+ if (OldFMAReg == KilledProdReg)
+ continue;
+
+ assert(OldFMAReg == AddendMI->getOperand(0).getReg() &&
+ "Addend copy not tied to old FMA output!");
+
+ DEBUG(dbgs() << "VSX FMA Mutation:\n " << *MI);
+
+ MI->getOperand(0).setReg(KilledProdReg);
+ MI->getOperand(1).setReg(KilledProdReg);
+ MI->getOperand(3).setReg(AddReg);
+ MI->getOperand(2).setReg(OtherProdReg);
+
+ MI->getOperand(0).setSubReg(KilledProdSubReg);
+ MI->getOperand(1).setSubReg(KilledProdSubReg);
+ MI->getOperand(3).setSubReg(AddSubReg);
+ MI->getOperand(2).setSubReg(OtherProdSubReg);
+
+ MI->getOperand(1).setIsKill(KilledProdRegKill);
+ MI->getOperand(3).setIsKill(AddRegKill);
+ MI->getOperand(2).setIsKill(OtherProdRegKill);
+
+ MI->getOperand(1).setIsUndef(KilledProdRegUndef);
+ MI->getOperand(3).setIsUndef(AddRegUndef);
+ MI->getOperand(2).setIsUndef(OtherProdRegUndef);
+
+ MI->setDesc(TII->get(AltOpc));
+
+ DEBUG(dbgs() << " -> " << *MI);
+
+ // The killed product operand was killed here, so we can reuse it now
+ // for the result of the fma.
+
+ LiveInterval &FMAInt = LIS->getInterval(OldFMAReg);
+ VNInfo *FMAValNo = FMAInt.getVNInfoAt(FMAIdx.getRegSlot());
+ for (auto UI = MRI.reg_nodbg_begin(OldFMAReg), UE = MRI.reg_nodbg_end();
+ UI != UE;) {
+ MachineOperand &UseMO = *UI;
+ MachineInstr *UseMI = UseMO.getParent();
+ ++UI;
+
+ // Don't replace the result register of the copy we're about to erase.
+ if (UseMI == AddendMI)
+ continue;
+
+ UseMO.setReg(KilledProdReg);
+ UseMO.setSubReg(KilledProdSubReg);
+ }
+
+ // Extend the live intervals of the killed product operand to hold the
+ // fma result.
+
+ LiveInterval &NewFMAInt = LIS->getInterval(KilledProdReg);
+ for (LiveInterval::iterator AI = FMAInt.begin(), AE = FMAInt.end();
+ AI != AE; ++AI) {
+ // Don't add the segment that corresponds to the original copy.
+ if (AI->valno == AddendValNo)
+ continue;
+
+ VNInfo *NewFMAValNo =
+ NewFMAInt.getNextValue(AI->start,
+ LIS->getVNInfoAllocator());
+
+ NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
+ NewFMAValNo));
+ }
+ DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
+
+ FMAInt.removeValNo(FMAValNo);
+ DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
+
+ // Remove the (now unused) copy.
+
+ DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
+ LIS->RemoveMachineInstrFromMaps(AddendMI);
+ AddendMI->eraseFromParent();
+
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ // If we don't have VSX then go ahead and return without doing
+ // anything.
+ const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
+ if (!STI.hasVSX())
+ return false;
+
+ LIS = &getAnalysis<LiveIntervals>();
+
+ TII = STI.getInstrInfo();
+
+ bool Changed = false;
+
+ if (DisableVSXFMAMutate)
+ return Changed;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS_BEGIN(PPCVSXFMAMutate, DEBUG_TYPE,
+ "PowerPC VSX FMA Mutation", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(PPCVSXFMAMutate, DEBUG_TYPE,
+ "PowerPC VSX FMA Mutation", false, false)
+
+char &llvm::PPCVSXFMAMutateID = PPCVSXFMAMutate::ID;
+
+char PPCVSXFMAMutate::ID = 0;
+FunctionPass*
+llvm::createPPCVSXFMAMutatePass() { return new PPCVSXFMAMutate(); }
+
+
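For intuition, the semantics of the two FMA forms the pass flips between can be modeled in plain C++ (double standing in for a VSX register; xsmaddadp is the A-type, xsmaddmdp the M-type):

    double fmaAType(double T, double A, double B) { return T + A * B; } // dest held the addend
    double fmaMType(double T, double A, double B) { return T * A + B; } // dest held a multiplicand

When one product operand dies at the FMA, switching A-type to M-type lets that dying register carry the result, so the COPY that fed the addend becomes dead and is erased above.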
diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
index 514f840..4132b04 100644
--- a/lib/Target/PowerPC/README.txt
+++ b/lib/Target/PowerPC/README.txt
@@ -5,38 +5,6 @@ TODO:
===-------------------------------------------------------------------------===
-On PPC64, this:
-
-long f2 (long x) { return 0xfffffff000000000UL; }
-long f3 (long x) { return 0x1ffffffffUL; }
-
-could compile into:
-
-_f2:
- li r3,-1
- rldicr r3,r3,0,27
- blr
-_f3:
- li r3,-1
- rldicl r3,r3,0,31
- blr
-
-we produce:
-
-_f2:
- lis r2, 4095
- ori r2, r2, 65535
- sldi r3, r2, 36
- blr
-_f3:
- li r2, 1
- sldi r2, r2, 32
- oris r2, r2, 65535
- ori r3, r2, 65535
- blr
-
-===-------------------------------------------------------------------------===
-
This code:
unsigned add32carry(unsigned sum, unsigned x) {
@@ -63,40 +31,6 @@ Ick.
===-------------------------------------------------------------------------===
-Support 'update' load/store instructions. These are cracked on the G5, but are
-still a codesize win.
-
-With preinc enabled, this:
-
-long *%test4(long *%X, long *%dest) {
- %Y = getelementptr long* %X, int 4
- %A = load long* %Y
- store long %A, long* %dest
- ret long* %Y
-}
-
-compiles to:
-
-_test4:
- mr r2, r3
- lwzu r5, 32(r2)
- lwz r3, 36(r3)
- stw r5, 0(r4)
- stw r3, 4(r4)
- mr r3, r2
- blr
-
-with -sched=list-burr, I get:
-
-_test4:
- lwz r2, 36(r3)
- lwzu r5, 32(r3)
- stw r2, 4(r4)
- stw r5, 0(r4)
- blr
-
-===-------------------------------------------------------------------------===
-
We compile the hottest inner loop of viterbi to:
li r6, 0
@@ -184,33 +118,6 @@ http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html
===-------------------------------------------------------------------------===
-Compile offsets from allocas:
-
-int *%test() {
- %X = alloca { int, int }
- %Y = getelementptr {int,int}* %X, int 0, uint 1
- ret int* %Y
-}
-
-into a single add, not two:
-
-_test:
- addi r2, r1, -8
- addi r3, r2, 4
- blr
-
---> important for C++.
-
-===-------------------------------------------------------------------------===
-
-No loads or stores of the constants should be needed:
-
-struct foo { double X, Y; };
-void xxx(struct foo F);
-void bar() { struct foo R = { 1.0, 2.0 }; xxx(R); }
-
-===-------------------------------------------------------------------------===
-
Darwin Stub removal:
We still generate calls to foo$stub, and stubs, on Darwin. This is not
@@ -269,57 +176,6 @@ just fastcc.
===-------------------------------------------------------------------------===
-Compile this:
-
-int foo(int a) {
- int b = (a < 8);
- if (b) {
- return b * 3; // ignore the fact that this is always 3.
- } else {
- return 2;
- }
-}
-
-into something not this:
-
-_foo:
-1) cmpwi cr7, r3, 8
- mfcr r2, 1
- rlwinm r2, r2, 29, 31, 31
-1) cmpwi cr0, r3, 7
- bgt cr0, LBB1_2 ; UnifiedReturnBlock
-LBB1_1: ; then
- rlwinm r2, r2, 0, 31, 31
- mulli r3, r2, 3
- blr
-LBB1_2: ; UnifiedReturnBlock
- li r3, 2
- blr
-
-In particular, the two compares (marked 1) could be shared by reversing one.
-This could be done in the dag combiner, by swapping a BR_CC when a SETCC of the
-same operands (but backwards) exists. In this case, this wouldn't save us
-anything though, because the compares still wouldn't be shared.
-
-===-------------------------------------------------------------------------===
-
-We should custom expand setcc instead of pretending that we have it. That
-would allow us to expose the access of the crbit after the mfcr, allowing
-that access to be trivially folded into other ops. A simple example:
-
-int foo(int a, int b) { return (a < b) << 4; }
-
-compiles into:
-
-_foo:
- cmpw cr7, r3, r4
- mfcr r2, 1
- rlwinm r2, r2, 29, 31, 31
- slwi r3, r2, 4
- blr
-
-===-------------------------------------------------------------------------===
-
Fold add and sub with constant into non-extern, non-weak addresses so this:
static int a;
@@ -347,48 +203,6 @@ _foo:
===-------------------------------------------------------------------------===
-We generate really bad code for this:
-
-int f(signed char *a, _Bool b, _Bool c) {
- signed char t = 0;
- if (b) t = *a;
- if (c) *a = t;
-}
-
-===-------------------------------------------------------------------------===
-
-This:
-int test(unsigned *P) { return *P >> 24; }
-
-Should compile to:
-
-_test:
- lbz r3,0(r3)
- blr
-
-not:
-
-_test:
- lwz r2, 0(r3)
- srwi r3, r2, 24
- blr
-
-===-------------------------------------------------------------------------===
-
-On the G5, logical CR operations are more expensive in their three
-address form: ops that read/write the same register are half as expensive as
-those that read from two registers that are different from their destination.
-
-We should model this with two separate instructions. The isel should generate
-the "two address" form of the instructions. When the register allocator
-detects that it needs to insert a copy due to the two-addresness of the CR
-logical op, it will invoke PPCInstrInfo::convertToThreeAddress. At this point
-we can convert to the "three address" instruction, to save code space.
-
-This only matters when we start generating cr logical ops.
-
-===-------------------------------------------------------------------------===
-
We should compile these two functions to the same thing:
#include <stdlib.h>
@@ -474,27 +288,6 @@ http://www.lcs.mit.edu/pubs/pdf/MIT-LCS-TM-600.pdf
===-------------------------------------------------------------------------===
-float foo(float X) { return (int)(X); }
-
-Currently produces:
-
-_foo:
- fctiwz f0, f1
- stfd f0, -8(r1)
- lwz r2, -4(r1)
- extsw r2, r2
- std r2, -16(r1)
- lfd f0, -16(r1)
- fcfid f0, f0
- frsp f1, f0
- blr
-
-We could use a target dag combine to turn the lwz/extsw into an lwa when the
-lwz has a single use. Since LWA is cracked anyway, this would be a codesize
-win only.
-
-===-------------------------------------------------------------------------===
-
We generate ugly code for this:
void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
@@ -552,32 +345,6 @@ _foo:
===-------------------------------------------------------------------------===
-We compile:
-
-unsigned test6(unsigned x) {
- return ((x & 0x00FF0000) >> 16) | ((x & 0x000000FF) << 16);
-}
-
-into:
-
-_test6:
- lis r2, 255
- rlwinm r3, r3, 16, 0, 31
- ori r2, r2, 255
- and r3, r3, r2
- blr
-
-GCC gets it down to:
-
-_test6:
- rlwinm r0,r3,16,8,15
- rlwinm r3,r3,16,24,31
- or r3,r3,r0
- blr
-
-
-===-------------------------------------------------------------------------===
-
Consider a function like this:
float foo(float X) { return X + 1234.4123f; }
@@ -674,48 +441,6 @@ _bar:
===-------------------------------------------------------------------------===
-We currently compile 32-bit bswap:
-
-declare i32 @llvm.bswap.i32(i32 %A)
-define i32 @test(i32 %A) {
- %B = call i32 @llvm.bswap.i32(i32 %A)
- ret i32 %B
-}
-
-to:
-
-_test:
- rlwinm r2, r3, 24, 16, 23
- slwi r4, r3, 24
- rlwimi r2, r3, 8, 24, 31
- rlwimi r4, r3, 8, 8, 15
- rlwimi r4, r2, 0, 16, 31
- mr r3, r4
- blr
-
-it would be more efficient to produce:
-
-_foo: mr r0,r3
- rlwinm r3,r3,8,0xffffffff
- rlwimi r3,r0,24,0,7
- rlwimi r3,r0,24,16,23
- blr
-
-===-------------------------------------------------------------------------===
-
-test/CodeGen/PowerPC/2007-03-24-cntlzd.ll compiles to:
-
-__ZNK4llvm5APInt17countLeadingZerosEv:
- ld r2, 0(r3)
- cntlzd r2, r2
- or r2, r2, r2 <<-- silly.
- addi r3, r2, -64
- blr
-
-The dead or is a 'truncate' from 64- to 32-bits.
-
-===-------------------------------------------------------------------------===
-
We generate horrible ppc code for this:
#define N 2000000
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 261075e..fb87cc5 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -38,6 +38,7 @@ FunctionPass *createAMDGPUCFGStructurizerPass();
// SI Passes
FunctionPass *createSITypeRewriter();
FunctionPass *createSIAnnotateControlFlowPass();
+FunctionPass *createSIFoldOperandsPass();
FunctionPass *createSILowerI1CopiesPass();
FunctionPass *createSIShrinkInstructionsPass();
FunctionPass *createSILoadStoreOptimizerPass(TargetMachine &tm);
@@ -46,6 +47,10 @@ FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
FunctionPass *createSIFixSGPRLiveRangesPass();
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
FunctionPass *createSIInsertWaits(TargetMachine &tm);
+FunctionPass *createSIPrepareScratchRegs();
+
+void initializeSIFoldOperandsPass(PassRegistry &);
+extern char &SIFoldOperandsID;
void initializeSILowerI1CopiesPass(PassRegistry &);
extern char &SILowerI1CopiesID;
@@ -59,19 +64,20 @@ Pass *createAMDGPUStructurizeCFGPass();
FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
ModulePass *createAMDGPUAlwaysInlinePass();
-/// \brief Creates an AMDGPU-specific Target Transformation Info pass.
-ImmutablePass *
-createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM);
-
void initializeSIFixSGPRLiveRangesPass(PassRegistry&);
extern char &SIFixSGPRLiveRangesID;
extern Target TheAMDGPUTarget;
+extern Target TheGCNTarget;
namespace AMDGPU {
enum TargetIndex {
- TI_CONSTDATA_START
+ TI_CONSTDATA_START,
+ TI_SCRATCH_RSRC_DWORD0,
+ TI_SCRATCH_RSRC_DWORD1,
+ TI_SCRATCH_RSRC_DWORD2,
+ TI_SCRATCH_RSRC_DWORD3
};
}
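These creator/initializer declarations follow LLVM's usual machine-pass pattern; hypothetically (the actual call sites live in AMDGPUTargetMachine.cpp, outside this diff), they are used as:

    // one-time registration, typically from the target's initializer:
    //   initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
    // scheduling, from the target's TargetPassConfig subclass:
    //   addPass(createSIFoldOperandsPass());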
diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/R600/AMDGPU.td
index 4cf1243..a7d48b3 100644
--- a/lib/Target/R600/AMDGPU.td
+++ b/lib/Target/R600/AMDGPU.td
@@ -48,6 +48,12 @@ def FeatureFP64Denormals : SubtargetFeature<"fp64-denormals",
"Enable double precision denormal handling",
[FeatureFP64]>;
+def FeatureFastFMAF32 : SubtargetFeature<"fast-fmaf",
+ "FastFMAF32",
+ "true",
+ "Assuming f32 fma is at least as fast as mul + add",
+ []>;
+
// Some instructions do not support denormals despite this flag. Using
// fp32 denormals also causes instructions to run at the double
// precision rate for the device.
@@ -92,6 +98,11 @@ def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
"true",
"Support flat address space">;
+def FeatureVGPRSpilling : SubtargetFeature<"vgpr-spilling",
+ "EnableVGPRSpilling",
+ "true",
+ "Enable spilling of VGPRs to scratch memory">;
+
class SubtargetFeatureFetchLimit <string Value> :
SubtargetFeature <"fetch"#Value,
"TexVTXClauseSize",
@@ -147,10 +158,16 @@ def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
[Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
FeatureWavefrontSize64, FeatureFlatAddressSpace]>;
+
+def FeatureVolcanicIslands : SubtargetFeatureGeneration<"VOLCANIC_ISLANDS",
+ [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
+ FeatureWavefrontSize64, FeatureFlatAddressSpace]>;
+
//===----------------------------------------------------------------------===//
def AMDGPUInstrInfo : InstrInfo {
let guessInstructionProperties = 1;
+ let noNamedPositionallyEncodedOperands = 1;
}
def AMDGPUAsmParser : AsmParser {
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp
index 5511d7c..92bc314 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -18,6 +18,7 @@
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
+#include "AMDKernelCodeT.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
@@ -57,7 +58,7 @@ using namespace llvm;
// instructions to run at the double precision rate for the device so it's
// probably best to just report no single precision denormals.
static uint32_t getFPMode(const MachineFunction &F) {
- const AMDGPUSubtarget& ST = F.getTarget().getSubtarget<AMDGPUSubtarget>();
+ const AMDGPUSubtarget& ST = F.getSubtarget<AMDGPUSubtarget>();
// TODO: Is there any real use for the flush in only / flush out only modes?
uint32_t FP32Denormals =
@@ -72,19 +73,20 @@ static uint32_t getFPMode(const MachineFunction &F) {
FP_DENORM_MODE_DP(FP64Denormals);
}
-static AsmPrinter *createAMDGPUAsmPrinterPass(TargetMachine &tm,
- MCStreamer &Streamer) {
- return new AMDGPUAsmPrinter(tm, Streamer);
+static AsmPrinter *
+createAMDGPUAsmPrinterPass(TargetMachine &tm,
+ std::unique_ptr<MCStreamer> &&Streamer) {
+ return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}
extern "C" void LLVMInitializeR600AsmPrinter() {
TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
+ TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
}
-AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
- DisasmEnabled = TM.getSubtarget<AMDGPUSubtarget>().dumpCode();
-}
+AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
void AMDGPUAsmPrinter::EmitEndOfAsmFile(Module &M) {
@@ -106,14 +108,17 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
EmitFunctionHeader();
MCContext &Context = getObjFileLowering().getContext();
- const MCSectionELF *ConfigSection = Context.getELFSection(".AMDGPU.config",
- ELF::SHT_PROGBITS, 0,
- SectionKind::getReadOnly());
+ const MCSectionELF *ConfigSection =
+ Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
OutStreamer.SwitchSection(ConfigSection);
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+ const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
SIProgramInfo KernelInfo;
- if (STM.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ if (STM.isAmdHsaOS()) {
+ getSIProgramInfo(KernelInfo, MF);
+ EmitAmdKernelCodeT(MF, KernelInfo);
+ OutStreamer.EmitCodeAlignment(2 << (MF.getAlignment() - 1));
+ } else if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
getSIProgramInfo(KernelInfo, MF);
EmitProgramInfoSI(MF, KernelInfo);
} else {
@@ -128,10 +133,8 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
EmitFunctionBody();
if (isVerbose()) {
- const MCSectionELF *CommentSection
- = Context.getELFSection(".AMDGPU.csdata",
- ELF::SHT_PROGBITS, 0,
- SectionKind::getReadOnly());
+ const MCSectionELF *CommentSection =
+ Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
OutStreamer.SwitchSection(CommentSection);
if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
@@ -156,22 +159,16 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
}
if (STM.dumpCode()) {
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- MF.dump();
-#endif
- if (DisasmEnabled) {
- OutStreamer.SwitchSection(Context.getELFSection(".AMDGPU.disasm",
- ELF::SHT_NOTE, 0,
- SectionKind::getReadOnly()));
+ OutStreamer.SwitchSection(
+ Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));
- for (size_t i = 0; i < DisasmLines.size(); ++i) {
- std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
- Comment += " ; " + HexLines[i] + "\n";
+ for (size_t i = 0; i < DisasmLines.size(); ++i) {
+ std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
+ Comment += " ; " + HexLines[i] + "\n";
- OutStreamer.EmitBytes(StringRef(DisasmLines[i]));
- OutStreamer.EmitBytes(StringRef(Comment));
- }
+ OutStreamer.EmitBytes(StringRef(DisasmLines[i]));
+ OutStreamer.EmitBytes(StringRef(Comment));
}
}
@@ -181,10 +178,10 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
unsigned MaxGPR = 0;
bool killPixel = false;
- const R600RegisterInfo *RI = static_cast<const R600RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const R600RegisterInfo *RI =
+ static_cast<const R600RegisterInfo *>(STM.getRegisterInfo());
const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
@@ -240,13 +237,15 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
const MachineFunction &MF) const {
+ const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
uint64_t CodeSize = 0;
unsigned MaxSGPR = 0;
unsigned MaxVGPR = 0;
bool VCCUsed = false;
bool FlatUsed = false;
- const SIRegisterInfo *RI = static_cast<const SIRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const SIRegisterInfo *RI =
+ static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
@@ -285,7 +284,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
if (AMDGPU::SReg_32RegClass.contains(reg)) {
isSGPR = true;
width = 1;
- } else if (AMDGPU::VReg_32RegClass.contains(reg)) {
+ } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
isSGPR = false;
width = 1;
} else if (AMDGPU::SReg_64RegClass.contains(reg)) {
@@ -340,6 +339,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.NumVGPR = MaxVGPR + 1;
ProgInfo.NumSGPR = MaxSGPR + 1;
+ ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
+ ProgInfo.SGPRBlocks = (ProgInfo.NumSGPR - 1) / 8;
// Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
// register.
ProgInfo.FloatMode = getFPMode(MF);
@@ -356,21 +357,6 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.FlatUsed = FlatUsed;
ProgInfo.VCCUsed = VCCUsed;
ProgInfo.CodeLen = CodeSize;
-}
-
-void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
- const SIProgramInfo &KernelInfo) {
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
- const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-
- unsigned RsrcReg;
- switch (MFI->getShaderType()) {
- default: // Fall through
- case ShaderType::COMPUTE: RsrcReg = R_00B848_COMPUTE_PGM_RSRC1; break;
- case ShaderType::GEOMETRY: RsrcReg = R_00B228_SPI_SHADER_PGM_RSRC1_GS; break;
- case ShaderType::PIXEL: RsrcReg = R_00B028_SPI_SHADER_PGM_RSRC1_PS; break;
- case ShaderType::VERTEX: RsrcReg = R_00B128_SPI_SHADER_PGM_RSRC1_VS; break;
- }
unsigned LDSAlignShift;
if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
@@ -384,59 +370,203 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
unsigned LDSSpillSize = MFI->LDSWaveSpillSize *
MFI->getMaximumWorkGroupSize(MF);
- unsigned LDSBlocks =
- RoundUpToAlignment(MFI->LDSSize + LDSSpillSize,
- 1 << LDSAlignShift) >> LDSAlignShift;
+ ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
+ ProgInfo.LDSBlocks =
+ RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
// Scratch is allocated in 256 dword blocks.
unsigned ScratchAlignShift = 10;
// We need to program the hardware with the amount of scratch memory that
- // is used by the entire wave. KernelInfo.ScratchSize is the amount of
+ // is used by the entire wave. ProgInfo.ScratchSize is the amount of
// scratch memory used per thread.
- unsigned ScratchBlocks =
- RoundUpToAlignment(KernelInfo.ScratchSize * STM.getWavefrontSize(),
+ ProgInfo.ScratchBlocks =
+ RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
1 << ScratchAlignShift) >> ScratchAlignShift;
- unsigned VGPRBlocks = (KernelInfo.NumVGPR - 1) / 4;
- unsigned SGPRBlocks = (KernelInfo.NumSGPR - 1) / 8;
+ ProgInfo.ComputePGMRSrc1 =
+ S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
+ S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
+ S_00B848_PRIORITY(ProgInfo.Priority) |
+ S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
+ S_00B848_PRIV(ProgInfo.Priv) |
+ S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
+ S_00B848_IEEE_MODE(ProgInfo.DebugMode) |
+ S_00B848_IEEE_MODE(ProgInfo.IEEEMode);
+
+ ProgInfo.ComputePGMRSrc2 =
+ S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
+ S_00B84C_USER_SGPR(MFI->NumUserSGPRs) |
+ S_00B84C_TGID_X_EN(1) |
+ S_00B84C_TGID_Y_EN(1) |
+ S_00B84C_TGID_Z_EN(1) |
+ S_00B84C_TG_SIZE_EN(1) |
+ S_00B84C_TIDIG_COMP_CNT(2) |
+ S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks);
+}
+
+static unsigned getRsrcReg(unsigned ShaderType) {
+ switch (ShaderType) {
+ default: // Fall through
+ case ShaderType::COMPUTE: return R_00B848_COMPUTE_PGM_RSRC1;
+ case ShaderType::GEOMETRY: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
+ case ShaderType::PIXEL: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
+ case ShaderType::VERTEX: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
+ }
+}
+
+void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
+ const SIProgramInfo &KernelInfo) {
+ const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ unsigned RsrcReg = getRsrcReg(MFI->getShaderType());
if (MFI->getShaderType() == ShaderType::COMPUTE) {
OutStreamer.EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
- const uint32_t ComputePGMRSrc1 =
- S_00B848_VGPRS(VGPRBlocks) |
- S_00B848_SGPRS(SGPRBlocks) |
- S_00B848_PRIORITY(KernelInfo.Priority) |
- S_00B848_FLOAT_MODE(KernelInfo.FloatMode) |
- S_00B848_PRIV(KernelInfo.Priv) |
- S_00B848_DX10_CLAMP(KernelInfo.DX10Clamp) |
- S_00B848_IEEE_MODE(KernelInfo.DebugMode) |
- S_00B848_IEEE_MODE(KernelInfo.IEEEMode);
-
- OutStreamer.EmitIntValue(ComputePGMRSrc1, 4);
+ OutStreamer.EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);
OutStreamer.EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
- const uint32_t ComputePGMRSrc2 =
- S_00B84C_LDS_SIZE(LDSBlocks) |
- S_00B02C_SCRATCH_EN(ScratchBlocks > 0);
-
- OutStreamer.EmitIntValue(ComputePGMRSrc2, 4);
+ OutStreamer.EmitIntValue(KernelInfo.ComputePGMRSrc2, 4);
OutStreamer.EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
- OutStreamer.EmitIntValue(S_00B860_WAVESIZE(ScratchBlocks), 4);
+ OutStreamer.EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4);
// TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
// 0" comment but I don't see a corresponding field in the register spec.
} else {
OutStreamer.EmitIntValue(RsrcReg, 4);
- OutStreamer.EmitIntValue(S_00B028_VGPRS(VGPRBlocks) |
- S_00B028_SGPRS(SGPRBlocks), 4);
+ OutStreamer.EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
+ S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
+ if (STM.isVGPRSpillingEnabled(MFI)) {
+ OutStreamer.EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
+ OutStreamer.EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
+ }
}
if (MFI->getShaderType() == ShaderType::PIXEL) {
OutStreamer.EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
- OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(LDSBlocks), 4);
+ OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
OutStreamer.EmitIntValue(MFI->PSInputAddr, 4);
}
}
+
+void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
+ const SIProgramInfo &KernelInfo) const {
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ amd_kernel_code_t header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.amd_code_version_major = AMD_CODE_VERSION_MAJOR;
+ header.amd_code_version_minor = AMD_CODE_VERSION_MINOR;
+
+ header.struct_byte_size = sizeof(amd_kernel_code_t);
+
+ header.target_chip = STM.getAmdKernelCodeChipID();
+
+ header.kernel_code_entry_byte_offset = (1ULL << MF.getAlignment());
+
+ header.compute_pgm_resource_registers =
+ KernelInfo.ComputePGMRSrc1 |
+ (KernelInfo.ComputePGMRSrc2 << 32);
+
+ // Code Properties:
+ header.code_properties = AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR |
+ AMD_CODE_PROPERTY_IS_PTR64;
+
+ if (KernelInfo.FlatUsed)
+ header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
+
+ if (KernelInfo.ScratchBlocks)
+ header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE;
+
+ header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
+ header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;
+
+ // MFI->ABIArgOffset is the number of bytes for the kernel arguments
+ // plus 36. 36 is the number of bytes reserved at the beginning of the
+ // input buffer to store work-group size information.
+ // FIXME: We should be adding the size of the implicit arguments
+ // to this value.
+ header.kernarg_segment_byte_size = MFI->ABIArgOffset;
+
+ header.wavefront_sgpr_count = KernelInfo.NumSGPR;
+ header.workitem_vgpr_count = KernelInfo.NumVGPR;
+
+ // FIXME: What values do I put for these alignments?
+ header.kernarg_segment_alignment = 0;
+ header.group_segment_alignment = 0;
+ header.private_segment_alignment = 0;
+
+ header.code_type = 1; // HSA_EXT_CODE_KERNEL
+
+ header.wavefront_size = STM.getWavefrontSize();
+
+ const MCSectionELF *VersionSection =
+ OutContext.getELFSection(".hsa.version", ELF::SHT_PROGBITS, 0);
+ OutStreamer.SwitchSection(VersionSection);
+ OutStreamer.EmitBytes(Twine("HSA Code Unit:" +
+ Twine(header.hsail_version_major) + "." +
+ Twine(header.hsail_version_minor) + ":" +
+ "AMD:" +
+ Twine(header.amd_code_version_major) + "." +
+ Twine(header.amd_code_version_minor) + ":" +
+ "GFX8.1:0").str());
+
+ OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
+
+ if (isVerbose()) {
+ OutStreamer.emitRawComment("amd_code_version_major = " +
+ Twine(header.amd_code_version_major), false);
+ OutStreamer.emitRawComment("amd_code_version_minor = " +
+ Twine(header.amd_code_version_minor), false);
+ OutStreamer.emitRawComment("struct_byte_size = " +
+ Twine(header.struct_byte_size), false);
+ OutStreamer.emitRawComment("target_chip = " +
+ Twine(header.target_chip), false);
+ OutStreamer.emitRawComment(" compute_pgm_rsrc1: " +
+ Twine::utohexstr(KernelInfo.ComputePGMRSrc1), false);
+ OutStreamer.emitRawComment(" compute_pgm_rsrc2: " +
+ Twine::utohexstr(KernelInfo.ComputePGMRSrc2), false);
+ OutStreamer.emitRawComment("enable_sgpr_private_segment_buffer = " +
+ Twine((bool)(header.code_properties &
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE)), false);
+ OutStreamer.emitRawComment("enable_sgpr_kernarg_segment_ptr = " +
+ Twine((bool)(header.code_properties &
+ AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)), false);
+ OutStreamer.emitRawComment("private_element_size = 2 ", false);
+ OutStreamer.emitRawComment("is_ptr64 = " +
+ Twine((bool)(header.code_properties & AMD_CODE_PROPERTY_IS_PTR64)), false);
+ OutStreamer.emitRawComment("workitem_private_segment_byte_size = " +
+ Twine(header.workitem_private_segment_byte_size),
+ false);
+ OutStreamer.emitRawComment("workgroup_group_segment_byte_size = " +
+ Twine(header.workgroup_group_segment_byte_size),
+ false);
+ OutStreamer.emitRawComment("gds_segment_byte_size = " +
+ Twine(header.gds_segment_byte_size), false);
+ OutStreamer.emitRawComment("kernarg_segment_byte_size = " +
+ Twine(header.kernarg_segment_byte_size), false);
+ OutStreamer.emitRawComment("wavefront_sgpr_count = " +
+ Twine(header.wavefront_sgpr_count), false);
+ OutStreamer.emitRawComment("workitem_vgpr_count = " +
+ Twine(header.workitem_vgpr_count), false);
+ OutStreamer.emitRawComment("code_type = " + Twine(header.code_type), false);
+ OutStreamer.emitRawComment("wavefront_size = " +
+ Twine((int)header.wavefront_size), false);
+ OutStreamer.emitRawComment("optimization_level = " +
+ Twine(header.optimization_level), false);
+ OutStreamer.emitRawComment("hsail_profile = " +
+ Twine(header.hsail_profile), false);
+ OutStreamer.emitRawComment("hsail_machine_model = " +
+ Twine(header.hsail_machine_model), false);
+ OutStreamer.emitRawComment("hsail_version_major = " +
+ Twine(header.hsail_version_major), false);
+ OutStreamer.emitRawComment("hsail_version_minor = " +
+ Twine(header.hsail_version_minor), false);
+ }
+
+ OutStreamer.EmitBytes(StringRef((char*)&header, sizeof(header)));
+}
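The granule arithmetic that getSIProgramInfo folds into VGPRBlocks/SGPRBlocks above is worth seeing on its own; a standalone sketch with an example value:

    // PGM_RSRC1 stores register usage as granules minus one: VGPRs in
    // fours, SGPRs in eights, so 1..4 VGPRs -> 0, 5..8 -> 1, and so on.
    unsigned vgprBlocks(unsigned NumVGPR) { return (NumVGPR - 1) / 4; }
    unsigned sgprBlocks(unsigned NumSGPR) { return (NumSGPR - 1) / 8; }
    // e.g. vgprBlocks(24) == 5 and sgprBlocks(24) == 2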
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.h b/lib/Target/R600/AMDGPUAsmPrinter.h
index b9a0767..58ffb1e 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.h
+++ b/lib/Target/R600/AMDGPUAsmPrinter.h
@@ -24,8 +24,8 @@ class AMDGPUAsmPrinter : public AsmPrinter {
private:
struct SIProgramInfo {
SIProgramInfo() :
- NumVGPR(0),
- NumSGPR(0),
+ VGPRBlocks(0),
+ SGPRBlocks(0),
Priority(0),
FloatMode(0),
Priv(0),
@@ -33,13 +33,19 @@ private:
DebugMode(0),
IEEEMode(0),
ScratchSize(0),
+ ComputePGMRSrc1(0),
+ LDSBlocks(0),
+ ScratchBlocks(0),
+ ComputePGMRSrc2(0),
+ NumVGPR(0),
+ NumSGPR(0),
FlatUsed(false),
VCCUsed(false),
CodeLen(0) {}
// Fields set in PGM_RSRC1 pm4 packet.
- uint32_t NumVGPR;
- uint32_t NumSGPR;
+ uint32_t VGPRBlocks;
+ uint32_t SGPRBlocks;
uint32_t Priority;
uint32_t FloatMode;
uint32_t Priv;
@@ -48,6 +54,17 @@ private:
uint32_t IEEEMode;
uint32_t ScratchSize;
+ uint64_t ComputePGMRSrc1;
+
+ // Fields set in PGM_RSRC2 pm4 packet.
+ uint32_t LDSBlocks;
+ uint32_t ScratchBlocks;
+
+ uint64_t ComputePGMRSrc2;
+
+ uint32_t NumVGPR;
+ uint32_t NumSGPR;
+ uint32_t LDSSize;
bool FlatUsed;
// Bonus information for debugging.
@@ -64,9 +81,12 @@ private:
/// can correctly setup the GPU state.
void EmitProgramInfoR600(const MachineFunction &MF);
void EmitProgramInfoSI(const MachineFunction &MF, const SIProgramInfo &KernelInfo);
+ void EmitAmdKernelCodeT(const MachineFunction &MF,
+ const SIProgramInfo &KernelInfo) const;
public:
- explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
+ explicit AMDGPUAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer);
bool runOnMachineFunction(MachineFunction &MF) override;
@@ -80,7 +100,6 @@ public:
void EmitEndOfAsmFile(Module &M) override;
protected:
- bool DisasmEnabled;
std::vector<std::string> DisasmLines, HexLines;
size_t DisasmLineMaxLen;
};
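One detail in the struct above: ComputePGMRSrc1/2 are declared uint64_t even though the hardware registers are 32-bit, which keeps the packing done in EmitAmdKernelCodeT well-defined. A minimal model (assuming only <cstdint>):

    #include <cstdint>
    // With 32-bit operands, "RSrc2 << 32" would be undefined behavior; the
    // 64-bit fields make the combined value safe to form.
    uint64_t packPgmResources(uint64_t RSrc1, uint64_t RSrc2) {
      return RSrc1 | (RSrc2 << 32);
    }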
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 90b6672..b5ab703 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -39,11 +39,11 @@ namespace {
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
// Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
// make the right decision when generating code for different targets.
- const AMDGPUSubtarget &Subtarget;
+ const AMDGPUSubtarget *Subtarget;
public:
AMDGPUDAGToDAGISel(TargetMachine &TM);
virtual ~AMDGPUDAGToDAGISel();
-
+ bool runOnMachineFunction(MachineFunction &MF) override;
SDNode *Select(SDNode *N) override;
const char *getPassName() const override;
void PostprocessISelDAG() override;
@@ -95,9 +95,9 @@ private:
SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
SDValue &TFE) const;
bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
- SDValue &Offset) const;
+ SDValue &SOffset, SDValue &Offset) const;
bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
- SDValue &VAddr, SDValue &Offset,
+ SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
SDValue &SLC) const;
bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
SDValue &SOffset, SDValue &ImmOffset) const;
@@ -113,6 +113,9 @@ private:
bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
SDValue &Omod) const;
+ bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
+ SDValue &Clamp,
+ SDValue &Omod) const;
SDNode *SelectADD_SUB_I64(SDNode *N);
SDNode *SelectDIV_SCALE(SDNode *N);
@@ -129,7 +132,11 @@ FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
}
AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
- : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
+ : SelectionDAGISel(TM) {}
+
+bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
+ return SelectionDAGISel::runOnMachineFunction(MF);
}
AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
@@ -153,7 +160,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
switch (N->getMachineOpcode()) {
default: {
const MCInstrDesc &Desc =
- TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
+ Subtarget->getInstrInfo()->get(N->getMachineOpcode());
unsigned OpIdx = Desc.getNumDefs() + OpNo;
if (OpIdx >= Desc.getNumOperands())
return nullptr;
@@ -161,17 +168,17 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
if (RegClass == -1)
return nullptr;
- return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
+ return Subtarget->getRegisterInfo()->getRegClass(RegClass);
}
case AMDGPU::REG_SEQUENCE: {
unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
const TargetRegisterClass *SuperRC =
- TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);
+ Subtarget->getRegisterInfo()->getRegClass(RCID);
SDValue SubRegOp = N->getOperand(OpNo + 1);
unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
- return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
- SuperRC, SubRegIdx);
+ return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
+ SubRegIdx);
}
}
}
@@ -241,7 +248,6 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
return nullptr; // Already selected.
}
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
switch (Opc) {
default: break;
// We are selecting i64 ADD here instead of custom lower it during
@@ -250,7 +256,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
case ISD::ADD:
case ISD::SUB: {
if (N->getValueType(0) != MVT::i64 ||
- ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
break;
return SelectADD_SUB_I64(N);
@@ -259,15 +265,12 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
case AMDGPUISD::BUILD_VERTICAL_VECTOR:
case ISD::BUILD_VECTOR: {
unsigned RegClassID;
- const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
- const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
EVT VT = N->getValueType(0);
unsigned NumVectorElts = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
assert(EltVT.bitsEq(MVT::i32));
- if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
bool UseVReg = true;
for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
U != E; ++U) {
@@ -278,12 +281,12 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
if (!RC) {
continue;
}
- if (SIRI->isSGPRClass(RC)) {
+ if (static_cast<const SIRegisterInfo *>(TRI)->isSGPRClass(RC)) {
UseVReg = false;
}
}
switch(NumVectorElts) {
- case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
+ case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
AMDGPU::SReg_32RegClassID;
break;
case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
@@ -365,7 +368,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
}
case ISD::BUILD_PAIR: {
SDValue RC, SubReg0, SubReg1;
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
break;
}
if (N->getValueType(0) == MVT::i128) {
@@ -387,8 +390,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
case ISD::Constant:
case ISD::ConstantFP: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
break;
@@ -414,8 +416,55 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
N->getValueType(0), Ops);
}
+ case ISD::LOAD: {
+ // To simplify the TableGen patterns, we replace all i64 loads with
+ // v2i32 loads. Alternatively, we could promote i64 loads to v2i32
+ // during DAG legalization; however, some places in the DAG legalizer
+ // (such as ExpandUnalignedLoad) assume that i64 loads stay legal, so
+ // doing this promotion that early can cause problems.
+ EVT VT = N->getValueType(0);
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD)
+ break;
+
+ SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SDLoc(N), LD->getChain(),
+ LD->getBasePtr(), LD->getMemOperand());
+ SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
+ MVT::i64, NewLoad);
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLoad.getValue(1));
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), BitCast);
+ SelectCode(NewLoad.getNode());
+ N = BitCast.getNode();
+ break;
+ }
+
+ case ISD::STORE: {
+ // Handle i64 stores here for the same reason mentioned above for loads.
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ SDValue Value = ST->getValue();
+ if (Value.getValueType() != MVT::i64 || ST->isTruncatingStore())
+ break;
+
+ SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
+ MVT::v2i32, Value);
+ SDValue NewStore = CurDAG->getStore(ST->getChain(), SDLoc(N), NewValue,
+ ST->getBasePtr(), ST->getMemOperand());
+
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewStore);
+
+ if (NewValue.getOpcode() == ISD::BITCAST) {
+ Select(NewStore.getNode());
+ return SelectCode(NewValue.getNode());
+ }
+
+ // getNode() may fold the bitcast if its input was another bitcast. If that
+ // happens, we should only select the new store.
+ N = NewStore.getNode();
+ break;
+ }
+
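For orientation, the shape of the load half of this rewrite, as an illustrative sketch (not additional patch content):

  // (i64 (load ptr))                       original node N
  //   NewLoad = (v2i32 (load ptr))         same chain, pointer, and MemOperand
  //   BitCast = (i64 (bitcast NewLoad))
  // N's chain result is rewired to NewLoad.getValue(1) and its value result
  // to BitCast; NewLoad is selected immediately via SelectCode, and N is
  // re-pointed at the bitcast so the normal selection path handles it.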
case AMDGPUISD::REGISTER_LOAD: {
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
break;
SDValue Addr, Offset;
@@ -431,7 +480,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
Ops);
}
case AMDGPUISD::REGISTER_STORE: {
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
break;
SDValue Addr, Offset;
SelectADDRIndirect(N->getOperand(2), Addr, Offset);
@@ -449,7 +498,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
case AMDGPUISD::BFE_I32:
case AMDGPUISD::BFE_U32: {
- if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
break;
// There is a scalar version available, but unlike the vector version which
@@ -554,13 +603,11 @@ bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
}
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
- if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
- N->getMemoryVT().bitsLT(MVT::i32)) {
+ if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS)
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ N->getMemoryVT().bitsLT(MVT::i32))
return true;
- }
- }
+
return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}
@@ -736,6 +783,8 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
+// We need to handle this here because TableGen doesn't support matching
+// instructions with multiple outputs.
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
SDLoc SL(N);
EVT VT = N->getValueType(0);
@@ -745,30 +794,22 @@ SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
unsigned Opc
= (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
- const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
- const SDValue False = CurDAG->getTargetConstant(0, MVT::i1);
- SDValue Ops[] = {
- Zero, // src0_modifiers
- N->getOperand(0), // src0
- Zero, // src1_modifiers
- N->getOperand(1), // src1
- Zero, // src2_modifiers
- N->getOperand(2), // src2
- False, // clamp
- Zero // omod
- };
+ // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
+ SDValue Ops[8];
+ SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
+ SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
+ SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
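The practical effect of switching to SelectVOP3Mods here, as a sketch (this assumes SelectVOP3Mods folds fneg/fabs into the source-modifier operands, which is its usual behavior; not verbatim patch content):

  // div_scale (fneg %a), %b, %c
  //   => src0 = %a with the NEG bit set in src0_modifiers, rather than
  //      a separate instruction to negate %a before the DIV_SCALE.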
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
unsigned OffsetBits) const {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
(OffsetBits == 8 && !isUInt<8>(Offset)))
return false;
- if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
return true;
// On Southern Islands, instructions with a negative base value and an offset
@@ -879,26 +920,32 @@ void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
SDValue N1 = Addr.getOperand(1);
ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
- if (isLegalMUBUFImmOffset(C1)) {
-
- if (N0.getOpcode() == ISD::ADD) {
- // (add (add N2, N3), C1) -> addr64
- SDValue N2 = N0.getOperand(0);
- SDValue N3 = N0.getOperand(1);
- Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
- Ptr = N2;
- VAddr = N3;
- Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
- return;
- }
+ if (N0.getOpcode() == ISD::ADD) {
+ // (add (add N2, N3), C1) -> addr64
+ SDValue N2 = N0.getOperand(0);
+ SDValue N3 = N0.getOperand(1);
+ Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
+ Ptr = N2;
+ VAddr = N3;
+ } else {
// (add N0, C1) -> offset
VAddr = CurDAG->getTargetConstant(0, MVT::i32);
Ptr = N0;
- Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ }
+
+ if (isLegalMUBUFImmOffset(C1)) {
+ Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ return;
+ } else if (isUInt<32>(C1->getZExtValue())) {
+ // Illegal offset, store it in soffset.
+ Offset = CurDAG->getTargetConstant(0, MVT::i16);
+ SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
+ CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i32)), 0);
return;
}
}
+
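Example decompositions performed by this block (a sketch; the exact immediate-offset width is whatever isLegalMUBUFImmOffset accepts):

  // (add (add %base, %voff), 16)   -> addr64: Ptr = %base, VAddr = %voff,
  //                                   Offset = 16
  // (add %base, C), C too wide for the immediate field but a valid uint32
  //                                 -> VAddr = 0, Ptr = %base, Offset = 0,
  //                                    and C materialized into SOffset
  //                                    through an S_MOV_B32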
if (Addr.getOpcode() == ISD::ADD) {
// (add N0, N1) -> addr64
SDValue N0 = Addr.getOperand(0);
@@ -918,9 +965,9 @@ void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
- SDValue &VAddr,
+ SDValue &VAddr, SDValue &SOffset,
SDValue &Offset) const {
- SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE;
+ SDValue Ptr, Offen, Idxen, Addr64, GLC, SLC, TFE;
SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
GLC, SLC, TFE);
@@ -940,11 +987,12 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
- SDValue &VAddr, SDValue &Offset,
- SDValue &SLC) const {
+ SDValue &VAddr, SDValue &SOffset,
+ SDValue &Offset,
+ SDValue &SLC) const {
SLC = CurDAG->getTargetConstant(0, MVT::i1);
- return SelectMUBUFAddr64(Addr, SRsrc, VAddr, Offset);
+ return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset);
}
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
@@ -954,21 +1002,32 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
SDLoc DL(Addr);
MachineFunction &MF = CurDAG->getMachineFunction();
const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
const SITargetLowering& Lowering =
*static_cast<const SITargetLowering*>(getTargetLowering());
- unsigned ScratchPtrReg =
- TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
unsigned ScratchOffsetReg =
TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
ScratchOffsetReg, MVT::i32);
+ SDValue Sym0 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD0", MVT::i32);
+ SDValue ScratchRsrcDword0 =
+ SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym0), 0);
+
+ SDValue Sym1 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD1", MVT::i32);
+ SDValue ScratchRsrcDword1 =
+ SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);
- SDValue ScratchPtr =
- CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
- MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64);
+ const SDValue RsrcOps[] = {
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+ ScratchRsrcDword0,
+ CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+ ScratchRsrcDword1,
+ CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
+ };
+ SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
+ MVT::v2i32, RsrcOps), 0);
Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);
@@ -985,22 +1044,6 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
}
}
- // (add FI, n0)
- if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
- isa<FrameIndexSDNode>(Addr.getOperand(0))) {
- VAddr = Addr.getOperand(1);
- ImmOffset = Addr.getOperand(0);
- return true;
- }
-
- // (FI)
- if (isa<FrameIndexSDNode>(Addr)) {
- VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
- CurDAG->getConstant(0, MVT::i32)), 0);
- ImmOffset = Addr;
- return true;
- }
-
// (node)
VAddr = Addr;
ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
@@ -1012,6 +1055,8 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
SDValue &GLC, SDValue &SLC,
SDValue &TFE) const {
SDValue Ptr, VAddr, Offen, Idxen, Addr64;
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
GLC, SLC, TFE);
@@ -1019,7 +1064,7 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
!cast<ConstantSDNode>(Idxen)->getSExtValue() &&
!cast<ConstantSDNode>(Addr64)->getSExtValue()) {
- uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT |
+ uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
APInt::getAllOnesValue(32).getZExtValue(); // Size
SDLoc DL(Addr);
@@ -1045,7 +1090,7 @@ SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
SDLoc DL(N);
- assert(Subtarget.hasFlatAddressSpace() &&
+ assert(Subtarget->hasFlatAddressSpace() &&
"addrspacecast only supported with flat address space!");
assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
@@ -1081,7 +1126,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
if (DestSize > SrcSize) {
assert(SrcSize == 32 && DestSize == 64);
- SDValue RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
+ // FIXME: This is probably wrong; we should never be defining
+ // a register class with both VGPRs and SGPRs.
+ SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, MVT::i32);
const SDValue Ops[] = {
RC,
@@ -1141,6 +1188,14 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
return SelectVOP3Mods(In, Src, SrcMods);
}
+bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
+ SDValue &SrcMods,
+ SDValue &Clamp,
+ SDValue &Omod) const {
+ Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32);
+ return SelectVOP3Mods(In, Src, SrcMods);
+}
+
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
const AMDGPUTargetLowering& Lowering =
*static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index 2f95b74..4707279 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -102,11 +102,9 @@ EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
-AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
- TargetLowering(TM) {
-
- Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();
-
+AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
+ const AMDGPUSubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
setOperationAction(ISD::Constant, MVT::i32, Legal);
setOperationAction(ISD::Constant, MVT::i64, Legal);
setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
@@ -127,12 +125,21 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FABS, MVT::f32, Legal);
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
- setOperationAction(ISD::FROUND, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::f32, Custom);
+ setOperationAction(ISD::FROUND, MVT::f64, Custom);
+
setOperationAction(ISD::FREM, MVT::f32, Custom);
setOperationAction(ISD::FREM, MVT::f64, Custom);
+ // v_mad_f32 does not support denormals according to some sources.
+ if (!Subtarget->hasFP32Denormals())
+ setOperationAction(ISD::FMAD, MVT::f32, Legal);
+
+ // Expand to fneg + fadd.
+ setOperationAction(ISD::FSUB, MVT::f64, Expand);
+
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);
@@ -141,9 +148,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::STORE, MVT::v2f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
- setOperationAction(ISD::STORE, MVT::i64, Promote);
- AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
-
setOperationAction(ISD::STORE, MVT::v4f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
@@ -162,9 +166,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
// Custom lowering of vector stores is required for local address space
// stores.
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
- // XXX: Native v2i32 local address space stores are possible, but not
- // currently implemented.
- setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
@@ -187,9 +188,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
- setOperationAction(ISD::LOAD, MVT::i64, Promote);
- AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
-
setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
@@ -216,18 +214,28 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);
+ // There are no 64-bit extloads. These should be done as a 32-bit extload
+ // followed by an extension to 64 bits.
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
+ }
+
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
+ }
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
@@ -246,7 +254,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
@@ -382,6 +391,12 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setTargetDAGCombine(ISD::SELECT_CC);
setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::FSUB);
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
setSchedulingPreference(Sched::RegPressure);
setJumpIsExpensive(true);
@@ -397,6 +412,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
// large sequence of instructions.
setIntDivIsCheap(false);
setPow2SDivIsCheap(false);
+ setFsqrtIsCheap(true);
// FIXME: Need to really handle these.
MaxStoresPerMemcpy = 4096;
@@ -429,6 +445,29 @@ bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}
+bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
+ ISD::LoadExtType,
+ EVT NewVT) const {
+
+ unsigned NewSize = NewVT.getStoreSizeInBits();
+
+ // If we are reducing to a 32-bit load, this is always better.
+ if (NewSize == 32)
+ return true;
+
+ EVT OldVT = N->getValueType(0);
+ unsigned OldSize = OldVT.getStoreSizeInBits();
+
+ // Don't produce extloads from sub-32-bit types. SI doesn't have scalar
+ // extloads, so doing one requires using a buffer_load. In cases where we
+ // still couldn't use a scalar load, using the wider load shouldn't really
+ // hurt anything.
+
+ // If the old load already had to be an extload, there's no harm in continuing
+ // to reduce the width.
+ return (OldSize < 32);
+}
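Concretely, the policy above yields decisions like these (illustrative examples, not exhaustive):

  // i64 extload narrowed to i32 -> true  (a 32-bit load is always preferred)
  // i64 extload narrowed to i16 -> false (would create a new sub-32-bit extload)
  // i16 extload narrowed to i8  -> true  (the old load was already sub-32-bit)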
+
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
EVT CastTy) const {
if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
@@ -442,6 +481,18 @@ bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
(LScalarSize < 32));
}
+// SI+ has cttz / ctlz instructions for 32-bit values. This is probably also
+// profitable with the expansion for 64-bit values, since speculating these
+// operations is generally cheap.
+// FIXME: These should really have the size as a parameter.
+bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
+ return true;
+}
+
+bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
+ return true;
+}
+
//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//
@@ -560,6 +611,7 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
case ISD::FRINT: return LowerFRINT(Op, DAG);
case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
+ case ISD::FROUND: return LowerFROUND(Op, DAG);
case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
@@ -619,7 +671,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
const SDValue &InitPtr,
SDValue Chain,
SelectionDAG &DAG) const {
- const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = getDataLayout();
SDLoc DL(InitPtr);
Type *InitTy = Init->getType();
@@ -707,7 +759,7 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
SDValue Op,
SelectionDAG &DAG) const {
- const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = getDataLayout();
GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = G->getGlobal();
@@ -810,8 +862,7 @@ SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
- getTargetMachine().getSubtargetImpl()->getFrameLowering());
+ const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();
FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
@@ -866,10 +917,9 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
case Intrinsic::AMDGPU_div_fmas:
- // FIXME: Dropping bool parameter. Work is needed to support the implicit
- // read from VCC.
return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
+ Op.getOperand(4));
case Intrinsic::AMDGPU_div_fixup:
return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
@@ -889,7 +939,19 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
case Intrinsic::AMDGPU_rsq_clamped:
- return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ Type *Type = VT.getTypeForEVT(*DAG.getContext());
+ APFloat Max = APFloat::getLargest(Type->getFltSemantics());
+ APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
+
+ SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
+ SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
+ DAG.getConstantFP(Max, VT));
+ return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
+ DAG.getConstantFP(Min, VT));
+ } else {
+ return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
+ }
case Intrinsic::AMDGPU_ldexp:
return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
@@ -962,6 +1024,10 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case AMDGPUIntrinsic::AMDGPU_brev:
return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
+ case Intrinsic::AMDGPU_class:
+ return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
@@ -1000,17 +1066,21 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
}
/// \brief Generate Min/Max node
-SDValue AMDGPUTargetLowering::CombineFMinMax(SDLoc DL,
- EVT VT,
- SDValue LHS,
- SDValue RHS,
- SDValue True,
- SDValue False,
- SDValue CC,
- SelectionDAG &DAG) const {
+SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
+ EVT VT,
+ SDValue LHS,
+ SDValue RHS,
+ SDValue True,
+ SDValue False,
+ SDValue CC,
+ DAGCombinerInfo &DCI) const {
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ return SDValue();
+
if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
return SDValue();
+ SelectionDAG &DAG = DCI.DAG;
ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
switch (CCOpcode) {
case ISD::SETOEQ:
@@ -1027,27 +1097,47 @@ SDValue AMDGPUTargetLowering::CombineFMinMax(SDLoc DL,
case ISD::SETO:
break;
case ISD::SETULE:
- case ISD::SETULT:
+ case ISD::SETULT: {
+ if (LHS == True)
+ return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
+ return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
+ }
case ISD::SETOLE:
case ISD::SETOLT:
case ISD::SETLE:
case ISD::SETLT: {
+ // Ordered. Assume ordered for undefined.
+
+ // Only do this after legalization to avoid interfering with other combines
+ // which might occur.
+ if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
+ !DCI.isCalledByLegalizer())
+ return SDValue();
+
// We need to permute the operands to get the correct NaN behavior. The
// hardware returns the second operand when the compare fails (as it does
// for a NaN input), so order the operands to match the compare type the
// hardware uses.
if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
- return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
+ return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
+ return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
+ }
+ case ISD::SETUGE:
+ case ISD::SETUGT: {
+ if (LHS == True)
+ return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
+ return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
}
case ISD::SETGT:
case ISD::SETGE:
- case ISD::SETUGE:
case ISD::SETOGE:
- case ISD::SETUGT:
case ISD::SETOGT: {
+ if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
+ !DCI.isCalledByLegalizer())
+ return SDValue();
+
if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
- return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
+ return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
+ return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
}
case ISD::SETCC_INVALID:
llvm_unreachable("Invalid setcc condcode!");
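A worked example of the permutation (illustrative; it assumes FMIN_LEGACY(a, b) = a < b ? a : b, i.e. the second operand wins when the compare fails on NaN, matching the node comments in AMDGPUInstrInfo.td later in this patch):

  // select (setolt x, y), x, y  -> FMIN_LEGACY(x, y)
  //   x = NaN: setolt is false -> y;  x < y fails -> y.  Same result.
  // select (setult x, y), x, y  -> FMIN_LEGACY(y, x)
  //   x = NaN: setult is true  -> x;  y < x fails -> x.  Same result.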
@@ -1330,24 +1420,6 @@ SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT MemVT = Load->getMemoryVT();
- if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
- // We can do the extload to 32-bits, and then need to separately extend to
- // 64-bits.
-
- SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
- Load->getChain(),
- Load->getBasePtr(),
- MemVT,
- Load->getMemOperand());
-
- SDValue Ops[] = {
- DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32),
- ExtLoad32.getValue(1)
- };
-
- return DAG.getMergeValues(Ops, DL);
- }
-
if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
assert(VT == MVT::i1 && "Only i1 non-extloads expected");
// FIXME: Copied from PPC
@@ -1586,12 +1658,26 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
+ if (VT == MVT::i64 &&
+ DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
+ DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
+
+ SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
+ LHS_Lo, RHS_Lo);
+
+ SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero);
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero);
+ Results.push_back(DIV);
+ Results.push_back(REM);
+ return;
+ }
+
// Get Speculative values
SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
- SDValue REM_Hi = zero;
SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero);
SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
SDValue DIV_Lo = zero;
@@ -1599,8 +1685,10 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
const unsigned halfBitWidth = HalfVT.getSizeInBits();
for (unsigned i = 0; i < halfBitWidth; ++i) {
- SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
- // Get Value of high bit
+ const unsigned bitPos = halfBitWidth - i - 1;
+ SDValue POS = DAG.getConstant(bitPos, HalfVT);
+ // Get value of high bit
+ // TODO: Remove the BFE part when the optimization is fixed
SDValue HBit;
if (halfBitWidth == 32 && Subtarget->hasBFE()) {
HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
@@ -1608,33 +1696,23 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
}
+ HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
- SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
- DAG.getConstant(halfBitWidth - 1, HalfVT));
- REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
- REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);
-
- REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
- REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);
+ // Shift
+ REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, VT));
+ // Add LHS high bit
+ REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
-
- SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
-
- SDValue BIT = DAG.getConstant(1 << (halfBitWidth - i - 1), HalfVT);
+ SDValue BIT = DAG.getConstant(1 << bitPos, HalfVT);
SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
// Update REM
-
SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
-
REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
- REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
- REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
}
- SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
Results.push_back(DIV);
Results.push_back(REM);
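The loop above is classic restoring division built out of SELECT_CC nodes. A scalar C++ model of the rewritten low-half loop, as a sketch under the same SETUGE semantics (helper name and signature are hypothetical, not from the patch):

#include <cstdint>

// One quotient bit per iteration over the 32 low dividend bits; REM is kept
// as a full 64-bit value, exactly as the rewritten DAG loop now does.
static void udivrem64LowHalf(uint64_t RemIn, uint32_t LHS_Lo, uint64_t RHS,
                             uint32_t &DIV_Lo, uint64_t &REM) {
  REM = RemIn; // remainder left over from the speculative high-half step
  DIV_Lo = 0;
  for (unsigned i = 0; i < 32; ++i) {
    const unsigned bitPos = 32 - i - 1;
    uint64_t HBit = (LHS_Lo >> bitPos) & 1; // next dividend bit
    REM = (REM << 1) | HBit;                // shift it into the remainder
    if (REM >= RHS) {                       // the SETUGE select
      DIV_Lo |= (1u << bitPos);             // set this quotient bit
      REM -= RHS;                           // and subtract the divisor
    }
  }
}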
@@ -1655,8 +1733,8 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue Den = Op.getOperand(1);
if (VT == MVT::i32) {
- if (DAG.MaskedValueIsZero(Op.getOperand(0), APInt(32, 0xff << 24)) &&
- DAG.MaskedValueIsZero(Op.getOperand(1), APInt(32, 0xff << 24))) {
+ if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
+ DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
// TODO: We technically could do this for i64, but shouldn't that just be
// handled by something generally reducing 64-bit division on 32-bit
// values to 32-bit?
@@ -1768,19 +1846,31 @@ SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- if (VT == MVT::i32) {
- if (DAG.ComputeNumSignBits(Op.getOperand(0)) > 8 &&
- DAG.ComputeNumSignBits(Op.getOperand(1)) > 8) {
- // TODO: We technically could do this for i64, but shouldn't that just be
- // handled by something generally reducing 64-bit division on 32-bit
- // values to 32-bit?
- return LowerDIVREM24(Op, DAG, true);
- }
- }
-
SDValue Zero = DAG.getConstant(0, VT);
SDValue NegOne = DAG.getConstant(-1, VT);
+ if (VT == MVT::i32 &&
+ DAG.ComputeNumSignBits(LHS) > 8 &&
+ DAG.ComputeNumSignBits(RHS) > 8) {
+ return LowerDIVREM24(Op, DAG, true);
+ }
+ if (VT == MVT::i64 &&
+ DAG.ComputeNumSignBits(LHS) > 32 &&
+ DAG.ComputeNumSignBits(RHS) > 32) {
+ EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
+
+ // Hi/Lo split
+ SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
+ SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
+ SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
+ LHS_Lo, RHS_Lo);
+ SDValue Res[2] = {
+ DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
+ DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
+ };
+ return DAG.getMergeValues(Res, DL);
+ }
+
SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
@@ -1845,6 +1935,20 @@ SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
+static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
+ const unsigned FractBits = 52;
+ const unsigned ExpBits = 11;
+
+ SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
+ Hi,
+ DAG.getConstant(FractBits - 32, MVT::i32),
+ DAG.getConstant(ExpBits, MVT::i32));
+ SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
+ DAG.getConstant(1023, MVT::i32));
+
+ return Exp;
+}
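Written as plain integer arithmetic, the helper above computes the following (a sketch for reference, not patch content):

#include <cstdint>

// Recover the unbiased exponent from the high 32 bits of an IEEE-754 double:
// BFE_U32(Hi, 20, 11) extracts bits [30:20], then the bias 1023 is removed.
static int32_t extractF64ExponentScalar(uint32_t Hi) {
  uint32_t ExpPart = (Hi >> 20) & 0x7ff; // FractBits - 32 = 20, ExpBits = 11
  return (int32_t)ExpPart - 1023;
}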
+
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Src = Op.getOperand(0);
@@ -1860,16 +1964,9 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
// exponent.
SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
- const unsigned FractBits = 52;
- const unsigned ExpBits = 11;
+ SDValue Exp = extractF64Exponent(Hi, SL, DAG);
- // Extract the exponent.
- SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
- Hi,
- DAG.getConstant(FractBits - 32, MVT::i32),
- DAG.getConstant(ExpBits, MVT::i32));
- SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
- DAG.getConstant(1023, MVT::i32));
+ const unsigned FractBits = 52;
// Extract the sign bit.
const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
@@ -1932,6 +2029,99 @@ SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) con
return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}
+// XXX - May require not supporting f32 denormals?
+SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue X = Op.getOperand(0);
+
+ SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);
+
+ SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);
+
+ SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);
+
+ const SDValue Zero = DAG.getConstantFP(0.0, MVT::f32);
+ const SDValue One = DAG.getConstantFP(1.0, MVT::f32);
+ const SDValue Half = DAG.getConstantFP(0.5, MVT::f32);
+
+ SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
+
+ SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
+
+ SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);
+
+ return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
+}
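A scalar model of the f32 path (a sketch mirroring the DAG nodes above; it rounds halfway cases away from zero):

#include <cmath>

static float roundf32Model(float X) {
  float T = std::trunc(X);                // FTRUNC
  float AbsDiff = std::fabs(X - T);       // FSUB + FABS
  float SignOne = std::copysign(1.0f, X); // FCOPYSIGN
  // SETOGE + SELECT + FADD: step away from zero when the fraction is >= 0.5.
  return T + (AbsDiff >= 0.5f ? SignOne : 0.0f);
}

For example, roundf32Model(2.5f) gives 3.0f and roundf32Model(-2.5f) gives -3.0f.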
+
+SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue X = Op.getOperand(0);
+
+ SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
+
+ const SDValue Zero = DAG.getConstant(0, MVT::i32);
+ const SDValue One = DAG.getConstant(1, MVT::i32);
+ const SDValue NegOne = DAG.getConstant(-1, MVT::i32);
+ const SDValue FiftyOne = DAG.getConstant(51, MVT::i32);
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
+
+ SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
+
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
+
+ SDValue Exp = extractF64Exponent(Hi, SL, DAG);
+
+ const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), MVT::i64);
+
+ SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
+ SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
+ DAG.getConstant(INT64_C(0x0008000000000000), MVT::i64),
+ Exp);
+
+ SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
+ SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
+ DAG.getConstant(0, MVT::i64), Tmp0,
+ ISD::SETNE);
+
+ SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
+ D, DAG.getConstant(0, MVT::i64));
+ SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
+
+ K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
+ K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
+
+ SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
+ SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
+ SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
+
+ SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
+ ExpEqNegOne,
+ DAG.getConstantFP(1.0, MVT::f64),
+ DAG.getConstantFP(0.0, MVT::f64));
+
+ SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
+
+ K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
+ K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
+
+ return K;
+}
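The f64 path does the same rounding entirely in the integer domain. A scalar model, as a sketch with the exponent special cases hoisted into early returns (the DAG version instead computes everything and selects at the end):

#include <cmath>
#include <cstdint>
#include <cstring>

static double roundf64Model(double X) {
  uint64_t L;
  std::memcpy(&L, &X, sizeof(L));
  int Exp = (int)((L >> 52) & 0x7ff) - 1023;        // unbiased exponent
  if (Exp > 51)                                     // no fraction bits left
    return X;                                       // (also covers inf/nan)
  if (Exp < 0)                                      // |X| < 1
    return std::copysign(Exp == -1 ? 1.0 : 0.0, X);
  uint64_t M = UINT64_C(0x000fffffffffffff) >> Exp; // fractional-bit mask
  uint64_t D = UINT64_C(0x0008000000000000) >> Exp; // the 0.5 bit
  uint64_t K = L + ((L & M) ? D : 0);               // bump by 0.5 if non-integral
  K &= ~M;                                          // then truncate the fraction
  double R;
  std::memcpy(&R, &K, sizeof(R));
  return R;
}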
+
+SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+
+ if (VT == MVT::f32)
+ return LowerFROUND32(Op, DAG);
+
+ if (VT == MVT::f64)
+ return LowerFROUND64(Op, DAG);
+
+ llvm_unreachable("unhandled type");
+}
+
SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Src = Op.getOperand(0);
@@ -2155,7 +2345,8 @@ SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
SDValue Value = SN->getValue();
EVT VT = Value.getValueType();
- if (isTypeLegal(VT) || SN->isVolatile() || !ISD::isNormalLoad(Value.getNode()))
+ if (isTypeLegal(VT) || SN->isVolatile() ||
+ !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
return SDValue();
LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
@@ -2231,27 +2422,9 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
simplifyI24(N1, DCI);
return SDValue();
}
- case ISD::SELECT_CC: {
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
-
- if (VT == MVT::f32 ||
- (VT == MVT::f64 &&
- Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)) {
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- SDValue True = N->getOperand(2);
- SDValue False = N->getOperand(3);
- SDValue CC = N->getOperand(4);
-
- return CombineFMinMax(DL, VT, LHS, RHS, True, False, CC, DAG);
- }
-
- break;
- }
case ISD::SELECT: {
SDValue Cond = N->getOperand(0);
- if (Cond.getOpcode() == ISD::SETCC) {
+ if (Cond.getOpcode() == ISD::SETCC && Cond.hasOneUse()) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue LHS = Cond.getOperand(0);
@@ -2261,11 +2434,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
SDValue True = N->getOperand(1);
SDValue False = N->getOperand(2);
- if (VT == MVT::f32 ||
- (VT == MVT::f64 &&
- Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)) {
- return CombineFMinMax(DL, VT, LHS, RHS, True, False, CC, DAG);
- }
+ if (VT == MVT::f32)
+ return CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
// TODO: Implement min / max Evergreen instructions.
if (VT == MVT::i32 &&
@@ -2451,7 +2621,6 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(DWORDADDR)
NODE_NAME_CASE(FRACT)
NODE_NAME_CASE(CLAMP)
- NODE_NAME_CASE(MAD)
NODE_NAME_CASE(FMAX_LEGACY)
NODE_NAME_CASE(SMAX)
NODE_NAME_CASE(UMAX)
@@ -2474,6 +2643,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(RSQ_LEGACY)
NODE_NAME_CASE(RSQ_CLAMPED)
NODE_NAME_CASE(LDEXP)
+ NODE_NAME_CASE(FP_CLASS)
NODE_NAME_CASE(DOT4)
NODE_NAME_CASE(BFE_U32)
NODE_NAME_CASE(BFE_I32)
@@ -2505,6 +2675,46 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
+SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = Operand.getValueType();
+
+ if (VT == MVT::f32) {
+ RefinementSteps = 0;
+ return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
+ }
+
+ // TODO: There is also an f64 rsq instruction, but the documentation is less
+ // clear on its precision.
+
+ return SDValue();
+}
+
+SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps) const {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = Operand.getValueType();
+
+ if (VT == MVT::f32) {
+ // Reciprocal, < 1 ulp error.
+ //
+ // This reciprocal approximation converges to < 0.5 ulp error with one
+ // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
+
+ RefinementSteps = 0;
+ return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
+ }
+
+ // TODO: There is also an f64 rcp instruction, but the documentation is less
+ // clear on its precision.
+
+ return SDValue();
+}
+
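For background on the refinement the comments mention, the standard Newton-Raphson steps are (general numerics, not taken from the patch):

  x_{n+1} = x_n (2 - a x_n)            for 1/a
  r_{n+1} = (r_n / 2) (3 - a r_n^2)    for 1/sqrt(a)

Each step roughly doubles the number of correct bits, which is why an estimate already below 1 ulp can report RefinementSteps = 0.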
static void computeKnownBitsForMinMax(const SDValue Op0,
const SDValue Op1,
APInt &KnownZero,
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 36b4ee6..6bc6ca5 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -43,12 +43,15 @@ private:
/// \brief Split a vector store into multiple scalar stores.
/// \returns The resulting chain.
- SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
@@ -86,6 +89,7 @@ protected:
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) const;
@@ -106,7 +110,7 @@ protected:
const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
- AMDGPUTargetLowering(TargetMachine &TM);
+ AMDGPUTargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
bool isFAbsFree(EVT VT) const override;
bool isFNegFree(EVT VT) const override;
@@ -124,8 +128,14 @@ public:
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
bool ShouldShrinkFPConstant(EVT VT) const override;
+ bool shouldReduceLoadWidth(SDNode *Load,
+ ISD::LoadExtType ExtType,
+ EVT ExtVT) const override;
bool isLoadBitCastBeneficial(EVT, EVT) const override;
+ bool isCheapToSpeculateCttz() const override;
+ bool isCheapToSpeculateCtlz() const override;
+
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -142,14 +152,14 @@ public:
SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
- SDValue CombineFMinMax(SDLoc DL,
- EVT VT,
- SDValue LHS,
- SDValue RHS,
- SDValue True,
- SDValue False,
- SDValue CC,
- SelectionDAG &DAG) const;
+ SDValue CombineFMinMaxLegacy(SDLoc DL,
+ EVT VT,
+ SDValue LHS,
+ SDValue RHS,
+ SDValue True,
+ SDValue False,
+ SDValue CC,
+ DAGCombinerInfo &DCI) const;
SDValue CombineIMinMax(SDLoc DL,
EVT VT,
SDValue LHS,
@@ -161,6 +171,14 @@ public:
const char* getTargetNodeName(unsigned Opcode) const override;
+ SDValue getRsqrtEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const override;
+ SDValue getRecipEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps) const override;
+
virtual SDNode *PostISelFolding(MachineSDNode *N,
SelectionDAG &DAG) const {
return N;
@@ -200,7 +218,6 @@ enum {
DWORDADDR,
FRACT,
CLAMP,
- MAD, // Multiply + add with same result as the separate operations.
// SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
// Denormals handled on some parts.
@@ -231,6 +248,7 @@ enum {
RSQ_LEGACY,
RSQ_CLAMPED,
LDEXP,
+ FP_CLASS,
DOT4,
BFE_U32, // Extract range of bits with zero extension to 32-bits.
BFE_I32, // Extract range of bits with sign extension to 32-bits.
diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
index a8fc614..f4de2d6 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -319,10 +319,7 @@ int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
return -1;
}
- Offset = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getFrameIndexOffset(MF, -1);
+ Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexOffset(MF, -1);
return getIndirectIndexBegin(MF) + Offset;
}
@@ -341,8 +338,39 @@ int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
// instead.
namespace llvm {
namespace AMDGPU {
-int getMCOpcode(uint16_t Opcode, unsigned Gen) {
- return getMCOpcode(Opcode);
+static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
+ return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
}
}
+
+// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
+enum SISubtarget {
+ SI = 0,
+ VI = 1
+};
+
+static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
+ switch (Gen) {
+ default:
+ return SI;
+ case AMDGPUSubtarget::VOLCANIC_ISLANDS:
+ return VI;
+ }
+}
+
+int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
+ int MCOp = AMDGPU::getMCOpcode(Opcode,
+ AMDGPUSubtargetToSISubtarget(RI.ST.getGeneration()));
+
+ // -1 means that Opcode is already a native instruction.
+ if (MCOp == -1)
+ return Opcode;
+
+ // (uint16_t)-1 means that Opcode is a pseudo instruction that has
+ // no encoding in the given subtarget generation.
+ if (MCOp == (uint16_t)-1)
+ return -1;
+
+ return MCOp;
+}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h
index da9833d..202183c 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/lib/Target/R600/AMDGPUInstrInfo.h
@@ -135,6 +135,17 @@ public:
bool isRegisterStore(const MachineInstr &MI) const;
bool isRegisterLoad(const MachineInstr &MI) const;
/// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
/// Return -1 if the target-specific opcode for the pseudo instruction does
/// not exist. If Opcode is not a pseudo instruction, it is returned unchanged.
+ int pseudoToMCOpcode(int Opcode) const;
+
+ /// \brief Return the descriptor of the target-specific machine instruction
+ /// that corresponds to the specified pseudo or native opcode.
+ const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
+ return get(pseudoToMCOpcode(Opcode));
+ }
+
//===---------------------------------------------------------------------===//
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//
diff --git a/lib/Target/R600/AMDGPUInstrInfo.td b/lib/Target/R600/AMDGPUInstrInfo.td
index 4ee0f2b..901eb51 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/lib/Target/R600/AMDGPUInstrInfo.td
@@ -27,10 +27,19 @@ def AMDGPULdExpOp : SDTypeProfile<1, 2,
[SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
>;
+def AMDGPUFPClassOp : SDTypeProfile<1, 2,
+ [SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>]
+>;
+
def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
[SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
>;
+// float, float, float, vcc
+def AMDGPUFmasOp : SDTypeProfile<1, 4,
+ [SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisInt<4>]
+>;
+
//===----------------------------------------------------------------------===//
// AMDGPU DAG Nodes
//
@@ -58,16 +67,17 @@ def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
+def AMDGPUfp_class : SDNode<"AMDGPUISD::FP_CLASS", AMDGPUFPClassOp>;
+
// out = max(a, b), where a and b are floats and a NaN comparison fails.
// This is not commutative because this gives the second operand:
// x < nan ? x : nan -> nan
// nan < x ? nan : x -> x
def AMDGPUfmax_legacy : SDNode<"AMDGPUISD::FMAX_LEGACY", SDTFPBinOp,
- [SDNPAssociative]
+ []
>;
def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPTernaryOp, []>;
-def AMDGPUmad : SDNode<"AMDGPUISD::MAD", SDTFPTernaryOp, []>;
// out = max(a, b) a and b are signed ints
def AMDGPUsmax : SDNode<"AMDGPUISD::SMAX", SDTIntBinOp,
@@ -81,7 +91,7 @@ def AMDGPUumax : SDNode<"AMDGPUISD::UMAX", SDTIntBinOp,
// out = min(a, b), where a and b are floats and a NaN comparison fails.
def AMDGPUfmin_legacy : SDNode<"AMDGPUISD::FMIN_LEGACY", SDTFPBinOp,
- [SDNPAssociative]
+ []
>;
// out = min(a, b) a and b are signed ints
@@ -147,7 +157,7 @@ def AMDGPUdiv_scale : SDNode<"AMDGPUISD::DIV_SCALE", AMDGPUDivScaleOp>;
// Special case divide FMA with scale and flags (src0 = Quotient,
// src1 = Denominator, src2 = Numerator).
-def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", SDTFPTernaryOp>;
+def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", AMDGPUFmasOp>;
// Single or double precision division fixup.
// Special case divide fixup and flags (src0 = Quotient, src1 =
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index c215865..849b241 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -23,8 +23,6 @@ class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instructio
let Pattern = pattern;
let Itinerary = NullALU;
- let isCodeGenOnly = 1;
-
let TSFlags{63} = isRegisterLoad;
let TSFlags{62} = isRegisterStore;
}
@@ -73,6 +71,11 @@ def COND_OEQ : PatLeaf <
[{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;
+def COND_ONE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
+>;
+
def COND_OGT : PatLeaf <
(cond),
[{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
@@ -93,23 +96,28 @@ def COND_OLE : PatLeaf <
[{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;
-def COND_UNE : PatLeaf <
- (cond),
- [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
->;
def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
//===----------------------------------------------------------------------===//
-// PatLeafs for unsigned comparisons
+// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//
+def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
+def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;
+// XXX - For some reason the R600 version prefers to use unordered
+// comparisons for setne?
+def COND_UNE_NE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
+>;
+
//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//
@@ -154,10 +162,6 @@ class PrivateStore <SDPatternOperator op> : PrivateMemOp <
(ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;
-def extloadi8_private : PrivateLoad <extloadi8>;
-def sextloadi8_private : PrivateLoad <sextloadi8>;
-def extloadi16_private : PrivateLoad <extloadi16>;
-def sextloadi16_private : PrivateLoad <sextloadi16>;
def load_private : PrivateLoad <load>;
def truncstorei8_private : PrivateStore <truncstorei8>;
@@ -221,6 +225,9 @@ def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
+def extloadi8_private : PrivateLoad <az_extloadi8>;
+def sextloadi8_private : PrivateLoad <sextloadi8>;
+
def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
@@ -257,6 +264,9 @@ def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
+def extloadi16_private : PrivateLoad <az_extloadi16>;
+def sextloadi16_private : PrivateLoad <sextloadi16>;
+
def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
@@ -403,11 +413,6 @@ def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//
-def fmad : PatFrag <
- (ops node:$src0, node:$src1, node:$src2),
- (fadd (fmul node:$src0, node:$src1), node:$src2)
->;
-
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
@@ -428,6 +433,11 @@ def FP_ONE : PatLeaf <
[{return N->isExactlyValue(1.0);}]
>;
+def FP_HALF : PatLeaf <
+ (fpimm),
+ [{return N->isExactlyValue(0.5);}]
+>;
+
let isCodeGenOnly = 1, isPseudo = 1 in {
let usesCustomInserter = 1 in {
@@ -575,7 +585,7 @@ applied.
def legalshift32 : ImmLeaf <i32, [{return Imm >=0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
- SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>;
+ SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(countTrailingOnes(N->getZExtValue()), MVT::i32);}]>>;
class BFEPattern <Instruction BFE> : Pat <
(and (srl i32:$x, legalshift32:$y), bfemask:$z),
@@ -593,6 +603,20 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat <
// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
+// Special conversion patterns
+
+def cvt_rpi_i32_f32 : PatFrag <
+ (ops node:$src),
+ (fp_to_sint (ffloor (fadd $src, FP_HALF))),
+ [{ (void) N; return TM.Options.NoNaNsFPMath; }]
+>;
+
+def cvt_flr_i32_f32 : PatFrag <
+ (ops node:$src),
+ (fp_to_sint (ffloor $src)),
+ [{ (void)N; return TM.Options.NoNaNsFPMath; }]
+>;
+
/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
(mul U24:$x, U24:$y),
@@ -639,17 +663,10 @@ class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
(RcpInst $src)
>;
-multiclass RsqPat<Instruction RsqInst, ValueType vt> {
- def : Pat <
- (fdiv FP_ONE, (fsqrt vt:$src)),
- (RsqInst $src)
- >;
-
- def : Pat <
- (AMDGPUrcp (fsqrt vt:$src)),
- (RsqInst $src)
- >;
-}
+class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
+ (AMDGPUrcp (fsqrt vt:$src)),
+ (RsqInst $src)
+>;
include "R600Instructions.td"
include "R700Instructions.td"
diff --git a/lib/Target/R600/AMDGPUMCInstLower.cpp b/lib/Target/R600/AMDGPUMCInstLower.cpp
index bca027f..f047ed0 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.cpp
+++ b/lib/Target/R600/AMDGPUMCInstLower.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -39,37 +40,23 @@ AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st):
Ctx(ctx), ST(st)
{ }
-enum AMDGPUMCInstLower::SISubtarget
-AMDGPUMCInstLower::AMDGPUSubtargetToSISubtarget(unsigned) const {
- return AMDGPUMCInstLower::SI;
-}
-
-unsigned AMDGPUMCInstLower::getMCOpcode(unsigned MIOpcode) const {
+void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
- int MCOpcode = AMDGPU::getMCOpcode(MIOpcode,
- AMDGPUSubtargetToSISubtarget(ST.getGeneration()));
- if (MCOpcode == -1)
- MCOpcode = MIOpcode;
+ int MCOpcode = ST.getInstrInfo()->pseudoToMCOpcode(MI->getOpcode());
- return MCOpcode;
-}
-
-void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
+ if (MCOpcode == -1) {
+ LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
+ "a target-specific version: " + Twine(MI->getOpcode()));
+ }
- OutMI.setOpcode(getMCOpcode(MI->getOpcode()));
+ OutMI.setOpcode(MCOpcode);
for (const MachineOperand &MO : MI->explicit_operands()) {
MCOperand MCOp;
switch (MO.getType()) {
default:
llvm_unreachable("unknown operand type");
- case MachineOperand::MO_FPImmediate: {
- const APFloat &FloatValue = MO.getFPImm()->getValueAPF();
- assert(&FloatValue.getSemantics() == &APFloat::IEEEsingle &&
- "Only floating point immediates are supported at the moment.");
- MCOp = MCOperand::CreateFPImm(FloatValue.convertToFloat());
- break;
- }
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
break;
@@ -93,18 +80,24 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
MCOp = MCOperand::CreateExpr(Expr);
break;
}
+ case MachineOperand::MO_ExternalSymbol: {
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(StringRef(MO.getSymbolName()));
+ const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ MCOp = MCOperand::CreateExpr(Expr);
+ break;
+ }
}
OutMI.addOperand(MCOp);
}
}
void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- AMDGPUMCInstLower MCInstLowering(OutContext,
- MF->getTarget().getSubtarget<AMDGPUSubtarget>());
+ const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
+ AMDGPUMCInstLower MCInstLowering(OutContext, STI);
#ifdef _DEBUG
StringRef Err;
- if (!TM.getSubtargetImpl()->getInstrInfo()->verifyInstruction(MI, Err)) {
+ if (!STI.getInstrInfo()->verifyInstruction(MI, Err)) {
errs() << "Warning: Illegal instruction detected: " << Err << "\n";
MI->dump();
}
@@ -122,15 +115,15 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInstLowering.lower(MI, TmpInst);
EmitToStreamer(OutStreamer, TmpInst);
- if (DisasmEnabled) {
+ if (STI.dumpCode()) {
// Disassemble instruction/operands to text.
DisasmLines.resize(DisasmLines.size() + 1);
std::string &DisasmLine = DisasmLines.back();
raw_string_ostream DisasmStream(DisasmLine);
AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
- *TM.getSubtargetImpl()->getInstrInfo(),
- *TM.getSubtargetImpl()->getRegisterInfo());
+ *MF->getSubtarget().getInstrInfo(),
+ *MF->getSubtarget().getRegisterInfo());
InstPrinter.printInst(&TmpInst, DisasmStream, StringRef());
// Disassemble instruction/operands to hex representation.
@@ -141,7 +134,7 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCObjectStreamer &ObjStreamer = (MCObjectStreamer &)OutStreamer;
MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
InstEmitter.EncodeInstruction(TmpInst, CodeStream, Fixups,
- TM.getSubtarget<MCSubtargetInfo>());
+ MF->getSubtarget<MCSubtargetInfo>());
CodeStream.flush();
HexLines.resize(HexLines.size() + 1);
diff --git a/lib/Target/R600/AMDGPUMCInstLower.h b/lib/Target/R600/AMDGPUMCInstLower.h
index 00d1f1b..d322fe0 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.h
+++ b/lib/Target/R600/AMDGPUMCInstLower.h
@@ -19,22 +19,9 @@ class MCContext;
class MCInst;
class AMDGPUMCInstLower {
-
- // This must be kept in sync with the SISubtarget class in SIInstrInfo.td
- enum SISubtarget {
- SI = 0
- };
-
MCContext &Ctx;
const AMDGPUSubtarget &ST;
- /// Convert a member of the AMDGPUSubtarget::Generation enum to the
- /// SISubtarget enum.
- enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) const;
-
- /// Get the MC opcode for this MachineInstr.
- unsigned getMCOpcode(unsigned MIOpcode) const;
-
public:
AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &ST);
diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp
index 0f3f9e2..21c7da6 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/lib/Target/R600/AMDGPUMachineFunction.cpp
@@ -15,9 +15,7 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
LDSSize(0),
ScratchSize(0),
IsKernel(true) {
- AttributeSet Set = MF.getFunction()->getAttributes();
- Attribute A = Set.getAttribute(AttributeSet::FunctionIndex,
- ShaderTypeAttribute);
+ Attribute A = MF.getFunction()->getFnAttribute(ShaderTypeAttribute);
if (A.isStringAttribute()) {
StringRef Str = A.getValueAsString();
diff --git a/lib/Target/R600/AMDGPURegisterInfo.cpp b/lib/Target/R600/AMDGPURegisterInfo.cpp
index 3433280..57b054b 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.cpp
+++ b/lib/Target/R600/AMDGPURegisterInfo.cpp
@@ -42,8 +42,7 @@ void AMDGPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
}
unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- assert(!"Subroutines not supported yet");
- return 0;
+ return AMDGPU::NoRegister;
}
unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp
index 9d09a19..70c8525 100644
--- a/lib/Target/R600/AMDGPUSubtarget.cpp
+++ b/lib/Target/R600/AMDGPUSubtarget.cpp
@@ -16,11 +16,11 @@
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
-#include "SIInstrInfo.h"
#include "SIISelLowering.h"
+#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SmallString.h"
-
-#include "llvm/ADT/SmallString.h"
+#include "llvm/CodeGen/MachineScheduler.h"
using namespace llvm;
@@ -31,22 +31,9 @@ using namespace llvm;
#define GET_SUBTARGETINFO_CTOR
#include "AMDGPUGenSubtargetInfo.inc"
-static std::string computeDataLayout(const AMDGPUSubtarget &ST) {
- std::string Ret = "e-p:32:32";
-
- if (ST.is64bit()) {
- // 32-bit private, local, and region pointers. 64-bit global and constant.
- Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
- }
-
- Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
- "-v512:512-v1024:1024-v2048:2048-n32:64";
-
- return Ret;
-}
-
AMDGPUSubtarget &
-AMDGPUSubtarget::initializeSubtargetDependencies(StringRef GPU, StringRef FS) {
+AMDGPUSubtarget::initializeSubtargetDependencies(StringRef TT, StringRef GPU,
+ StringRef FS) {
// Determine default and user-specified characteristics
// On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
// enabled, but some instructions do not respect them and they run at the
@@ -59,6 +46,9 @@ AMDGPUSubtarget::initializeSubtargetDependencies(StringRef GPU, StringRef FS) {
SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
FullFS += FS;
+ if (GPU == "" && Triple(TT).getArch() == Triple::amdgcn)
+ GPU = "SI";
+
ParseSubtargetFeatures(GPU, FullFS);
// FIXME: I don't think Evergreen has any useful support for
@@ -76,21 +66,24 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef GPU, StringRef FS,
: AMDGPUGenSubtargetInfo(TT, GPU, FS), DevName(GPU), Is64bit(false),
DumpCode(false), R600ALUInst(false), HasVertexCache(false),
TexVTXClauseSize(0), Gen(AMDGPUSubtarget::R600), FP64(false),
- FP64Denormals(false), FP32Denormals(false), CaymanISA(false),
- FlatAddressSpace(false), EnableIRStructurizer(true),
- EnablePromoteAlloca(false), EnableIfCvt(true),
- EnableLoadStoreOpt(false), WavefrontSize(0), CFALUBug(false), LocalMemorySize(0),
- DL(computeDataLayout(initializeSubtargetDependencies(GPU, FS))),
+ FP64Denormals(false), FP32Denormals(false), FastFMAF32(false),
+ CaymanISA(false), FlatAddressSpace(false), EnableIRStructurizer(true),
+ EnablePromoteAlloca(false), EnableIfCvt(true), EnableLoadStoreOpt(false),
+ WavefrontSize(0), CFALUBug(false), LocalMemorySize(0),
+ EnableVGPRSpilling(false),
FrameLowering(TargetFrameLowering::StackGrowsUp,
64 * 16, // Maximum stack alignment (long16)
0),
- InstrItins(getInstrItineraryForCPU(GPU)) {
+ InstrItins(getInstrItineraryForCPU(GPU)), TargetTriple(TT) {
+
+ initializeSubtargetDependencies(TT, GPU, FS);
+
if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
InstrInfo.reset(new R600InstrInfo(*this));
- TLInfo.reset(new R600TargetLowering(TM));
+ TLInfo.reset(new R600TargetLowering(TM, *this));
} else {
InstrInfo.reset(new SIInstrInfo(*this));
- TLInfo.reset(new SITargetLowering(TM));
+ TLInfo.reset(new SITargetLowering(TM, *this));
}
}
@@ -107,3 +100,33 @@ unsigned AMDGPUSubtarget::getStackEntrySize() const {
llvm_unreachable("Illegal wavefront size.");
}
}
+
+unsigned AMDGPUSubtarget::getAmdKernelCodeChipID() const {
+ switch(getGeneration()) {
+ default: llvm_unreachable("ChipID unknown");
+ case SEA_ISLANDS: return 12;
+ }
+}
+
+bool AMDGPUSubtarget::isVGPRSpillingEnabled(
+ const SIMachineFunctionInfo *MFI) const {
+ return MFI->getShaderType() == ShaderType::COMPUTE || EnableVGPRSpilling;
+}
+
+void AMDGPUSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin,
+ MachineInstr *end,
+ unsigned NumRegionInstrs) const {
+ if (getGeneration() >= SOUTHERN_ISLANDS) {
+
+ // Track register pressure so the scheduler can try to decrease
+ // pressure once register usage is above the threshold defined by
+ // SIRegisterInfo::getRegPressureSetLimit()
+ Policy.ShouldTrackPressure = true;
+
+ // Enabling both top down and bottom up scheduling seems to give us less
+ // register spills than just using one of these approaches on its own.
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = false;
+ }
+}
diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h
index f71d80a..1b0122c 100644
--- a/lib/Target/R600/AMDGPUSubtarget.h
+++ b/lib/Target/R600/AMDGPUSubtarget.h
@@ -20,7 +20,6 @@
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600ISelLowering.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -30,6 +29,8 @@
namespace llvm {
+class SIMachineFunctionInfo;
+
class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
public:
@@ -39,7 +40,8 @@ public:
EVERGREEN,
NORTHERN_ISLANDS,
SOUTHERN_ISLANDS,
- SEA_ISLANDS
+ SEA_ISLANDS,
+ VOLCANIC_ISLANDS,
};
private:
@@ -53,6 +55,7 @@ private:
bool FP64;
bool FP64Denormals;
bool FP32Denormals;
+ bool FastFMAF32;
bool CaymanISA;
bool FlatAddressSpace;
bool EnableIRStructurizer;
@@ -62,16 +65,18 @@ private:
unsigned WavefrontSize;
bool CFALUBug;
int LocalMemorySize;
+ bool EnableVGPRSpilling;
- const DataLayout DL;
AMDGPUFrameLowering FrameLowering;
std::unique_ptr<AMDGPUTargetLowering> TLInfo;
std::unique_ptr<AMDGPUInstrInfo> InstrInfo;
InstrItineraryData InstrItins;
+ Triple TargetTriple;
public:
AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS, TargetMachine &TM);
- AMDGPUSubtarget &initializeSubtargetDependencies(StringRef GPU, StringRef FS);
+ AMDGPUSubtarget &initializeSubtargetDependencies(StringRef TT, StringRef GPU,
+ StringRef FS);
const AMDGPUFrameLowering *getFrameLowering() const override {
return &FrameLowering;
@@ -85,7 +90,6 @@ public:
AMDGPUTargetLowering *getTargetLowering() const override {
return TLInfo.get();
}
- const DataLayout *getDataLayout() const override { return &DL; }
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
@@ -124,6 +128,10 @@ public:
return FP64Denormals;
}
+ bool hasFastFMAF32() const {
+ return FastFMAF32;
+ }
+
bool hasFlatAddressSpace() const {
return FlatAddressSpace;
}
@@ -198,10 +206,16 @@ public:
return LocalMemorySize;
}
+ unsigned getAmdKernelCodeChipID() const;
+
bool enableMachineScheduler() const override {
- return getGeneration() <= NORTHERN_ISLANDS;
+ return true;
}
+ void overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin, MachineInstr *end,
+ unsigned NumRegionInstrs) const override;
+
// Helper functions to simplify if statements
bool isTargetELF() const {
return false;
@@ -217,6 +231,22 @@ public:
bool r600ALUEncoding() const {
return R600ALUInst;
}
+ bool isAmdHsaOS() const {
+ return TargetTriple.getOS() == Triple::AMDHSA;
+ }
+ bool isVGPRSpillingEnabled(const SIMachineFunctionInfo *MFI) const;
+
+ unsigned getMaxWavesPerCU() const {
+ if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ return 10;
+
+    // FIXME: Not sure what this is for other subtargets.
+ llvm_unreachable("do not know max waves per CU for this subtarget.");
+ }
+
+ bool enableSubRegLiveness() const override {
+ return false;
+ }
};
} // End namespace llvm
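
Because the Generation enumerators are ordered, VOLCANIC_ISLANDS can slot in after SEA_ISLANDS without disturbing existing threshold checks. A hedged sketch of the idiom (helper names hypothetical):

    // GCN parts, including the new VOLCANIC_ISLANDS generation:
    inline bool isGCN(const AMDGPUSubtarget &ST) {
      return ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS;
    }
    // R600-family parts (through Northern Islands) take the pre-GCN paths:
    inline bool isR600(const AMDGPUSubtarget &ST) {
      return ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS;
    }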
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index b2cd988..a862f3c 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -15,6 +15,7 @@
#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
+#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
@@ -27,7 +28,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
@@ -38,7 +39,8 @@ using namespace llvm;
extern "C" void LLVMInitializeR600Target() {
// Register the target
- RegisterTargetMachine<AMDGPUTargetMachine> X(TheAMDGPUTarget);
+ RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
+ RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
}
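
With the registration now split, the requested triple selects the concrete target machine. A hedged usage sketch (triple strings and CPU name illustrative; error handling elided):

    #include "llvm/Support/TargetRegistry.h"
    #include "llvm/Target/TargetMachine.h"

    void buildGCNMachine() {
      std::string Err;
      // "amdgcn--" resolves to TheGCNTarget and so builds a GCNTargetMachine;
      // an "r600--" triple resolves to TheAMDGPUTarget and builds an
      // R600TargetMachine instead.
      const llvm::Target *T = llvm::TargetRegistry::lookupTarget("amdgcn--", Err);
      llvm::TargetMachine *TM = T->createTargetMachine(
          "amdgcn--", "SI", "", llvm::TargetOptions(), llvm::Reloc::Default,
          llvm::CodeModel::Default, llvm::CodeGenOpt::Default);
      (void)TM;
    }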
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
@@ -49,12 +51,28 @@ static MachineSchedRegistry
SchedCustomRegistry("r600", "Run R600's custom scheduler",
createR600MachineScheduler);
+static std::string computeDataLayout(StringRef TT) {
+ Triple Triple(TT);
+ std::string Ret = "e-p:32:32";
+
+ if (Triple.getArch() == Triple::amdgcn) {
+ // 32-bit private, local, and region pointers. 64-bit global and constant.
+ Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
+ }
+
+ Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
+ "-v512:512-v1024:1024-v2048:2048-n32:64";
+
+ return Ret;
+}
+
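
A short sketch of reading the computed string back through llvm::DataLayout (address-space numbering as in the comment above; the values shown assume the amdgcn branch was taken):

    #include "llvm/IR/DataLayout.h"

    void inspectLayout() {
      llvm::DataLayout DL(computeDataLayout("amdgcn--"));
      unsigned PrivateBits = DL.getPointerSizeInBits(0); // default/private: 32
      unsigned GlobalBits  = DL.getPointerSizeInBits(1); // p1 (global): 64
      (void)PrivateBits;
      (void)GlobalBits;
    }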
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM,
CodeGenOpt::Level OptLevel)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
+ DL(computeDataLayout(TT)),
TLOF(new TargetLoweringObjectFileELF()),
Subtarget(TT, CPU, FS, *this), IntrinsicInfo() {
setRequiresStructuredCFG(true);
@@ -65,10 +83,33 @@ AMDGPUTargetMachine::~AMDGPUTargetMachine() {
delete TLOF;
}
+//===----------------------------------------------------------------------===//
+// R600 Target Machine (R600 -> Cayman)
+//===----------------------------------------------------------------------===//
+
+R600TargetMachine::R600TargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL) :
+ AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) { }
+
+
+//===----------------------------------------------------------------------===//
+// GCN Target Machine (SI+)
+//===----------------------------------------------------------------------===//
+
+GCNTargetMachine::GCNTargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL) :
+ AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) { }
+
+//===----------------------------------------------------------------------===//
+// AMDGPU Pass Setup
+//===----------------------------------------------------------------------===//
+
namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
- AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
+ AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
: TargetPassConfig(TM, PM) {}
AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
@@ -85,29 +126,38 @@ public:
void addIRPasses() override;
void addCodeGenPrepare() override;
+ virtual bool addPreISel() override;
+ virtual bool addInstSelector() override;
+};
+
+class R600PassConfig : public AMDGPUPassConfig {
+public:
+ R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
+ : AMDGPUPassConfig(TM, PM) { }
+
bool addPreISel() override;
- bool addInstSelector() override;
- bool addPreRegAlloc() override;
- bool addPostRegAlloc() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
-} // End of anonymous namespace
-TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
- return new AMDGPUPassConfig(this, PM);
-}
+class GCNPassConfig : public AMDGPUPassConfig {
+public:
+ GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
+ : AMDGPUPassConfig(TM, PM) { }
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
+};
-//===----------------------------------------------------------------------===//
-// AMDGPU Analysis Pass Setup
-//===----------------------------------------------------------------------===//
+} // End of anonymous namespace
-void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our AMDGPU pass. This
- // allows the AMDGPU pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createAMDGPUTargetTransformInfoPass(this));
+TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &F) { return TargetTransformInfo(AMDGPUTTIImpl(this)); });
}
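
Under the rewritten TTI framework, a legacy pipeline consumes this analysis through the generic wrapper pass rather than a target-registered ImmutablePass. A hedged sketch (helper name hypothetical):

    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/LegacyPassManager.h"

    void addAMDGPUTTI(llvm::legacy::PassManager &PM, AMDGPUTargetMachine &TM) {
      // The wrapper invokes the lambda above once per function, constructing
      // a fresh AMDGPUTTIImpl each time.
      PM.add(llvm::createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
    }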
void AMDGPUPassConfig::addIRPasses() {
@@ -129,7 +179,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
addPass(createAMDGPUPromoteAlloca(ST));
addPass(createSROAPass());
}
-
TargetPassConfig::addCodeGenPrepare();
}
@@ -139,84 +188,96 @@ AMDGPUPassConfig::addPreISel() {
addPass(createFlattenCFGPass());
if (ST.IsIRStructurizerEnabled())
addPass(createStructurizeCFGPass());
- if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
- addPass(createSinkingPass());
- addPass(createSITypeRewriter());
- addPass(createSIAnnotateControlFlowPass());
- } else {
- addPass(createR600TextureIntrinsicsReplacer());
- }
return false;
}
bool AMDGPUPassConfig::addInstSelector() {
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
-
addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
+ return false;
+}
- if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
- addPass(createSILowerI1CopiesPass());
- addPass(createSIFixSGPRCopiesPass(*TM));
- }
+//===----------------------------------------------------------------------===//
+// R600 Pass Setup
+//===----------------------------------------------------------------------===//
+bool R600PassConfig::addPreISel() {
+ AMDGPUPassConfig::addPreISel();
+ addPass(createR600TextureIntrinsicsReplacer());
return false;
}
-bool AMDGPUPassConfig::addPreRegAlloc() {
+void R600PassConfig::addPreRegAlloc() {
+ addPass(createR600VectorRegMerger(*TM));
+}
+
+void R600PassConfig::addPreSched2() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+ addPass(createR600EmitClauseMarkers(), false);
+ if (ST.isIfCvtEnabled())
+ addPass(&IfConverterID, false);
+ addPass(createR600ClauseMergePass(*TM), false);
+}
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
- addPass(createR600VectorRegMerger(*TM));
- } else {
- if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
- // Don't do this with no optimizations since it throws away debug info by
- // merging nonadjacent loads.
-
- // This should be run after scheduling, but before register allocation. It
- // also need extra copies to the address operand to be eliminated.
- initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
- insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
- }
-
- addPass(createSIShrinkInstructionsPass());
- addPass(createSIFixSGPRLiveRangesPass());
- }
- return false;
+void R600PassConfig::addPreEmitPass() {
+ addPass(createAMDGPUCFGStructurizerPass(), false);
+ addPass(createR600ExpandSpecialInstrsPass(*TM), false);
+ addPass(&FinalizeMachineBundlesID, false);
+ addPass(createR600Packetizer(*TM), false);
+ addPass(createR600ControlFlowFinalizer(*TM), false);
}
-bool AMDGPUPassConfig::addPostRegAlloc() {
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new R600PassConfig(this, PM);
+}
- addPass(createSIShrinkInstructionsPass());
- if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
- addPass(createSIInsertWaits(*TM));
- }
+//===----------------------------------------------------------------------===//
+// GCN Pass Setup
+//===----------------------------------------------------------------------===//
+
+bool GCNPassConfig::addPreISel() {
+ AMDGPUPassConfig::addPreISel();
+ addPass(createSinkingPass());
+ addPass(createSITypeRewriter());
+ addPass(createSIAnnotateControlFlowPass());
return false;
}
-bool AMDGPUPassConfig::addPreSched2() {
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
-
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
- addPass(createR600EmitClauseMarkers());
- if (ST.isIfCvtEnabled())
- addPass(&IfConverterID);
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
- addPass(createR600ClauseMergePass(*TM));
+bool GCNPassConfig::addInstSelector() {
+ AMDGPUPassConfig::addInstSelector();
+ addPass(createSILowerI1CopiesPass());
+ addPass(createSIFixSGPRCopiesPass(*TM));
+ addPass(createSIFoldOperandsPass());
return false;
}
-bool AMDGPUPassConfig::addPreEmitPass() {
+void GCNPassConfig::addPreRegAlloc() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
- addPass(createAMDGPUCFGStructurizerPass());
- addPass(createR600ExpandSpecialInstrsPass(*TM));
- addPass(&FinalizeMachineBundlesID);
- addPass(createR600Packetizer(*TM));
- addPass(createR600ControlFlowFinalizer(*TM));
- } else {
- addPass(createSILowerControlFlowPass(*TM));
+ if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
+ // Don't do this with no optimizations since it throws away debug info by
+ // merging nonadjacent loads.
+
+ // This should be run after scheduling, but before register allocation. It
+    // also needs extra copies to the address operand to be eliminated.
+ initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
+ insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
}
+ addPass(createSIShrinkInstructionsPass(), false);
+ addPass(createSIFixSGPRLiveRangesPass(), false);
+}
- return false;
+void GCNPassConfig::addPostRegAlloc() {
+ addPass(createSIPrepareScratchRegs(), false);
+ addPass(createSIShrinkInstructionsPass(), false);
+}
+
+void GCNPassConfig::addPreSched2() {
+ addPass(createSIInsertWaits(*TM), false);
+}
+
+void GCNPassConfig::addPreEmitPass() {
+ addPass(createSILowerControlFlowPass(*TM), false);
+}
+
+TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new GCNPassConfig(this, PM);
}
diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/R600/AMDGPUTargetMachine.h
index 1b3dbce..a691536 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.h
+++ b/lib/Target/R600/AMDGPUTargetMachine.h
@@ -24,7 +24,15 @@
namespace llvm {
+//===----------------------------------------------------------------------===//
+// AMDGPU Target Machine (R600+)
+//===----------------------------------------------------------------------===//
+
class AMDGPUTargetMachine : public LLVMTargetMachine {
+private:
+ const DataLayout DL;
+
+protected:
TargetLoweringObjectFile *TLOF;
AMDGPUSubtarget Subtarget;
AMDGPUIntrinsicInfo IntrinsicInfo;
@@ -34,21 +42,52 @@ public:
StringRef CPU, TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
~AMDGPUTargetMachine();
+  // FIXME: This is currently broken; the DataLayout needs to move to
+ // the target machine.
+ const DataLayout *getDataLayout() const override {
+ return &DL;
+ }
const AMDGPUSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
const AMDGPUIntrinsicInfo *getIntrinsicInfo() const override {
return &IntrinsicInfo;
}
- TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
- /// \brief Register R600 analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
TargetLoweringObjectFile *getObjFileLowering() const override {
return TLOF;
}
};
+//===----------------------------------------------------------------------===//
+// R600 Target Machine (R600 -> Cayman)
+//===----------------------------------------------------------------------===//
+
+class R600TargetMachine : public AMDGPUTargetMachine {
+
+public:
+ R600TargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+};
+
+//===----------------------------------------------------------------------===//
+// GCN Target Machine (SI+)
+//===----------------------------------------------------------------------===//
+
+class GCNTargetMachine : public AMDGPUTargetMachine {
+
+public:
+ GCNTargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+};
+
} // End namespace llvm
#endif
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
index e7bc006..68f4600 100644
--- a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
+++ b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
@@ -15,11 +15,11 @@
//
//===----------------------------------------------------------------------===//
-#include "AMDGPU.h"
-#include "AMDGPUTargetMachine.h"
+#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -27,80 +27,10 @@ using namespace llvm;
#define DEBUG_TYPE "AMDGPUtti"
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeAMDGPUTTIPass(PassRegistry &);
-}
-
-namespace {
-
-class AMDGPUTTI final : public ImmutablePass, public TargetTransformInfo {
- const AMDGPUTargetMachine *TM;
- const AMDGPUSubtarget *ST;
- const AMDGPUTargetLowering *TLI;
-
- /// Estimate the overhead of scalarizing an instruction. Insert and Extract
- /// are set if the result needs to be inserted and/or extracted from vectors.
- unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
-
-public:
- AMDGPUTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- AMDGPUTTI(const AMDGPUTargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
- TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override { pushTTIStack(this); }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo *)this;
- return this;
- }
-
- bool hasBranchDivergence() const override;
-
- void getUnrollingPreferences(const Function *F, Loop *L,
- UnrollingPreferences &UP) const override;
-
- PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const override;
-
- unsigned getNumberOfRegisters(bool Vector) const override;
- unsigned getRegisterBitWidth(bool Vector) const override;
- unsigned getMaxInterleaveFactor() const override;
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(AMDGPUTTI, TargetTransformInfo, "AMDGPUtti",
- "AMDGPU Target Transform Info", true, true, false)
-char AMDGPUTTI::ID = 0;
-
-ImmutablePass *
-llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) {
- return new AMDGPUTTI(TM);
-}
-
-bool AMDGPUTTI::hasBranchDivergence() const { return true; }
-
-void AMDGPUTTI::getUnrollingPreferences(const Function *, Loop *L,
- UnrollingPreferences &UP) const {
+void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
+ TTI::UnrollingPreferences &UP) {
UP.Threshold = 300; // Twice the default.
- UP.Count = UINT_MAX;
+ UP.MaxCount = UINT_MAX;
UP.Partial = true;
// TODO: Do we want runtime unrolling?
@@ -130,13 +60,7 @@ void AMDGPUTTI::getUnrollingPreferences(const Function *, Loop *L,
}
}
-AMDGPUTTI::PopcntSupportKind
-AMDGPUTTI::getPopcntSupport(unsigned TyWidth) const {
- assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
- return ST->hasBCNT(TyWidth) ? PSK_FastHardware : PSK_Software;
-}
-
-unsigned AMDGPUTTI::getNumberOfRegisters(bool Vec) const {
+unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
if (Vec)
return 0;
@@ -147,11 +71,9 @@ unsigned AMDGPUTTI::getNumberOfRegisters(bool Vec) const {
return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}
-unsigned AMDGPUTTI::getRegisterBitWidth(bool) const {
- return 32;
-}
+unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool) { return 32; }
-unsigned AMDGPUTTI::getMaxInterleaveFactor() const {
+unsigned AMDGPUTTIImpl::getMaxInterleaveFactor() {
// Semi-arbitrary large amount.
return 64;
}
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.h b/lib/Target/R600/AMDGPUTargetTransformInfo.h
new file mode 100644
index 0000000..4abbdf2
--- /dev/null
+++ b/lib/Target/R600/AMDGPUTargetTransformInfo.h
@@ -0,0 +1,78 @@
+//===-- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file describes a TargetTransformInfo::Concept conforming object specific to the
+/// AMDGPU target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
+
+#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class AMDGPUTTIImpl : public BasicTTIImplBase<AMDGPUTTIImpl> {
+ typedef BasicTTIImplBase<AMDGPUTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const AMDGPUSubtarget *ST;
+ const AMDGPUTargetLowering *TLI;
+
+ const AMDGPUSubtarget *getST() const { return ST; }
+ const AMDGPUTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM)
+ : BaseT(TM), ST(TM->getSubtargetImpl()), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ AMDGPUTTIImpl(const AMDGPUTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ AMDGPUTTIImpl(AMDGPUTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ AMDGPUTTIImpl &operator=(const AMDGPUTTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ AMDGPUTTIImpl &operator=(AMDGPUTTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ bool hasBranchDivergence() { return true; }
+
+ void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
+
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ return ST->hasBCNT(TyWidth) ? TTI::PSK_FastHardware : TTI::PSK_Software;
+ }
+
+ unsigned getNumberOfRegisters(bool Vector);
+ unsigned getRegisterBitWidth(bool Vector);
+ unsigned getMaxInterleaveFactor();
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/R600/AMDKernelCodeT.h b/lib/Target/R600/AMDKernelCodeT.h
new file mode 100644
index 0000000..4d3041f
--- /dev/null
+++ b/lib/Target/R600/AMDKernelCodeT.h
@@ -0,0 +1,704 @@
+//===-- AMDKernelCodeT.h - AMD Kernel Code struct definitions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file AMDKernelCodeT.h
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDKERNELCODET_H
+#define AMDKERNELCODET_H
+
+#include <cstddef>
+#include <cstdint>
+
+//---------------------------------------------------------------------------//
+// AMD Kernel Code, and its dependencies //
+//---------------------------------------------------------------------------//
+
+typedef uint8_t hsa_powertwo8_t;
+typedef uint32_t hsa_ext_code_kind_t;
+typedef uint8_t hsa_ext_brig_profile8_t;
+typedef uint8_t hsa_ext_brig_machine_model8_t;
+typedef uint64_t hsa_ext_control_directive_present64_t;
+typedef uint16_t hsa_ext_exception_kind16_t;
+typedef uint32_t hsa_ext_code_kind32_t;
+
+typedef struct hsa_dim3_s {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+} hsa_dim3_t;
+
+/// The version of the amd_*_code_t struct. Minor versions must be
+/// backward compatible.
+typedef uint32_t amd_code_version32_t;
+enum amd_code_version_t {
+ AMD_CODE_VERSION_MAJOR = 0,
+ AMD_CODE_VERSION_MINOR = 1
+};
+
+/// The values used to define the number of bytes to use for the
+/// swizzle element size.
+enum amd_element_byte_size_t {
+ AMD_ELEMENT_2_BYTES = 0,
+ AMD_ELEMENT_4_BYTES = 1,
+ AMD_ELEMENT_8_BYTES = 2,
+ AMD_ELEMENT_16_BYTES = 3
+};
+
+/// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
+/// COMPUTE_PGM_RSRC2 registers.
+typedef uint64_t amd_compute_pgm_resource_register64_t;
+
+/// Every amd_*_code_t has the following properties, which are composed of
+/// a number of bit fields. Every bit field has a mask (AMD_CODE_PROPERTY_*),
+/// bit width (AMD_CODE_PROPERTY_*_WIDTH, and bit shift amount
+/// (AMD_CODE_PROPERTY_*_SHIFT) for convenient access. Unused bits must be 0.
+///
+/// (Note that C bit fields are not used here: their layout is
+/// implementation defined in the C standard, and so cannot be relied on
+/// to specify an ABI.)
+typedef uint32_t amd_code_property32_t;
+enum amd_code_property_mask_t {
+
+ /// Enable the setup of the SGPR user data registers
+ /// (AMD_CODE_PROPERTY_ENABLE_SGPR_*), see documentation of amd_kernel_code_t
+ /// for initial register state.
+ ///
+  /// The total number of SGPR user data registers requested must not
+ /// exceed 16. Any requests beyond 16 will be ignored.
+ ///
+ /// Used to set COMPUTE_PGM_RSRC2.USER_SGPR (set to total count of
+ /// SGPR user data registers enabled up to 16).
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT = 0,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT = 2,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT = 3,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT = 4,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT = 5,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT = 6,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT = 7,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT = 8,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT,
+
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT = 9,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT,
+
+ /// Control wave ID base counter for GDS ordered-append. Used to set
+ /// COMPUTE_DISPATCH_INITIATOR.ORDERED_APPEND_ENBL. (Not sure if
+ /// ORDERED_APPEND_MODE also needs to be settable)
+ AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 10,
+ AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH = 1,
+ AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS = ((1 << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT,
+
+ /// The interleave (swizzle) element size in bytes required by the
+ /// code for private memory. This must be 2, 4, 8 or 16. This value
+ /// is provided to the finalizer when it is invoked and is recorded
+ /// here. The hardware will interleave the memory requests of each
+ /// lane of a wavefront by this element size to ensure each
+  /// work-item gets a distinct memory location. Therefore, the
+ /// finalizer ensures that all load and store operations done to
+ /// private memory do not exceed this size. For example, if the
+ /// element size is 4 (32-bits or dword) and a 64-bit value must be
+ /// loaded, the finalizer will generate two 32-bit loads. This
+  /// ensures that the interleaving will get the work-item
+ /// specific dword for both halves of the 64-bit value. If it just
+ /// did a 64-bit load then it would get one dword which belonged to
+ /// its own work-item, but the second dword would belong to the
+ /// adjacent lane work-item since the interleaving is in dwords.
+ ///
+ /// The value used must match the value that the runtime configures
+ /// the GPU flat scratch (SH_STATIC_MEM_CONFIG.ELEMENT_SIZE). This
+ /// is generally DWORD.
+ ///
+ /// Use values from the amd_element_byte_size_t enum.
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 11,
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH = 2,
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE = ((1 << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT,
+
+ /// Are global memory addresses 64 bits. Must match
+ /// amd_kernel_code_t.hsail_machine_model ==
+ /// HSA_MACHINE_LARGE. Must also match
+ /// SH_MEM_CONFIG.PTR32 (GFX6 (SI)/GFX7 (CI)),
+ /// SH_MEM_CONFIG.ADDRESS_MODE (GFX8 (VI)+).
+ AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 13,
+ AMD_CODE_PROPERTY_IS_PTR64_WIDTH = 1,
+ AMD_CODE_PROPERTY_IS_PTR64 = ((1 << AMD_CODE_PROPERTY_IS_PTR64_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_PTR64_SHIFT,
+
+ /// Indicate if the generated ISA is using a dynamically sized call
+ /// stack. This can happen if calls are implemented using a call
+ /// stack and recursion, alloca or calls to indirect functions are
+ /// present. In these cases the Finalizer cannot compute the total
+ /// private segment size at compile time. In this case the
+ /// workitem_private_segment_byte_size only specifies the statically
+ /// know private segment size, and additional space must be added
+ /// for the call stack.
+ AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 14,
+ AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH = 1,
+ AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK = ((1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT,
+
+ /// Indicate if code generated has support for debugging.
+ AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 15,
+ AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH = 1,
+ AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT
+};
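
A minimal sketch of how the mask/width/shift triples above are meant to be used (helper names are hypothetical, not part of this header):

    #include <cstdint>

    // Extract the two-bit private element size field from a property word.
    inline uint32_t getPrivateElementSize(amd_code_property32_t Props) {
      return (Props & AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE) >>
             AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT;
    }

    // Insert a value from amd_element_byte_size_t into the same field.
    inline amd_code_property32_t
    setPrivateElementSize(amd_code_property32_t Props, uint32_t Val) {
      Props &= ~AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE; // clear the field
      return Props | ((Val << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT) &
                      AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);
    }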
+
+/// @brief The hsa_ext_control_directives_t specifies the values for the HSAIL
+/// control directives. These control how the finalizer generates code. This
+/// struct is used both as an argument to hsaFinalizeKernel to specify values for
+/// the control directives, and is used in HsaKernelCode to record the values of
+/// the control directives that the finalize used when generating the code which
+/// either came from the finalizer argument or explicit HSAIL control
+/// directives. See the definition of the control directives in HSA Programmer's
+/// Reference Manual which also defines how the values specified as finalizer
+/// arguments have to agree with the control directives in the HSAIL code.
+typedef struct hsa_ext_control_directives_s {
+ /// This is a bit set indicating which control directives have been
+ /// specified. If the value is 0 then there are no control directives specified
+ /// and the rest of the fields can be ignored. The bits are accessed using the
+ /// hsa_ext_control_directives_present_mask_t. Any control directive that is not
+ /// enabled in this bit set must have the value of all 0s.
+ hsa_ext_control_directive_present64_t enabled_control_directives;
+
+ /// If enableBreakExceptions is not enabled then must be 0, otherwise must be
+ /// non-0 and specifies the set of HSAIL exceptions that must have the BREAK
+ /// policy enabled. If this set is not empty then the generated code may have
+ /// lower performance than if the set is empty. If the kernel being finalized
+ /// has any enablebreakexceptions control directives, then the values specified
+ /// by this argument are unioned with the values in these control
+ /// directives. If any of the functions the kernel calls have an
+ /// enablebreakexceptions control directive, then they must be equal or a
+ /// subset of, this union.
+ hsa_ext_exception_kind16_t enable_break_exceptions;
+
+ /// If enableDetectExceptions is not enabled then must be 0, otherwise must be
+ /// non-0 and specifies the set of HSAIL exceptions that must have the DETECT
+ /// policy enabled. If this set is not empty then the generated code may have
+ /// lower performance than if the set is empty. However, an implementation
+ /// should endeavour to make the performance impact small. If the kernel being
+ /// finalized has any enabledetectexceptions control directives, then the
+ /// values specified by this argument are unioned with the values in these
+ /// control directives. If any of the functions the kernel calls have an
+ /// enabledetectexceptions control directive, then they must be equal or a
+ /// subset of, this union.
+ hsa_ext_exception_kind16_t enable_detect_exceptions;
+
+ /// If maxDynamicGroupSize is not enabled then must be 0, and any amount of
+ /// dynamic group segment can be allocated for a dispatch, otherwise the value
+ /// specifies the maximum number of bytes of dynamic group segment that can be
+ /// allocated for a dispatch. If the kernel being finalized has any
+ /// maxdynamicsize control directives, then the values must be the same, and
+ /// must be the same as this argument if it is enabled. This value can be used
+ /// by the finalizer to determine the maximum number of bytes of group memory
+ /// used by each work-group by adding this value to the group memory required
+ /// for all group segment variables used by the kernel and all functions it
+ /// calls, and group memory used to implement other HSAIL features such as
+ /// fbarriers and the detect exception operations. This can allow the finalizer
+ /// to determine the expected number of work-groups that can be executed by a
+ /// compute unit and allow more resources to be allocated to the work-items if
+ /// it is known that fewer work-groups can be executed due to group memory
+ /// limitations.
+ uint32_t max_dynamic_group_size;
+
+ /// If maxFlatGridSize is not enabled then must be 0, otherwise must be greater
+ /// than 0. See HSA Programmer's Reference Manual description of
+ /// maxflatgridsize control directive.
+ uint32_t max_flat_grid_size;
+
+ /// If maxFlatWorkgroupSize is not enabled then must be 0, otherwise must be
+ /// greater than 0. See HSA Programmer's Reference Manual description of
+ /// maxflatworkgroupsize control directive.
+ uint32_t max_flat_workgroup_size;
+
+ /// If requestedWorkgroupsPerCu is not enabled then must be 0, and the
+ /// finalizer is free to generate ISA that may result in any number of
+ /// work-groups executing on a single compute unit. Otherwise, the finalizer
+ /// should attempt to generate ISA that will allow the specified number of
+ /// work-groups to execute on a single compute unit. This is only a hint and
+ /// can be ignored by the finalizer. If the kernel being finalized, or any of
+ /// the functions it calls, has a requested control directive, then the values
+ /// must be the same. This can be used to determine the number of resources
+ /// that should be allocated to a single work-group and work-item. For example,
+ /// a low value may allow more resources to be allocated, resulting in higher
+ /// per work-item performance, as it is known there will never be more than the
+ /// specified number of work-groups actually executing on the compute
+ /// unit. Conversely, a high value may allocate fewer resources, resulting in
+ /// lower per work-item performance, which is offset by the fact it allows more
+ /// work-groups to actually execute on the compute unit.
+ uint32_t requested_workgroups_per_cu;
+
+ /// If not enabled then all elements for Dim3 must be 0, otherwise every
+ /// element must be greater than 0. See HSA Programmer's Reference Manual
+ /// description of requiredgridsize control directive.
+ hsa_dim3_t required_grid_size;
+
+ /// If requiredWorkgroupSize is not enabled then all elements for Dim3 must be
+ /// 0, and the produced code can be dispatched with any legal work-group range
+ /// consistent with the dispatch dimensions. Otherwise, the code produced must
+ /// always be dispatched with the specified work-group range. No element of the
+ /// specified range must be 0. It must be consistent with required_dimensions
+ /// and max_flat_workgroup_size. If the kernel being finalized, or any of the
+ /// functions it calls, has a requiredworkgroupsize control directive, then the
+ /// values must be the same. Specifying a value can allow the finalizer to
+ /// optimize work-group id operations, and if the number of work-items in the
+ /// work-group is less than the WAVESIZE then barrier operations can be
+ /// optimized to just a memory fence.
+ hsa_dim3_t required_workgroup_size;
+
+ /// If requiredDim is not enabled then must be 0 and the produced kernel code
+ /// can be dispatched with 1, 2 or 3 dimensions. If enabled then the value is
+ /// 1..3 and the code produced must only be dispatched with a dimension that
+ /// matches. Other values are illegal. If the kernel being finalized, or any of
+ /// the functions it calls, has a requireddimsize control directive, then the
+ /// values must be the same. This can be used to optimize the code generated to
+ /// compute the absolute and flat work-group and work-item id, and the dim
+ /// HSAIL operations.
+ uint8_t required_dim;
+
+ /// Reserved. Must be 0.
+ uint8_t reserved[75];
+} hsa_ext_control_directives_t;
+
+/// AMD Kernel Code Object (amd_kernel_code_t). GPU CP uses the AMD Kernel
+/// Code Object to set up the hardware to execute the kernel dispatch.
+///
+/// Initial Kernel Register State.
+///
+/// Initial kernel register state will be set up by CP/SPI prior to the start
+/// of execution of every wavefront. This is limited by the constraints of the
+/// current hardware.
+///
+/// The order of the SGPR registers is defined, but the Finalizer can specify
+/// which ones are actually setup in the amd_kernel_code_t object using the
+/// enable_sgpr_* bit fields. The register numbers used for enabled registers
+/// are dense starting at SGPR0: the first enabled register is SGPR0, the next
+/// enabled register is SGPR1 etc.; disabled registers do not have an SGPR
+/// number.
+///
+/// The initial SGPRs comprise up to 16 User SGPRs that are set up by CP and
+/// apply to all waves of the grid. It is possible to specify more than 16 User
+/// SGPRs using the enable_sgpr_* bit fields, in which case only the first 16
+/// are actually initialized. These are then immediately followed by the System
+/// SGPRs that are set up by ADC/SPI and can have different values for each wave
+/// of the grid dispatch.
+///
+/// SGPR register initial state is defined as follows:
+///
+/// Private Segment Buffer (enable_sgpr_private_segment_buffer):
+/// Number of User SGPR registers: 4. V# that can be used, together with
+/// Scratch Wave Offset as an offset, to access the Private/Spill/Arg
+/// segments using a segment address. It must be set as follows:
+/// - Base address: of the scratch memory area used by the dispatch. It
+/// does not include the scratch wave offset. It will be the per process
+/// SH_HIDDEN_PRIVATE_BASE_VMID plus any offset from this dispatch (for
+/// example there may be a per pipe offset, or per AQL Queue offset).
+/// - Stride + data_format: Element Size * Index Stride (???)
+/// - Cache swizzle: ???
+/// - Swizzle enable: SH_STATIC_MEM_CONFIG.SWIZZLE_ENABLE (must be 1 for
+/// scratch)
+/// - Num records: Flat Scratch Work Item Size / Element Size (???)
+/// - Dst_sel_*: ???
+/// - Num_format: ???
+/// - Element_size: SH_STATIC_MEM_CONFIG.ELEMENT_SIZE (will be DWORD, must
+/// agree with amd_kernel_code_t.privateElementSize)
+/// - Index_stride: SH_STATIC_MEM_CONFIG.INDEX_STRIDE (will be 64 as must
+/// be number of wavefront lanes for scratch, must agree with
+/// amd_kernel_code_t.wavefrontSize)
+/// - Add tid enable: 1
+/// - ATC: from SH_MEM_CONFIG.PRIVATE_ATC,
+/// - Hash_enable: ???
+/// - Heap: ???
+/// - Mtype: from SH_STATIC_MEM_CONFIG.PRIVATE_MTYPE
+/// - Type: 0 (a buffer) (???)
+///
+/// Dispatch Ptr (enable_sgpr_dispatch_ptr):
+/// Number of User SGPR registers: 2. 64 bit address of AQL dispatch packet
+/// for kernel actually executing.
+///
+/// Queue Ptr (enable_sgpr_queue_ptr):
+/// Number of User SGPR registers: 2. 64 bit address of AmdQueue object for
+/// AQL queue on which the dispatch packet was queued.
+///
+/// Kernarg Segment Ptr (enable_sgpr_kernarg_segment_ptr):
+/// Number of User SGPR registers: 2. 64 bit address of Kernarg segment. This
+/// is directly copied from the kernargPtr in the dispatch packet. Having CP
+/// load it once avoids loading it at the beginning of every wavefront.
+///
+/// Dispatch Id (enable_sgpr_dispatch_id):
+/// Number of User SGPR registers: 2. 64 bit Dispatch ID of the dispatch
+/// packet being executed.
+///
+/// Flat Scratch Init (enable_sgpr_flat_scratch_init):
+///   Number of User SGPR registers: 2, assigned as follows:
+///
+/// For CI/VI:
+/// The first SGPR is a 32 bit byte offset from SH_MEM_HIDDEN_PRIVATE_BASE
+/// to base of memory for scratch for this dispatch. This is the same offset
+/// used in computing the Scratch Segment Buffer base address. The value of
+/// Scratch Wave Offset must be added by the kernel code and moved to
+/// SGPRn-4 for use as the FLAT SCRATCH BASE in flat memory instructions.
+///
+///     The second SGPR is the 32 bit byte size of a single work-item’s scratch
+/// memory usage. This is directly loaded from the dispatch packet Private
+/// Segment Byte Size and rounded up to a multiple of DWORD.
+///
+/// \todo [Does CP need to round this to >4 byte alignment?]
+///
+///     The kernel code must move it to SGPRn-3 for use as the FLAT SCRATCH SIZE in
+/// flat memory instructions. Having CP load it once avoids loading it at
+/// the beginning of every wavefront.
+///
+/// For PI:
+///     This is the 64 bit base address of the scratch backing memory
+/// allocated by CP for this dispatch.
+///
+/// Private Segment Size (enable_sgpr_private_segment_size):
+/// Number of User SGPR registers: 1. The 32 bit byte size of a single
+/// work-item’s scratch memory allocation. This is the value from the dispatch
+/// packet. Private Segment Byte Size rounded up by CP to a multiple of DWORD.
+///
+/// \todo [Does CP need to round this to >4 byte alignment?]
+///
+/// Having CP load it once avoids loading it at the beginning of every
+/// wavefront.
+///
+/// \todo [This will not be used for CI/VI since it is the same value as
+/// the second SGPR of Flat Scratch Init. However, it is needed for PI, which
+/// changes the meaning of Flat Scratch Init.]
+///
+/// Grid Work-Group Count X (enable_sgpr_grid_workgroup_count_x):
+/// Number of User SGPR registers: 1. 32 bit count of the number of
+/// work-groups in the X dimension for the grid being executed. Computed from
+/// the fields in the HsaDispatchPacket as
+/// ((gridSize.x+workgroupSize.x-1)/workgroupSize.x).
+///
+/// Grid Work-Group Count Y (enable_sgpr_grid_workgroup_count_y):
+/// Number of User SGPR registers: 1. 32 bit count of the number of
+/// work-groups in the Y dimension for the grid being executed. Computed from
+/// the fields in the HsaDispatchPacket as
+/// ((gridSize.y+workgroupSize.y-1)/workgroupSize.y).
+///
+/// Only initialized if <16 previous SGPRs initialized.
+///
+/// Grid Work-Group Count Z (enable_sgpr_grid_workgroup_count_z):
+/// Number of User SGPR registers: 1. 32 bit count of the number of
+/// work-groups in the Z dimension for the grid being executed. Computed
+/// from the fields in the HsaDispatchPacket as
+/// ((gridSize.z+workgroupSize.z-1)/workgroupSize.z).
+///
+/// Only initialized if <16 previous SGPRs initialized.
+///
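
The three grid work-group counts above all reduce to the same ceiling division; as a standalone sketch (helper name hypothetical):

    // count = ceil(gridSize / workgroupSize), in integer arithmetic.
    inline uint32_t workGroupCount(uint32_t GridSize, uint32_t WorkGroupSize) {
      return (GridSize + WorkGroupSize - 1) / WorkGroupSize;
    }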
+/// Work-Group Id X (enable_sgpr_workgroup_id_x):
+/// Number of System SGPR registers: 1. 32 bit work group id in X dimension
+/// of grid for wavefront. Always present.
+///
+/// Work-Group Id Y (enable_sgpr_workgroup_id_y):
+/// Number of System SGPR registers: 1. 32 bit work group id in Y dimension
+/// of grid for wavefront.
+///
+/// Work-Group Id Z (enable_sgpr_workgroup_id_z):
+/// Number of System SGPR registers: 1. 32 bit work group id in Z dimension
+/// of grid for wavefront. If present then Work-group Id Y will also be
+/// present.
+///
+/// Work-Group Info (enable_sgpr_workgroup_info):
+/// Number of System SGPR registers: 1. {first_wave, 14'b0000,
+/// ordered_append_term[10:0], threadgroup_size_in_waves[5:0]}
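+///
+/// Reading the concatenation MSB first, the packing could be decoded as in
+/// this sketch ('v' is a hypothetical copy of the SGPR value):
+///
+/// \code
+/// uint32_t threadgroup_size_in_waves = v & 0x3F;   // bits 5:0
+/// uint32_t ordered_append_term = (v >> 6) & 0x7FF; // bits 16:6
+/// uint32_t first_wave = (v >> 31) & 0x1;           // bit 31
+/// \endcode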
+///
+/// Private Segment Wave Byte Offset
+/// (enable_sgpr_private_segment_wave_byte_offset):
+/// Number of System SGPR registers: 1. 32 bit byte offset from base of
+/// dispatch scratch base. Must be used as an offset with Private/Spill/Arg
+/// segment address when using Scratch Segment Buffer. It must be added to
+/// Flat Scratch Offset if setting up FLAT SCRATCH for flat addressing.
+///
+///
+/// The order of the VGPR registers is defined, but the Finalizer can specify
+/// which ones are actually set up in the amd_kernel_code_t object using the
+/// enableVgpr* bit fields. The register numbers used for enabled registers
+/// are dense starting at VGPR0: the first enabled register is VGPR0, the next
+/// enabled register is VGPR1 etc.; disabled registers do not have a VGPR
+/// number.
+///
+/// VGPR register initial state is defined as follows:
+///
+/// Work-Item Id X (always initialized):
+/// Number of registers: 1. 32 bit work item id in X dimension of work-group
+/// for wavefront lane.
+///
+/// Work-Item Id Y (enable_vgpr_workitem_id > 0):
+/// Number of registers: 1. 32 bit work item id in Y dimension of work-group
+/// for wavefront lane.
+///
+/// Work-Item Id Z (enable_vgpr_workitem_id > 1):
+/// Number of registers: 1. 32 bit work item id in Z dimension of work-group
+/// for wavefront lane.
+///
+///
+/// The setting of registers is done by existing GPU hardware as follows:
+/// 1) SGPRs before the Work-Group Ids are set by CP using the 16 User Data
+/// registers.
+/// 2) Work-group Id registers X, Y, Z are set by SPI which supports any
+/// combination including none.
+/// 3) Scratch Wave Offset is also set by SPI, which is why its value cannot
+/// be folded into the Flat Scratch Offset value; doing so would have
+/// avoided the Finalizer generated prolog having to do the add.
+/// 4) The VGPRs are set by SPI which only supports specifying either (X),
+/// (X, Y) or (X, Y, Z).
+///
+/// Flat Scratch Dispatch Offset and Flat Scratch Size are adjacent SGPRs so
+/// they can be moved as a 64 bit value to the hardware required SGPRn-3 and
+/// SGPRn-4 respectively using the Finalizer FLAT_SCRATCH register.
+///
+/// The global segment can be accessed either using flat operations or buffer
+/// operations. If buffer operations are used then the Global Buffer used to
+/// access HSAIL Global/Readonly/Kernarg (which are combined) segments using a
+/// segment address is not passed into the kernel code by CP since its base
+/// address is always 0. Instead the Finalizer generates prolog code to
+/// initialize 4 SGPRs with a V# that has the following properties, and then
+/// uses that in the buffer instructions:
+/// - base address of 0
+/// - no swizzle
+/// - ATC=1
+/// - MTYPE set to support memory coherence specified in
+/// amd_kernel_code_t.globalMemoryCoherence
+///
+/// When the Global Buffer is used to access the Kernarg segment, the kernel
+/// code must add the dispatch packet kernArgPtr to the kernarg segment address
+/// before using this V#.
+/// Alternatively scalar loads can be used if the kernarg offset is uniform, as
+/// the kernarg segment is constant for the duration of the kernel execution.
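+///
+/// For example (a sketch only; the names are illustrative and not part of
+/// any defined API):
+///
+/// \code
+/// // Address of a kernarg at byte offset 'off' in the kernarg segment:
+/// uint64_t kernargAddr = dispatchPacket->kernArgPtr + off;
+/// // kernargAddr can then be used with the V# described above, whose base
+/// // address is 0.
+/// \endcode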
+///
+typedef struct amd_kernel_code_s {
+ /// The AMD major version of the Code Object. Must be the value
+ /// AMD_CODE_VERSION_MAJOR.
+ amd_code_version32_t amd_code_version_major;
+
+ /// The AMD minor version of the Code Object. Minor versions must be
+ /// backward compatible. Must be the value
+ /// AMD_CODE_VERSION_MINOR.
+ amd_code_version32_t amd_code_version_minor;
+
+ /// The byte size of this struct. Must be set to
+ /// sizeof(amd_kernel_code_t). Used for backward
+ /// compatibility.
+ uint32_t struct_byte_size;
+
+ /// The target chip instruction set for which code has been
+ /// generated. Values are from the E_SC_INSTRUCTION_SET enumeration
+ /// in sc/Interface/SCCommon.h.
+ uint32_t target_chip;
+
+ /// Byte offset (possibly negative) from start of amd_kernel_code_t
+ /// object to kernel's entry point instruction. The actual code for
+ /// the kernel is required to be 256 byte aligned to match hardware
+ /// requirements (SQ cache line is 16). The code must be position
+ /// independent code (PIC) for AMD devices to give runtime the
+ /// option of copying code to discrete GPU memory or APU L2
+ /// cache. The Finalizer should endeavour to allocate all kernel
+ /// machine code in contiguous memory pages so that a device
+ /// pre-fetcher will tend to only pre-fetch Kernel Code objects,
+ /// improving cache performance.
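+ ///
+ /// For example, a loader could locate the entry point as in this sketch
+ /// ('akc' is a hypothetical pointer to this struct, not a defined name):
+ ///
+ /// \code
+ /// const uint8_t *entry =
+ ///     (const uint8_t *)akc + akc->kernel_code_entry_byte_offset;
+ /// \endcode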
+ int64_t kernel_code_entry_byte_offset;
+
+ /// Range of bytes to consider prefetching, expressed as an offset
+ /// and size. The offset (possibly negative) is from the start of the
+ /// amd_kernel_code_t object. Set both to 0 if no prefetch
+ /// information is available.
+ ///
+ /// \todo ttye 11/15/2013 Is this the prefetch definition we want? Did
+ /// not make the size a uint64_t as prefetching more than 4GiB seems
+ /// excessive.
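+ ///
+ /// The suggested prefetch range is therefore, as a sketch ('akc' is a
+ /// hypothetical pointer to this struct):
+ ///
+ /// \code
+ /// const uint8_t *begin =
+ ///     (const uint8_t *)akc + akc->kernel_code_prefetch_byte_offset;
+ /// const uint8_t *end = begin + akc->kernel_code_prefetch_byte_size;
+ /// \endcode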
+ int64_t kernel_code_prefetch_byte_offset;
+ uint64_t kernel_code_prefetch_byte_size;
+
+ /// Number of bytes of scratch backing memory required for full
+ /// occupancy of target chip. This takes into account the number of
+ /// bytes of scratch per work-item, the wavefront size, the maximum
+ /// number of wavefronts per CU, and the number of CUs. This is an
+ /// upper limit on scratch. If the grid being dispatched is small, it
+ /// may need less than this. If the kernel uses no scratch, or
+ /// the Finalizer has not computed this value, it must be 0.
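+ ///
+ /// A plausible upper-bound computation from the listed inputs (a sketch;
+ /// none of these variable names are defined by this header):
+ ///
+ /// \code
+ /// uint64_t maxScratch = (uint64_t)bytesPerWorkItem * wavefrontSize *
+ ///                       maxWavefrontsPerCU * numCUs;
+ /// \endcode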
+ uint64_t max_scratch_backing_memory_byte_size;
+
+ /// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
+ /// COMPUTE_PGM_RSRC2 registers.
+ amd_compute_pgm_resource_register64_t compute_pgm_resource_registers;
+
+ /// Code properties. See amd_code_property_mask_t for a full list of
+ /// properties.
+ amd_code_property32_t code_properties;
+
+ /// The amount of memory required for the combined private, spill
+ /// and arg segments for a work-item in bytes. If
+ /// is_dynamic_callstack is 1 then additional space must be added to
+ /// this value for the call stack.
+ uint32_t workitem_private_segment_byte_size;
+
+ /// The amount of group segment memory required by a work-group in
+ /// bytes. This does not include any dynamically allocated group
+ /// segment memory that may be added when the kernel is
+ /// dispatched.
+ uint32_t workgroup_group_segment_byte_size;
+
+  /// Number of bytes of GDS required by the kernel dispatch. Must be 0 if
+ /// not using GDS.
+ uint32_t gds_segment_byte_size;
+
+ /// The size in bytes of the kernarg segment that holds the values
+ /// of the arguments to the kernel. This could be used by CP to
+ /// prefetch the kernarg segment pointed to by the dispatch packet.
+ uint64_t kernarg_segment_byte_size;
+
+ /// Number of fbarrier's used in the kernel and all functions it
+ /// calls. If the implementation uses group memory to allocate the
+ /// fbarriers then that amount must already be included in the
+ /// workgroup_group_segment_byte_size total.
+ uint32_t workgroup_fbarrier_count;
+
+ /// Number of scalar registers used by a wavefront. This includes
+ /// the special SGPRs for VCC, Flat Scratch Base, Flat Scratch Size
+  /// and XNACK (for GFX8 (VI)). It does not include the 16 SGPRs added if a
+ /// trap handler is enabled. Used to set COMPUTE_PGM_RSRC1.SGPRS.
+ uint16_t wavefront_sgpr_count;
+
+ /// Number of vector registers used by each work-item. Used to set
+ /// COMPUTE_PGM_RSRC1.VGPRS.
+ uint16_t workitem_vgpr_count;
+
+ /// If reserved_vgpr_count is 0 then must be 0. Otherwise, this is the
+ /// first fixed VGPR number reserved.
+ uint16_t reserved_vgpr_first;
+
+ /// The number of consecutive VGPRs reserved by the client. If
+ /// is_debug_supported then this count includes VGPRs reserved
+ /// for debugger use.
+ uint16_t reserved_vgpr_count;
+
+ /// If reserved_sgpr_count is 0 then must be 0. Otherwise, this is the
+ /// first fixed SGPR number reserved.
+ uint16_t reserved_sgpr_first;
+
+ /// The number of consecutive SGPRs reserved by the client. If
+ /// is_debug_supported then this count includes SGPRs reserved
+ /// for debugger use.
+ uint16_t reserved_sgpr_count;
+
+ /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
+ /// fixed SGPR number used to hold the wave scratch offset for the
+ /// entire kernel execution, or uint16_t(-1) if the register is not
+ /// used or not known.
+ uint16_t debug_wavefront_private_segment_offset_sgpr;
+
+ /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
+ /// fixed SGPR number of the first of 4 SGPRs used to hold the
+ /// scratch V# used for the entire kernel execution, or uint16_t(-1)
+ /// if the registers are not used or not known.
+ uint16_t debug_private_segment_buffer_sgpr;
+
+ /// The maximum byte alignment of variables used by the kernel in
+ /// the specified memory segment. Expressed as a power of two. Must
+ /// be at least HSA_POWERTWO_16.
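+ ///
+ /// Assuming the hsa_powertwo8_t value stores the exponent, the byte
+ /// alignment would decode as in this sketch ('akc' is a hypothetical
+ /// pointer to this struct):
+ ///
+ /// \code
+ /// uint32_t kernargAlign = 1u << akc->kernarg_segment_alignment;
+ /// \endcode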
+ hsa_powertwo8_t kernarg_segment_alignment;
+ hsa_powertwo8_t group_segment_alignment;
+ hsa_powertwo8_t private_segment_alignment;
+
+ uint8_t reserved3;
+
+ /// Type of code object.
+ hsa_ext_code_kind32_t code_type;
+
+ /// Reserved for code properties if any are defined in the future.
+ /// There are currently no code properties so this field must be 0.
+ uint32_t reserved4;
+
+ /// Wavefront size expressed as a power of two. Must be a power of 2
+  /// in the range 1..64 inclusive. Used to support a runtime query that
+  /// obtains the wavefront size, which may be used by the application to
+  /// allocate dynamic group memory and set the dispatch work-group
+ /// size.
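+ ///
+ /// Assuming the stored value is the exponent, the runtime query mentioned
+ /// above could decode it as in this sketch ('akc' is a hypothetical
+ /// pointer to this struct):
+ ///
+ /// \code
+ /// uint32_t wavefrontSize = 1u << akc->wavefront_size;
+ /// \endcode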
+ hsa_powertwo8_t wavefront_size;
+
+ /// The optimization level specified when the kernel was
+ /// finalized.
+ uint8_t optimization_level;
+
+ /// The HSAIL profile defines which features are used. This
+ /// information is from the HSAIL version directive. If this
+ /// amd_kernel_code_t is not generated from an HSAIL compilation
+ /// unit then must be 0.
+ hsa_ext_brig_profile8_t hsail_profile;
+
+ /// The HSAIL machine model gives the address sizes used by the
+ /// code. This information is from the HSAIL version directive. If
+ /// not generated from an HSAIL compilation unit then must still
+ /// indicate for what machine mode the code is generated.
+ hsa_ext_brig_machine_model8_t hsail_machine_model;
+
+ /// The HSAIL major version. This information is from the HSAIL
+ /// version directive. If this amd_kernel_code_t is not
+ /// generated from an HSAIL compilation unit then must be 0.
+ uint32_t hsail_version_major;
+
+ /// The HSAIL minor version. This information is from the HSAIL
+ /// version directive. If this amd_kernel_code_t is not
+ /// generated from an HSAIL compilation unit then must be 0.
+ uint32_t hsail_version_minor;
+
+ /// Reserved for HSAIL target options if any are defined in the
+ /// future. There are currently no target options so this field
+ /// must be 0.
+ uint16_t reserved5;
+
+ /// Reserved. Must be 0.
+ uint16_t reserved6;
+
+  /// The values should be the actual values used by the finalizer
+ /// in generating the code. This may be the union of values
+ /// specified as finalizer arguments and explicit HSAIL control
+ /// directives. If the finalizer chooses to ignore a control
+ /// directive, and not generate constrained code, then the control
+ /// directive should not be marked as enabled even though it was
+ /// present in the HSAIL or finalizer argument. The values are
+ /// intended to reflect the constraints that the code actually
+ /// requires to correctly execute, not the values that were
+ /// actually specified at finalize time.
+ hsa_ext_control_directives_t control_directive;
+
+ /// The code can immediately follow the amd_kernel_code_t, or can
+ /// come after subsequent amd_kernel_code_t structs when there are
+ /// multiple kernels in the compilation unit.
+
+} amd_kernel_code_t;
+
+#endif // AMDKERNELCODET_H
diff --git a/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
index 7ad815d..3b4ba1a 100644
--- a/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
@@ -163,23 +163,22 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MCInst Inst;
switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
- default: break;
- case Match_Success:
- Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst, STI);
- return false;
- case Match_MissingFeature:
- return Error(IDLoc, "instruction use requires an option to be enabled");
- case Match_MnemonicFail:
- return Error(IDLoc, "unrecognized instruction mnemonic");
- case Match_InvalidOperand: {
- if (ErrorInfo != ~0ULL) {
- if (ErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction");
-
- }
- return Error(IDLoc, "invalid operand for instruction");
+ case Match_Success:
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst, STI);
+ return false;
+ case Match_MissingFeature:
+ return Error(IDLoc, "instruction use requires an option to be enabled");
+ case Match_MnemonicFail:
+ return Error(IDLoc, "unrecognized instruction mnemonic");
+ case Match_InvalidOperand: {
+ if (ErrorInfo != ~0ULL) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
}
+ return Error(IDLoc, "invalid operand for instruction");
+ }
}
llvm_unreachable("Implement any new match types added!");
}
@@ -312,6 +311,7 @@ bool AMDGPUOperand::isSWaitCnt() const {
/// Force static initialization.
extern "C" void LLVMInitializeR600AsmParser() {
RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
+ RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
#define GET_REGISTER_MATCHER
diff --git a/lib/Target/R600/CIInstructions.td b/lib/Target/R600/CIInstructions.td
new file mode 100644
index 0000000..3ac7af8
--- /dev/null
+++ b/lib/Target/R600/CIInstructions.td
@@ -0,0 +1,42 @@
+//===-- CIInstructions.td - CI Instruction Definitions --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Instruction definitions for CI and newer.
+//===----------------------------------------------------------------------===//
+
+
+def isCIVI : Predicate <
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
+ "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
+>;
+
+//===----------------------------------------------------------------------===//
+// VOP1 Instructions
+//===----------------------------------------------------------------------===//
+
+let SubtargetPredicate = isCIVI in {
+
+defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
+ VOP_F64_F64, ftrunc
+>;
+defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
+ VOP_F64_F64, fceil
+>;
+defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
+ VOP_F64_F64, ffloor
+>;
+defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
+ VOP_F64_F64, frint
+>;
+defm V_LOG_LEGACY_F32 : VOP1Inst <vop1<0x45, 0x4c>, "v_log_legacy_f32",
+ VOP_F32_F32
+>;
+defm V_EXP_LEGACY_F32 : VOP1Inst <vop1<0x46, 0x4b>, "v_exp_legacy_f32",
+ VOP_F32_F32
+>;
+} // End SubtargetPredicate = isCIVI
diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt
index ed0a216..5a4bae2 100644
--- a/lib/Target/R600/CMakeLists.txt
+++ b/lib/Target/R600/CMakeLists.txt
@@ -43,6 +43,7 @@ add_llvm_target(R600CodeGen
SIAnnotateControlFlow.cpp
SIFixSGPRCopies.cpp
SIFixSGPRLiveRanges.cpp
+ SIFoldOperands.cpp
SIInsertWaits.cpp
SIInstrInfo.cpp
SIISelLowering.cpp
@@ -50,6 +51,7 @@ add_llvm_target(R600CodeGen
SILowerControlFlow.cpp
SILowerI1Copies.cpp
SIMachineFunctionInfo.cpp
+ SIPrepareScratchRegs.cpp
SIRegisterInfo.cpp
SIShrinkInstructions.cpp
SITypeRewriter.cpp
diff --git a/lib/Target/R600/CaymanInstructions.td b/lib/Target/R600/CaymanInstructions.td
index 58b5ce2..ba4df82 100644
--- a/lib/Target/R600/CaymanInstructions.td
+++ b/lib/Target/R600/CaymanInstructions.td
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-def isCayman : Predicate<"Subtarget.hasCaymanISA()">;
+def isCayman : Predicate<"Subtarget->hasCaymanISA()">;
//===----------------------------------------------------------------------===//
// Cayman Instructions
@@ -46,7 +46,7 @@ def SIN_cm : SIN_Common<0x8D>;
def COS_cm : COS_Common<0x8E>;
} // End isVector = 1
-defm : RsqPat<RECIPSQRT_IEEE_cm, f32>;
+def : RsqPat<RECIPSQRT_IEEE_cm, f32>;
def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
diff --git a/lib/Target/R600/EvergreenInstructions.td b/lib/Target/R600/EvergreenInstructions.td
index f24f76b..9f9472c 100644
--- a/lib/Target/R600/EvergreenInstructions.td
+++ b/lib/Target/R600/EvergreenInstructions.td
@@ -14,14 +14,14 @@
//===----------------------------------------------------------------------===//
def isEG : Predicate<
- "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
- "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
- "!Subtarget.hasCaymanISA()"
+ "Subtarget->getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
+ "Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "!Subtarget->hasCaymanISA()"
>;
def isEGorCayman : Predicate<
- "Subtarget.getGeneration() == AMDGPUSubtarget::EVERGREEN ||"
- "Subtarget.getGeneration() ==AMDGPUSubtarget::NORTHERN_ISLANDS"
+ "Subtarget->getGeneration() == AMDGPUSubtarget::EVERGREEN ||"
+ "Subtarget->getGeneration() ==AMDGPUSubtarget::NORTHERN_ISLANDS"
>;
//===----------------------------------------------------------------------===//
@@ -69,7 +69,7 @@ def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
-defm : RsqPat<RECIPSQRT_IEEE_eg, f32>;
+def : RsqPat<RECIPSQRT_IEEE_eg, f32>;
def SIN_eg : SIN_Common<0x8D>;
def COS_eg : COS_Common<0x8E>;
@@ -590,8 +590,6 @@ def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>;
// SHA-256 Patterns
def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
-def : FROUNDPat <CNDGE_eg, CNDGT_eg>;
-
def EG_ExportSwz : ExportSwzInst {
let Word1{19-16} = 0; // BURST_COUNT
let Word1{20} = 0; // VALID_PIXEL_MODE
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
index 64fe726..b66ed10 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
@@ -9,11 +9,11 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUInstPrinter.h"
-#include "SIDefines.h"
-
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIDefines.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/MathExtras.h"
@@ -74,7 +74,7 @@ void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
if (MI->getOperand(OpNo).getImm()) {
O << " offset:";
- printU16ImmOperand(MI, OpNo, O);
+ printU16ImmDecOperand(MI, OpNo, O);
}
}
@@ -208,7 +208,7 @@ void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
O << Type << '[' << RegIdx << ':' << (RegIdx + NumRegs - 1) << ']';
}
-void AMDGPUInstPrinter::printImmediate(uint32_t Imm, raw_ostream &O) {
+void AMDGPUInstPrinter::printImmediate32(uint32_t Imm, raw_ostream &O) {
int32_t SImm = static_cast<int32_t>(Imm);
if (SImm >= -16 && SImm <= 64) {
O << SImm;
@@ -233,9 +233,37 @@ void AMDGPUInstPrinter::printImmediate(uint32_t Imm, raw_ostream &O) {
O << "4.0";
else if (Imm == FloatToBits(-4.0f))
O << "-4.0";
- else {
+ else
O << formatHex(static_cast<uint64_t>(Imm));
+}
+
+void AMDGPUInstPrinter::printImmediate64(uint64_t Imm, raw_ostream &O) {
+ int64_t SImm = static_cast<int64_t>(Imm);
+ if (SImm >= -16 && SImm <= 64) {
+ O << SImm;
+ return;
}
+
+ if (Imm == DoubleToBits(0.0))
+ O << "0.0";
+ else if (Imm == DoubleToBits(1.0))
+ O << "1.0";
+ else if (Imm == DoubleToBits(-1.0))
+ O << "-1.0";
+ else if (Imm == DoubleToBits(0.5))
+ O << "0.5";
+ else if (Imm == DoubleToBits(-0.5))
+ O << "-0.5";
+ else if (Imm == DoubleToBits(2.0))
+ O << "2.0";
+ else if (Imm == DoubleToBits(-2.0))
+ O << "-2.0";
+ else if (Imm == DoubleToBits(4.0))
+ O << "4.0";
+ else if (Imm == DoubleToBits(-4.0))
+ O << "-4.0";
+ else
+ llvm_unreachable("64-bit literal constants not supported");
}
void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
@@ -253,14 +281,39 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
break;
}
} else if (Op.isImm()) {
- printImmediate(Op.getImm(), O);
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ int RCID = Desc.OpInfo[OpNo].RegClass;
+ if (RCID != -1) {
+ const MCRegisterClass &ImmRC = MRI.getRegClass(RCID);
+ if (ImmRC.getSize() == 4)
+ printImmediate32(Op.getImm(), O);
+ else if (ImmRC.getSize() == 8)
+ printImmediate64(Op.getImm(), O);
+ else
+ llvm_unreachable("Invalid register class size");
+ } else if (Desc.OpInfo[OpNo].OperandType == MCOI::OPERAND_IMMEDIATE) {
+ printImmediate32(Op.getImm(), O);
+ } else {
+ // We hit this for the immediate instruction bits that don't yet have a
+ // custom printer.
+ // TODO: Eventually this should be unnecessary.
+ O << formatDec(Op.getImm());
+ }
} else if (Op.isFPImm()) {
-
// We special case 0.0 because otherwise it will be printed as an integer.
if (Op.getFPImm() == 0.0)
O << "0.0";
- else
- printImmediate(FloatToBits(Op.getFPImm()), O);
+ else {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ const MCRegisterClass &ImmRC = MRI.getRegClass(Desc.OpInfo[OpNo].RegClass);
+
+ if (ImmRC.getSize() == 4)
+ printImmediate32(FloatToBits(Op.getFPImm()), O);
+ else if (ImmRC.getSize() == 8)
+ printImmediate64(DoubleToBits(Op.getFPImm()), O);
+ else
+ llvm_unreachable("Invalid register class size");
+ }
} else if (Op.isExpr()) {
const MCExpr *Exp = Op.getExpr();
Exp->print(O);
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
index 4c06ac0..1d43c7a 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
@@ -48,7 +48,8 @@ private:
void printSLC(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printTFE(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printRegOperand(unsigned RegNo, raw_ostream &O);
- void printImmediate(uint32_t Imm, raw_ostream &O);
+ void printImmediate32(uint32_t I, raw_ostream &O);
+ void printImmediate64(uint64_t I, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printOperandAndMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
index 5fb311b..d0c634f 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -29,7 +29,7 @@ public:
const MCAsmLayout &Layout) override {
//XXX: Implement if necessary.
}
- void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ void RecordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, bool &IsPCRel,
uint64_t &FixedValue) override {
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 3c2b889..19d89fb 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -17,6 +17,7 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfoELF() {
MaxInstLength = 16;
SeparatorString = "\n";
CommentString = ";";
+ PrivateLabelPrefix = "";
InlineAsmStart = ";#ASMSTART";
InlineAsmEnd = ";#ASMEND";
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 8731055..83403ba 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -15,6 +15,7 @@
#include "AMDGPUMCTargetDesc.h"
#include "AMDGPUMCAsmInfo.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
+#include "SIDefines.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -92,20 +93,29 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
extern "C" void LLVMInitializeR600TargetMC() {
RegisterMCAsmInfo<AMDGPUMCAsmInfo> Y(TheAMDGPUTarget);
+ RegisterMCAsmInfo<AMDGPUMCAsmInfo> Z(TheGCNTarget);
TargetRegistry::RegisterMCCodeGenInfo(TheAMDGPUTarget, createAMDGPUMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheGCNTarget, createAMDGPUMCCodeGenInfo);
TargetRegistry::RegisterMCInstrInfo(TheAMDGPUTarget, createAMDGPUMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheGCNTarget, createAMDGPUMCInstrInfo);
TargetRegistry::RegisterMCRegInfo(TheAMDGPUTarget, createAMDGPUMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheGCNTarget, createAMDGPUMCRegisterInfo);
TargetRegistry::RegisterMCSubtargetInfo(TheAMDGPUTarget, createAMDGPUMCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheGCNTarget, createAMDGPUMCSubtargetInfo);
TargetRegistry::RegisterMCInstPrinter(TheAMDGPUTarget, createAMDGPUMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheGCNTarget, createAMDGPUMCInstPrinter);
TargetRegistry::RegisterMCCodeEmitter(TheAMDGPUTarget, createAMDGPUMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheGCNTarget, createAMDGPUMCCodeEmitter);
TargetRegistry::RegisterMCAsmBackend(TheAMDGPUTarget, createAMDGPUAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheGCNTarget, createAMDGPUAsmBackend);
TargetRegistry::RegisterMCObjectStreamer(TheAMDGPUTarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheGCNTarget, createMCStreamer);
}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
index c019766..bc8cd53 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -30,6 +30,7 @@ class Target;
class raw_ostream;
extern Target TheAMDGPUTarget;
+extern Target TheGCNTarget;
MCCodeEmitter *createR600MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
index dc1344f..8a555ff 100644
--- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -30,8 +30,8 @@ using namespace llvm;
namespace {
class R600MCCodeEmitter : public AMDGPUMCCodeEmitter {
- R600MCCodeEmitter(const R600MCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const R600MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ R600MCCodeEmitter(const R600MCCodeEmitter &) = delete;
+ void operator=(const R600MCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
const MCRegisterInfo &MRI;
diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
index 999fd0d..7e23772 100644
--- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -14,10 +14,10 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
-#include "SIDefines.h"
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
+#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIDefines.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
@@ -31,15 +31,9 @@ using namespace llvm;
namespace {
-/// \brief Helper type used in encoding
-typedef union {
- int32_t I;
- float F;
-} IntFloatUnion;
-
class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
- SIMCCodeEmitter(const SIMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const SIMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
+ void operator=(const SIMCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
const MCRegisterInfo &MRI;
MCContext &Ctx;
@@ -48,7 +42,7 @@ class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
bool isSrcOperand(const MCInstrDesc &Desc, unsigned OpNo) const;
/// \brief Encode an fp or int literal
- uint32_t getLitEncoding(const MCOperand &MO) const;
+ uint32_t getLitEncoding(const MCOperand &MO, unsigned OpSize) const;
public:
SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
@@ -85,60 +79,107 @@ MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
bool SIMCCodeEmitter::isSrcOperand(const MCInstrDesc &Desc,
unsigned OpNo) const {
- unsigned RegClass = Desc.OpInfo[OpNo].RegClass;
- return (AMDGPU::SSrc_32RegClassID == RegClass) ||
- (AMDGPU::SSrc_64RegClassID == RegClass) ||
- (AMDGPU::VSrc_32RegClassID == RegClass) ||
- (AMDGPU::VSrc_64RegClassID == RegClass) ||
- (AMDGPU::VCSrc_32RegClassID == RegClass) ||
- (AMDGPU::VCSrc_64RegClassID == RegClass);
+ unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+
+ return OpType == AMDGPU::OPERAND_REG_IMM32 ||
+ OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
-uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO) const {
+// Returns the encoding value to use if the given integer is an integer inline
+// immediate value, or 0 if it is not.
+template <typename IntTy>
+static uint32_t getIntInlineImmEncoding(IntTy Imm) {
+ if (Imm >= 0 && Imm <= 64)
+ return 128 + Imm;
- IntFloatUnion Imm;
- if (MO.isImm())
- Imm.I = MO.getImm();
- else if (MO.isFPImm())
- Imm.F = MO.getFPImm();
- else if (MO.isExpr())
- return 255;
- else
- return ~0;
+ if (Imm >= -16 && Imm <= -1)
+ return 192 + std::abs(Imm);
- if (Imm.I >= 0 && Imm.I <= 64)
- return 128 + Imm.I;
+ return 0;
+}
- if (Imm.I >= -16 && Imm.I <= -1)
- return 192 + abs(Imm.I);
+static uint32_t getLit32Encoding(uint32_t Val) {
+ uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
+ if (IntImm != 0)
+ return IntImm;
- if (Imm.F == 0.5f)
+ if (Val == FloatToBits(0.5f))
return 240;
- if (Imm.F == -0.5f)
+ if (Val == FloatToBits(-0.5f))
return 241;
- if (Imm.F == 1.0f)
+ if (Val == FloatToBits(1.0f))
return 242;
- if (Imm.F == -1.0f)
+ if (Val == FloatToBits(-1.0f))
return 243;
- if (Imm.F == 2.0f)
+ if (Val == FloatToBits(2.0f))
return 244;
- if (Imm.F == -2.0f)
+ if (Val == FloatToBits(-2.0f))
return 245;
- if (Imm.F == 4.0f)
+ if (Val == FloatToBits(4.0f))
return 246;
- if (Imm.F == -4.0f)
+ if (Val == FloatToBits(-4.0f))
return 247;
return 255;
}
+static uint32_t getLit64Encoding(uint64_t Val) {
+ uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
+ if (IntImm != 0)
+ return IntImm;
+
+ if (Val == DoubleToBits(0.5))
+ return 240;
+
+ if (Val == DoubleToBits(-0.5))
+ return 241;
+
+ if (Val == DoubleToBits(1.0))
+ return 242;
+
+ if (Val == DoubleToBits(-1.0))
+ return 243;
+
+ if (Val == DoubleToBits(2.0))
+ return 244;
+
+ if (Val == DoubleToBits(-2.0))
+ return 245;
+
+ if (Val == DoubleToBits(4.0))
+ return 246;
+
+ if (Val == DoubleToBits(-4.0))
+ return 247;
+
+ return 255;
+}
+
+uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
+ unsigned OpSize) const {
+ if (MO.isExpr())
+ return 255;
+
+ assert(!MO.isFPImm());
+
+ if (!MO.isImm())
+ return ~0;
+
+ if (OpSize == 4)
+ return getLit32Encoding(static_cast<uint32_t>(MO.getImm()));
+
+ assert(OpSize == 8);
+
+ return getLit64Encoding(static_cast<uint64_t>(MO.getImm()));
+}
+
void SIMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -161,25 +202,24 @@ void SIMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if (!isSrcOperand(Desc, i))
continue;
+ int RCID = Desc.OpInfo[i].RegClass;
+ const MCRegisterClass &RC = MRI.getRegClass(RCID);
+
// Is this operand a literal immediate?
const MCOperand &Op = MI.getOperand(i);
- if (getLitEncoding(Op) != 255)
+ if (getLitEncoding(Op, RC.getSize()) != 255)
continue;
// Yes! Encode it
- IntFloatUnion Imm;
+ int64_t Imm = 0;
+
if (Op.isImm())
- Imm.I = Op.getImm();
- else if (Op.isFPImm())
- Imm.F = Op.getFPImm();
- else {
- assert(Op.isExpr());
- // This will be replaced with a fixup value.
- Imm.I = 0;
- }
+ Imm = Op.getImm();
+ else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
+ llvm_unreachable("Must be immediate or expr");
for (unsigned j = 0; j < 4; j++) {
- OS.write((uint8_t) ((Imm.I >> (8 * j)) & 0xff));
+ OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
}
// Only one literal value allowed
@@ -234,7 +274,10 @@ uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
if (isSrcOperand(Desc, OpNo)) {
- uint32_t Enc = getLitEncoding(MO);
+ int RCID = Desc.OpInfo[OpNo].RegClass;
+ const MCRegisterClass &RC = MRI.getRegClass(RCID);
+
+ uint32_t Enc = getLitEncoding(MO, RC.getSize());
if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4))
return Enc;
diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td
index ce17d7c..fb5aa61 100644
--- a/lib/Target/R600/Processors.td
+++ b/lib/Target/R600/Processors.td
@@ -83,28 +83,44 @@ def : Proc<"cayman", R600_VLIW4_Itin,
// Southern Islands
//===----------------------------------------------------------------------===//
-def : Proc<"SI", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"SI", SIFullSpeedModel,
+ [FeatureSouthernIslands, FeatureFastFMAF32]
+>;
-def : Proc<"tahiti", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"tahiti", SIFullSpeedModel,
+ [FeatureSouthernIslands, FeatureFastFMAF32]
+>;
-def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"pitcairn", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"verde", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"oland", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>;
+def : ProcessorModel<"hainan", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
//===----------------------------------------------------------------------===//
// Sea Islands
//===----------------------------------------------------------------------===//
-def : Proc<"bonaire", SI_Itin, [FeatureSeaIslands]>;
+def : ProcessorModel<"bonaire", SIQuarterSpeedModel, [FeatureSeaIslands]>;
-def : Proc<"kabini", SI_Itin, [FeatureSeaIslands]>;
+def : ProcessorModel<"kabini", SIQuarterSpeedModel, [FeatureSeaIslands]>;
-def : Proc<"kaveri", SI_Itin, [FeatureSeaIslands]>;
+def : ProcessorModel<"kaveri", SIQuarterSpeedModel, [FeatureSeaIslands]>;
-def : Proc<"hawaii", SI_Itin, [FeatureSeaIslands]>;
+def : ProcessorModel<"hawaii", SIFullSpeedModel,
+ [FeatureSeaIslands, FeatureFastFMAF32]
+>;
-def : Proc<"mullins", SI_Itin, [FeatureSeaIslands]>;
+def : ProcessorModel<"mullins", SIQuarterSpeedModel, [FeatureSeaIslands]>;
+
+//===----------------------------------------------------------------------===//
+// Volcanic Islands
+//===----------------------------------------------------------------------===//
+
+def : ProcessorModel<"tonga", SIQuarterSpeedModel, [FeatureVolcanicIslands]>;
+
+def : ProcessorModel<"iceland", SIQuarterSpeedModel, [FeatureVolcanicIslands]>;
+
+def : ProcessorModel<"carrizo", SIQuarterSpeedModel, [FeatureVolcanicIslands]>;
diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp
index edaf278..c8f37f6 100644
--- a/lib/Target/R600/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp
@@ -39,14 +39,14 @@ struct CFStack {
FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
};
- const AMDGPUSubtarget &ST;
+ const AMDGPUSubtarget *ST;
std::vector<StackItem> BranchStack;
std::vector<StackItem> LoopStack;
unsigned MaxStackSize;
unsigned CurrentEntries;
unsigned CurrentSubEntries;
- CFStack(const AMDGPUSubtarget &st, unsigned ShaderType) : ST(st),
+ CFStack(const AMDGPUSubtarget *st, unsigned ShaderType) : ST(st),
// We need to reserve a stack entry for CALL_FS in vertex shaders.
MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
CurrentEntries(0), CurrentSubEntries(0) { }
@@ -76,11 +76,11 @@ bool CFStack::branchStackContains(CFStack::StackItem Item) {
}
bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
- if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST.hasCaymanISA() &&
+ if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&
getLoopDepth() > 1)
return true;
- if (!ST.hasCFAluBug())
+ if (!ST->hasCFAluBug())
return false;
switch(Opcode) {
@@ -91,7 +91,7 @@ bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
case AMDGPU::CF_ALU_CONTINUE:
if (CurrentSubEntries == 0)
return false;
- if (ST.getWavefrontSize() == 64) {
+ if (ST->getWavefrontSize() == 64) {
// We are being conservative here. We only require this work-around if
// CurrentSubEntries > 3 &&
// (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
@@ -102,7 +102,7 @@ bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
// resources without any problems.
return CurrentSubEntries > 3;
} else {
- assert(ST.getWavefrontSize() == 32);
+ assert(ST->getWavefrontSize() == 32);
// We are being conservative here. We only require the work-around if
// CurrentSubEntries > 7 &&
// (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
@@ -118,8 +118,8 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
default:
return 0;
case CFStack::FIRST_NON_WQM_PUSH:
- assert(!ST.hasCaymanISA());
- if (ST.getGeneration() <= AMDGPUSubtarget::R700) {
+ assert(!ST->hasCaymanISA());
+ if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
// +1 For the push operation.
// +2 Extra space required.
return 3;
@@ -132,7 +132,7 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
return 2;
}
case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
- assert(ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+ assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
// +1 For the push operation.
// +1 Extra space required.
return 2;
@@ -153,13 +153,14 @@ void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
case AMDGPU::CF_PUSH_EG:
case AMDGPU::CF_ALU_PUSH_BEFORE:
if (!isWQM) {
- if (!ST.hasCaymanISA() && !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
+ if (!ST->hasCaymanISA() &&
+ !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
Item = CFStack::FIRST_NON_WQM_PUSH; // May not be required on Evergreen/NI
// See comment in
// CFStack::getSubEntrySize()
else if (CurrentEntries > 0 &&
- ST.getGeneration() > AMDGPUSubtarget::EVERGREEN &&
- !ST.hasCaymanISA() &&
+ ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
+ !ST->hasCaymanISA() &&
!branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
else
@@ -219,7 +220,7 @@ private:
const R600InstrInfo *TII;
const R600RegisterInfo *TRI;
unsigned MaxFetchInst;
- const AMDGPUSubtarget &ST;
+ const AMDGPUSubtarget *ST;
bool IsTrivialInst(MachineInstr *MI) const {
switch (MI->getOpcode()) {
@@ -233,7 +234,7 @@ private:
const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
unsigned Opcode = 0;
- bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+ bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
switch (CFI) {
case CF_TC:
Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
@@ -266,7 +267,7 @@ private:
Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
break;
case CF_END:
- if (ST.hasCaymanISA()) {
+ if (ST->hasCaymanISA()) {
Opcode = AMDGPU::CF_END_CM;
break;
}
@@ -467,17 +468,14 @@ private:
}
public:
- R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (nullptr), TRI(nullptr),
- ST(tm.getSubtarget<AMDGPUSubtarget>()) {
- const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
- MaxFetchInst = ST.getTexVTXClauseSize();
- }
+ R600ControlFlowFinalizer(TargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
- TRI = static_cast<const R600RegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ ST = &MF.getSubtarget<AMDGPUSubtarget>();
+ MaxFetchInst = ST->getTexVTXClauseSize();
+ TII = static_cast<const R600InstrInfo *>(ST->getInstrInfo());
+ TRI = static_cast<const R600RegisterInfo *>(ST->getRegisterInfo());
R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
CFStack CFStack(ST, MFI->getShaderType());
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index a214e53..c738611 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -30,9 +30,9 @@
using namespace llvm;
-R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
- AMDGPUTargetLowering(TM),
- Gen(TM.getSubtarget<AMDGPUSubtarget>().getGeneration()) {
+R600TargetLowering::R600TargetLowering(TargetMachine &TM,
+ const AMDGPUSubtarget &STI)
+ : AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) {
addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
@@ -40,7 +40,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
// Set condition code actions
setCondCodeAction(ISD::SETO, MVT::f32, Expand);
@@ -122,12 +122,19 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
// EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
// spaces, so it is custom lowered to handle those where it isn't.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);
+
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
+ }
setOperationAction(ISD::STORE, MVT::i8, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
@@ -181,8 +188,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::SUBE, VT, Expand);
}
- setBooleanContents(ZeroOrNegativeOneBooleanContent);
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
setSchedulingPreference(Sched::Source);
}
@@ -192,7 +197,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock::iterator I = *MI;
const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(MF->getSubtarget().getInstrInfo());
+ static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
switch (MI->getOpcode()) {
default:
@@ -647,9 +652,8 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
MachineSDNode *interp;
if (ijb < 0) {
- const MachineFunction &MF = DAG.getMachineFunction();
- const R600InstrInfo *TII = static_cast<const R600InstrInfo *>(
- MF.getSubtarget().getInstrInfo());
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32));
return DAG.getTargetExtractSubreg(
@@ -1115,6 +1119,13 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
SDValue CC = Op.getOperand(4);
SDValue Temp;
+ if (VT == MVT::f32) {
+ DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
+ SDValue MinMax = CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
+ if (MinMax)
+ return MinMax;
+ }
+
// LHS and RHS are guaranteed to be the same value type
EVT CompareVT = LHS.getValueType();
@@ -1369,8 +1380,8 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// Lowering for indirect addressing
const MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
- getTargetMachine().getSubtargetImpl()->getFrameLowering());
+ const AMDGPUFrameLowering *TFL =
+ static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
unsigned StackWidth = TFL->getStackWidth(MF);
Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1567,8 +1578,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
// Lowering for indirect addressing
const MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
- getTargetMachine().getSubtargetImpl()->getFrameLowering());
+ const AMDGPUFrameLowering *TFL =
+ static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
unsigned StackWidth = TFL->getStackWidth(MF);
Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1682,7 +1693,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
// XXX - I think PartOffset should give you this, but it seems to give the
// size of the register which isn't useful.
- unsigned ValBase = ArgLocs[In.OrigArgIndex].getLocMemOffset();
+ unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
unsigned PartOffset = VA.getLocMemOffset();
unsigned Offset = 36 + VA.getLocMemOffset();
@@ -2172,9 +2183,7 @@ SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
unsigned Opcode = Node->getMachineOpcode();
SDValue FakeOp;
- std::vector<SDValue> Ops;
- for (const SDUse &I : Node->ops())
- Ops.push_back(I);
+ std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());
if (Opcode == AMDGPU::DOT_4) {
int OperandIdx[] = {
@@ -2236,10 +2245,7 @@ SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
AMDGPU::OpName::clamp);
if (ClampIdx < 0)
return Node;
- std::vector<SDValue> Ops;
- unsigned NumOp = Src.getNumOperands();
- for(unsigned i = 0; i < NumOp; ++i)
- Ops.push_back(Src.getOperand(i));
+ std::vector<SDValue> Ops(Src->op_begin(), Src->op_end());
Ops[ClampIdx - 1] = DAG.getTargetConstant(1, MVT::i32);
return DAG.getMachineNode(Src.getMachineOpcode(), SDLoc(Node),
Node->getVTList(), Ops);
diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h
index 10ebc10..c547195 100644
--- a/lib/Target/R600/R600ISelLowering.h
+++ b/lib/Target/R600/R600ISelLowering.h
@@ -23,7 +23,7 @@ class R600InstrInfo;
class R600TargetLowering : public AMDGPUTargetLowering {
public:
- R600TargetLowering(TargetMachine &TM);
+ R600TargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock * BB) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index b6c00f8..291fb04 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -335,10 +335,11 @@ def load_param : LoadParamFrag<load>;
def load_param_exti8 : LoadParamFrag<az_extloadi8>;
def load_param_exti16 : LoadParamFrag<az_extloadi16>;
-def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">;
+def isR600 : Predicate<"Subtarget->getGeneration() <= AMDGPUSubtarget::R700">;
-def isR600toCayman : Predicate<
- "Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
+def isR600toCayman
+ : Predicate<
+ "Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
//===----------------------------------------------------------------------===//
// R600 SDNodes
@@ -579,6 +580,7 @@ i32imm:$COUNT, i32imm:$Enabled),
let ALT_CONST = 0;
let WHOLE_QUAD_MODE = 0;
let BARRIER = 1;
+ let isCodeGenOnly = 1;
let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
@@ -641,6 +643,7 @@ def FETCH_CLAUSE : AMDGPUInst <(outs),
field bits<8> Inst;
bits<8> num;
let Inst = num;
+ let isCodeGenOnly = 1;
}
def ALU_CLAUSE : AMDGPUInst <(outs),
@@ -648,10 +651,13 @@ def ALU_CLAUSE : AMDGPUInst <(outs),
field bits<8> Inst;
bits<8> num;
let Inst = num;
+ let isCodeGenOnly = 1;
}
def LITERALS : AMDGPUInst <(outs),
(ins LITERAL:$literal1, LITERAL:$literal2), "$literal1, $literal2", [] > {
+ let isCodeGenOnly = 1;
+
field bits<64> Inst;
bits<32> literal1;
bits<32> literal2;
@@ -698,7 +704,7 @@ def SGE : R600_2OP <
def SNE : R600_2OP <
0xB, "SETNE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_UNE))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_UNE_NE))]
>;
def SETE_DX10 : R600_2OP <
@@ -716,9 +722,10 @@ def SETGE_DX10 : R600_2OP <
[(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGE))]
>;
+// FIXME: This should probably be COND_ONE
def SETNE_DX10 : R600_2OP <
0xF, "SETNE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE_NE))]
>;
def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
@@ -913,7 +920,7 @@ class MULADD_Common <bits<5> inst> : R600_3OP <
class MULADD_IEEE_Common <bits<5> inst> : R600_3OP <
inst, "MULADD_IEEE",
- [(set f32:$dst, (fadd (fmul f32:$src0, f32:$src1), f32:$src2))]
+ [(set f32:$dst, (fmad f32:$src0, f32:$src1, f32:$src2))]
>;
class FMA_Common <bits<5> inst> : R600_3OP <
@@ -1141,16 +1148,6 @@ class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ie
(exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x))
>;
-// FROUND pattern
-class FROUNDPat<Instruction CNDGE, Instruction CNDGT> : Pat <
- (AMDGPUround f32:$x),
- (CNDGE $x,
- (CNDGE (ADD (FNEG_R600 (f32 HALF)), (FRACT $x)), (CEIL $x), (FLOOR $x)),
- (CNDGT (ADD (FNEG_R600 (f32 HALF)), (FRACT $x)), (CEIL $x), (FLOOR $x))
- )
->;
-
-
//===----------------------------------------------------------------------===//
// R600 / R700 Instructions
//===----------------------------------------------------------------------===//
@@ -1192,9 +1189,7 @@ let Predicates = [isR600] in {
def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>;
- defm : RsqPat<RECIPSQRT_IEEE_r600, f32>;
-
- def : FROUNDPat <CNDGE_r600, CNDGT_r600>;
+ def : RsqPat<RECIPSQRT_IEEE_r600, f32>;
def R600_ExportSwz : ExportSwzInst {
let Word1{20-17} = 0; // BURST_COUNT
@@ -1248,6 +1243,7 @@ let Predicates = [isR600] in {
def CF_PUSH_ELSE_R600 : CF_CLAUSE_R600<12, (ins i32imm:$ADDR),
"PUSH_ELSE @$ADDR"> {
let CNT = 0;
+ let POP_COUNT = 0; // FIXME?
}
def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"ELSE @$ADDR POP:$POP_COUNT"> {
@@ -1364,7 +1360,7 @@ def CONST_COPY : Instruction {
let Pattern =
[(set R600_Reg32:$dst, (CONST_ADDRESS ADDRGA_CONST_OFFSET:$src))];
let AsmString = "CONST_COPY";
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let isAsCheapAsAMove = 1;
let Itinerary = NullALU;
}
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp
index d782713..bcde5fb 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/R600/R600MachineScheduler.cpp
@@ -16,7 +16,7 @@
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -26,17 +26,16 @@ using namespace llvm;
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
DAG = static_cast<ScheduleDAGMILive*>(dag);
+ const AMDGPUSubtarget &ST = DAG->MF.getSubtarget<AMDGPUSubtarget>();
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
- VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
+ VLIW5 = !ST.hasCaymanISA();
MRI = &DAG->MRI;
CurInstKind = IDOther;
CurEmitted = 0;
OccupedSlotsMask = 31;
InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
InstKindLimit[IDOther] = 32;
-
- const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
AluInstCount = 0;
FetchInstCount = 0;
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index ddf68c9..deee5bc 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -153,7 +153,7 @@ public:
TII(static_cast<const R600InstrInfo *>(
MF.getSubtarget().getInstrInfo())),
TRI(TII->getRegisterInfo()) {
- VLIW5 = !MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
+ VLIW5 = !MF.getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
}
// initPacketizerState - initialize some internal flags.
diff --git a/lib/Target/R600/R700Instructions.td b/lib/Target/R600/R700Instructions.td
index 9aad85d..613a0d7 100644
--- a/lib/Target/R600/R700Instructions.td
+++ b/lib/Target/R600/R700Instructions.td
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
+def isR700 : Predicate<"Subtarget->getGeneration() == AMDGPUSubtarget::R700">;
let Predicates = [isR700] in {
def SIN_r700 : SIN_Common<0x6E>;
diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp
index 91eb60b..79f6532 100644
--- a/lib/Target/R600/SIAnnotateControlFlow.cpp
+++ b/lib/Target/R600/SIAnnotateControlFlow.cpp
@@ -14,6 +14,7 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
@@ -66,6 +67,8 @@ class SIAnnotateControlFlow : public FunctionPass {
DominatorTree *DT;
StackVector Stack;
+ LoopInfo *LI;
+
bool isTopOfStack(BasicBlock *BB);
Value *popSaved();
@@ -99,6 +102,7 @@ public:
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
@@ -277,10 +281,26 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
Term->setCondition(CallInst::Create(Loop, Arg, "", Term));
push(Term->getSuccessor(0), Arg);
-}
-
-/// \brief Close the last opened control flow
+}
+
+/// \brief Close the last opened control flow
void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
+ llvm::Loop *L = LI->getLoopFor(BB);
+
+ if (L && L->getHeader() == BB) {
+ // We can't insert an EndCF call into a loop header, because it will
+ // get executed on every iteration of the loop, when it should be
+ // executed only once before the loop.
+ SmallVector <BasicBlock*, 8> Latches;
+ L->getLoopLatches(Latches);
+
+ std::vector<BasicBlock*> Preds;
+ for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
+ if (std::find(Latches.begin(), Latches.end(), *PI) == Latches.end())
+ Preds.push_back(*PI);
+ }
+ BB = llvm::SplitBlockPredecessors(BB, Preds, "endcf.split", nullptr, DT,
+ LI, false);
+ }
+
CallInst::Create(EndCf, popSaved(), "", BB->getFirstInsertionPt());
}
@@ -288,6 +308,7 @@ void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
/// recognize if/then/else and loops.
bool SIAnnotateControlFlow::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
for (df_iterator<BasicBlock *> I = df_begin(&F.getEntryBlock()),
E = df_end(&F.getEntryBlock()); I != E; ++I) {
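The effect of the predecessor split above is easier to see in isolation. A minimal sketch of the same transform, outside the pass (not part of the patch; names are illustrative, and the SplitBlockPredecessors signature matches the call used above):

// Sketch only: hoist a once-only call site out of a loop header by
// splitting off the non-latch predecessors.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <vector>
using namespace llvm;

static BasicBlock *splitForEndCf(BasicBlock *Header, Loop *L,
                                 DominatorTree *DT, LoopInfo *LI) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);

  // Redirect every predecessor that is not a back edge into a new block;
  // that block dominates the loop and executes exactly once before it.
  std::vector<BasicBlock *> Preds;
  for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
       PI != PE; ++PI)
    if (std::find(Latches.begin(), Latches.end(), *PI) == Latches.end())
      Preds.push_back(*PI);

  return SplitBlockPredecessors(Header, Preds, "endcf.split", nullptr, DT,
                                LI, false);
}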
diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h
index 2e7dab6..b540140 100644
--- a/lib/Target/R600/SIDefines.h
+++ b/lib/Target/R600/SIDefines.h
@@ -8,25 +8,49 @@
/// \file
//===----------------------------------------------------------------------===//
+#include "llvm/MC/MCInstrDesc.h"
+
#ifndef LLVM_LIB_TARGET_R600_SIDEFINES_H
#define LLVM_LIB_TARGET_R600_SIDEFINES_H
namespace SIInstrFlags {
// This needs to be kept in sync with the field bits in InstSI.
enum {
- MIMG = 1 << 3,
- SMRD = 1 << 4,
- VOP1 = 1 << 5,
- VOP2 = 1 << 6,
- VOP3 = 1 << 7,
- VOPC = 1 << 8,
- SALU = 1 << 9,
- MUBUF = 1 << 10,
- MTBUF = 1 << 11,
- FLAT = 1 << 12
+ SALU = 1 << 3,
+ VALU = 1 << 4,
+
+ SOP1 = 1 << 5,
+ SOP2 = 1 << 6,
+ SOPC = 1 << 7,
+ SOPK = 1 << 8,
+ SOPP = 1 << 9,
+
+ VOP1 = 1 << 10,
+ VOP2 = 1 << 11,
+ VOP3 = 1 << 12,
+ VOPC = 1 << 13,
+
+ MUBUF = 1 << 14,
+ MTBUF = 1 << 15,
+ SMRD = 1 << 16,
+ DS = 1 << 17,
+ MIMG = 1 << 18,
+ FLAT = 1 << 19,
+ WQM = 1 << 20
};
}
+namespace llvm {
+namespace AMDGPU {
+ enum OperandType {
+ /// Operand with register or 32-bit immediate
+ OPERAND_REG_IMM32 = llvm::MCOI::OPERAND_FIRST_TARGET,
+ /// Operand with register or inline constant
+ OPERAND_REG_INLINE_C
+ };
+}
+}
+
namespace SIInstrFlags {
enum Flags {
// First 4 bits are the instruction encoding
@@ -34,6 +58,21 @@ namespace SIInstrFlags {
EXP_CNT = 1 << 1,
LGKM_CNT = 1 << 2
};
+
+ // v_cmp_class_* etc. use a 10-bit mask for what operation is checked.
+ // The result is true if any of these tests are true.
+ enum ClassFlags {
+ S_NAN = 1 << 0, // Signaling NaN
+ Q_NAN = 1 << 1, // Quiet NaN
+ N_INFINITY = 1 << 2, // Negative infinity
+ N_NORMAL = 1 << 3, // Negative normal
+ N_SUBNORMAL = 1 << 4, // Negative subnormal
+ N_ZERO = 1 << 5, // Negative zero
+ P_ZERO = 1 << 6, // Positive zero
+ P_SUBNORMAL = 1 << 7, // Positive subnormal
+ P_NORMAL = 1 << 8, // Positive normal
+ P_INFINITY = 1 << 9 // Positive infinity
+ };
}
namespace SISrcMods {
@@ -61,7 +100,14 @@ namespace SIOutMods {
#define S_00B028_VGPRS(x) (((x) & 0x3F) << 0)
#define S_00B028_SGPRS(x) (((x) & 0x0F) << 6)
#define R_00B84C_COMPUTE_PGM_RSRC2 0x00B84C
-#define S_00B02C_SCRATCH_EN(x) (((x) & 0x1) << 0)
+#define S_00B84C_SCRATCH_EN(x) (((x) & 0x1) << 0)
+#define S_00B84C_USER_SGPR(x) (((x) & 0x1F) << 1)
+#define S_00B84C_TGID_X_EN(x) (((x) & 0x1) << 7)
+#define S_00B84C_TGID_Y_EN(x) (((x) & 0x1) << 8)
+#define S_00B84C_TGID_Z_EN(x) (((x) & 0x1) << 9)
+#define S_00B84C_TG_SIZE_EN(x) (((x) & 0x1) << 10)
+#define S_00B84C_TIDIG_COMP_CNT(x) (((x) & 0x03) << 11)
+
#define S_00B84C_LDS_SIZE(x) (((x) & 0x1FF) << 15)
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
@@ -118,4 +164,8 @@ namespace SIOutMods {
#define R_00B860_COMPUTE_TMPRING_SIZE 0x00B860
#define S_00B860_WAVESIZE(x) (((x) & 0x1FFF) << 12)
+#define R_0286E8_SPI_TMPRING_SIZE 0x0286E8
+#define S_0286E8_WAVESIZE(x) (((x) & 0x1FFF) << 12)
+
+
#endif
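The ClassFlags values above feed the fp_class combines added later in this patch. A small sketch of how the masks compose (not part of the patch; the constants mirror the enum, and the 0x1f8 result matches the static_assert in performAndCombine below):

// Sketch only: composing v_cmp_class masks; only the low 10 bits are used.
#include <cstdint>

const uint32_t S_NAN = 1 << 0, Q_NAN = 1 << 1;           // mirrors ClassFlags
const uint32_t N_INFINITY = 1 << 2, P_INFINITY = 1 << 9;

// "is infinite": the mask the isinf SETCC combine emits.
const uint32_t IsInf = N_INFINITY | P_INFINITY;          // 0x204

// "is finite": everything except NaNs and infinities.
const uint32_t IsFinite = ~(S_NAN | Q_NAN | IsInf) & 0x3ff; // 0x1f8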
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp
index d6f4b4c..cd1b3ac 100644
--- a/lib/Target/R600/SIFixSGPRCopies.cpp
+++ b/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -136,12 +136,12 @@ const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
const MachineRegisterInfo &MRI,
unsigned Reg,
unsigned SubReg) const {
- // The Reg parameter to the function must always be defined by either a PHI
- // or a COPY, therefore it cannot be a physical register.
- assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
- "Reg cannot be a physical register");
- const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+ const TargetRegisterClass *RC
+ = TargetRegisterInfo::isVirtualRegister(Reg) ?
+ MRI.getRegClass(Reg) :
+ TRI->getRegClass(Reg);
+
RC = TRI->getSubRegClass(RC, SubReg);
for (MachineRegisterInfo::use_instr_iterator
I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
@@ -182,7 +182,12 @@ bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
unsigned DstReg = Copy.getOperand(0).getReg();
unsigned SrcReg = Copy.getOperand(1).getReg();
unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
- const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
+
+ const TargetRegisterClass *DstRC
+ = TargetRegisterInfo::isVirtualRegister(DstReg) ?
+ MRI.getRegClass(DstReg) :
+ TRI->getRegClass(DstReg);
+
const TargetRegisterClass *SrcRC;
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
@@ -217,20 +222,21 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
switch (MI.getOpcode()) {
default: continue;
case AMDGPU::PHI: {
- DEBUG(dbgs() << " Fixing PHI:\n");
- DEBUG(MI.print(dbgs()));
+ DEBUG(dbgs() << "Fixing PHI: " << MI);
- for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
- unsigned Reg = MI.getOperand(i).getReg();
- const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
- MI.getOperand(0).getSubReg());
- MRI.constrainRegClass(Reg, RC);
+ for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
+ const MachineOperand &Op = MI.getOperand(i);
+ unsigned Reg = Op.getReg();
+ const TargetRegisterClass *RC
+ = inferRegClassFromDef(TRI, MRI, Reg, Op.getSubReg());
+
+ MRI.constrainRegClass(Op.getReg(), RC);
}
unsigned Reg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
MI.getOperand(0).getSubReg());
- if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
- MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ if (TRI->getCommonSubClass(RC, &AMDGPU::VGPR_32RegClass)) {
+ MRI.constrainRegClass(Reg, &AMDGPU::VGPR_32RegClass);
}
if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
diff --git a/lib/Target/R600/SIFoldOperands.cpp b/lib/Target/R600/SIFoldOperands.cpp
new file mode 100644
index 0000000..ae4b05d
--- /dev/null
+++ b/lib/Target/R600/SIFoldOperands.cpp
@@ -0,0 +1,287 @@
+//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+/// \file
+//===----------------------------------------------------------------------===//
+//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define DEBUG_TYPE "si-fold-operands"
+using namespace llvm;
+
+namespace {
+
+class SIFoldOperands : public MachineFunctionPass {
+public:
+ static char ID;
+
+public:
+ SIFoldOperands() : MachineFunctionPass(ID) {
+ initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "SI Fold Operands";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineDominatorTree>();
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+struct FoldCandidate {
+ MachineInstr *UseMI;
+ unsigned UseOpNo;
+ MachineOperand *OpToFold;
+ uint64_t ImmToFold;
+
+ FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
+ UseMI(MI), UseOpNo(OpNo) {
+
+ if (FoldOp->isImm()) {
+ OpToFold = nullptr;
+ ImmToFold = FoldOp->getImm();
+ } else {
+ assert(FoldOp->isReg());
+ OpToFold = FoldOp;
+ }
+ }
+
+ bool isImm() const {
+ return !OpToFold;
+ }
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS_BEGIN(SIFoldOperands, DEBUG_TYPE,
+ "SI Fold Operands", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(SIFoldOperands, DEBUG_TYPE,
+ "SI Fold Operands", false, false)
+
+char SIFoldOperands::ID = 0;
+
+char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
+
+FunctionPass *llvm::createSIFoldOperandsPass() {
+ return new SIFoldOperands();
+}
+
+static bool isSafeToFold(unsigned Opcode) {
+ switch(Opcode) {
+ case AMDGPU::V_MOV_B32_e32:
+ case AMDGPU::V_MOV_B32_e64:
+ case AMDGPU::V_MOV_B64_PSEUDO:
+ case AMDGPU::S_MOV_B32:
+ case AMDGPU::S_MOV_B64:
+ case AMDGPU::COPY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool updateOperand(FoldCandidate &Fold,
+ const TargetRegisterInfo &TRI) {
+ MachineInstr *MI = Fold.UseMI;
+ MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
+ assert(Old.isReg());
+
+ if (Fold.isImm()) {
+ Old.ChangeToImmediate(Fold.ImmToFold);
+ return true;
+ }
+
+ MachineOperand *New = Fold.OpToFold;
+ if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
+ TargetRegisterInfo::isVirtualRegister(New->getReg())) {
+ Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
+ return true;
+ }
+
+ // FIXME: Handle physical registers.
+
+ return false;
+}
+
+static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
+ MachineInstr *MI, unsigned OpNo,
+ MachineOperand *OpToFold,
+ const SIInstrInfo *TII) {
+ if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
+ // Operand is not legal, so try to commute the instruction to
+ // see if this makes it possible to fold.
+ unsigned CommuteIdx0;
+ unsigned CommuteIdx1;
+ bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
+
+ if (CanCommute) {
+ if (CommuteIdx0 == OpNo)
+ OpNo = CommuteIdx1;
+ else if (CommuteIdx1 == OpNo)
+ OpNo = CommuteIdx0;
+ }
+
+ if (!CanCommute || !TII->commuteInstruction(MI))
+ return false;
+
+ if (!TII->isOperandLegal(MI, OpNo, OpToFold))
+ return false;
+ }
+
+ FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
+ return true;
+}
+
+bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ MachineBasicBlock::iterator I, Next;
+ for (I = MBB.begin(); I != MBB.end(); I = Next) {
+ Next = std::next(I);
+ MachineInstr &MI = *I;
+
+ if (!isSafeToFold(MI.getOpcode()))
+ continue;
+
+ unsigned OpSize = TII->getOpSize(MI, 1);
+ MachineOperand &OpToFold = MI.getOperand(1);
+ bool FoldingImm = OpToFold.isImm();
+
+ // FIXME: We could also be folding things like FrameIndexes and
+ // TargetIndexes.
+ if (!FoldingImm && !OpToFold.isReg())
+ continue;
+
+ // Folding immediates with more than one use will increase program size.
+ // FIXME: This will also reduce register usage, which may be better
+ // in some cases. A better heuristic is needed.
+ if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
+ !MRI.hasOneUse(MI.getOperand(0).getReg()))
+ continue;
+
+ // FIXME: Fold operands with subregs.
+ if (OpToFold.isReg() &&
+ (!TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()) ||
+ OpToFold.getSubReg()))
+ continue;
+
+ std::vector<FoldCandidate> FoldList;
+ for (MachineRegisterInfo::use_iterator
+ Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
+ Use != E; ++Use) {
+
+ MachineInstr *UseMI = Use->getParent();
+ const MachineOperand &UseOp = UseMI->getOperand(Use.getOperandNo());
+
+ // FIXME: Fold operands with subregs.
+ if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
+ UseOp.isImplicit())) {
+ continue;
+ }
+
+ APInt Imm;
+
+ if (FoldingImm) {
+ unsigned UseReg = UseOp.getReg();
+ const TargetRegisterClass *UseRC
+ = TargetRegisterInfo::isVirtualRegister(UseReg) ?
+ MRI.getRegClass(UseReg) :
+ TRI.getRegClass(UseReg);
+
+ Imm = APInt(64, OpToFold.getImm());
+
+ // Split 64-bit constants into 32-bits for folding.
+ if (UseOp.getSubReg()) {
+ if (UseRC->getSize() != 8)
+ continue;
+
+ if (UseOp.getSubReg() == AMDGPU::sub0) {
+ Imm = Imm.getLoBits(32);
+ } else {
+ assert(UseOp.getSubReg() == AMDGPU::sub1);
+ Imm = Imm.getHiBits(32);
+ }
+ }
+
+ // In order to fold immediates into copies, we need to change the
+ // copy to a MOV.
+ if (UseMI->getOpcode() == AMDGPU::COPY) {
+ unsigned DestReg = UseMI->getOperand(0).getReg();
+ const TargetRegisterClass *DestRC
+ = TargetRegisterInfo::isVirtualRegister(DestReg) ?
+ MRI.getRegClass(DestReg) :
+ TRI.getRegClass(DestReg);
+
+ unsigned MovOp = TII->getMovOpcode(DestRC);
+ if (MovOp == AMDGPU::COPY)
+ continue;
+
+ UseMI->setDesc(TII->get(MovOp));
+ }
+ }
+
+ const MCInstrDesc &UseDesc = UseMI->getDesc();
+
+ // Don't fold into target independent nodes. Target independent opcodes
+ // don't have defined register classes.
+ if (UseDesc.isVariadic() ||
+ UseDesc.OpInfo[Use.getOperandNo()].RegClass == -1)
+ continue;
+
+ if (FoldingImm) {
+ MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
+ tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &ImmOp, TII);
+ continue;
+ }
+
+ tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &OpToFold, TII);
+
+ // FIXME: We could try to change the instruction from 64-bit to 32-bit
+ // to enable more folding opportunites. The shrink operands pass
+ // already does this.
+ }
+
+ for (FoldCandidate &Fold : FoldList) {
+ if (updateOperand(Fold, TRI)) {
+ // Clear kill flags.
+ if (!Fold.isImm()) {
+ assert(Fold.OpToFold && Fold.OpToFold->isReg());
+ Fold.OpToFold->setIsKill(false);
+ }
+ DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
+ Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
+ }
+ }
+ }
+ }
+ return false;
+}
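One detail of the new pass that benefits from a worked example is the sub0/sub1 split of 64-bit immediates in runOnMachineFunction. A standalone sketch with the same APInt calls (not part of the patch; the bit pattern is illustrative):

// Sketch only: split a 64-bit immediate for folding into 32-bit subreg uses.
#include "llvm/ADT/APInt.h"
#include <cstdint>
using namespace llvm;

static void splitImm64(uint64_t Value, uint64_t &Lo, uint64_t &Hi) {
  APInt Imm(64, Value);
  Lo = Imm.getLoBits(32).getZExtValue(); // folded into a sub0 use
  Hi = Imm.getHiBits(32).getZExtValue(); // folded into a sub1 use
}

// The f64 constant 1.0 (0x3FF0000000000000) splits into Lo = 0x0 and
// Hi = 0x3FF00000; each half is then folded independently.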
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 8d4164a..7d794b8 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -35,8 +35,9 @@
using namespace llvm;
-SITargetLowering::SITargetLowering(TargetMachine &TM) :
- AMDGPUTargetLowering(TM) {
+SITargetLowering::SITargetLowering(TargetMachine &TM,
+ const AMDGPUSubtarget &STI)
+ : AMDGPUTargetLowering(TM, STI) {
addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
@@ -44,7 +45,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
- addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
@@ -59,22 +60,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
- computeRegisterProperties();
-
- // Condition Codes
- setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
- setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
- setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
- setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
- setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
- setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
-
- setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
- setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
- setCondCodeAction(ISD::SETUGE, MVT::f64, Expand);
- setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
- setCondCodeAction(ISD::SETULE, MVT::f64, Expand);
- setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
+ computeRegisterProperties(STI.getRegisterInfo());
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
@@ -104,12 +90,8 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::STORE, MVT::v16i32, Custom);
setOperationAction(ISD::STORE, MVT::i1, Custom);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
- setOperationAction(ISD::SELECT, MVT::f32, Promote);
- AddPromotedToType(ISD::SELECT, MVT::f32, MVT::i32);
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Promote);
AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
@@ -147,26 +129,34 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);
-
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
-
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-
- setTruncStoreAction(MVT::i32, MVT::i8, Custom);
- setTruncStoreAction(MVT::i32, MVT::i16, Custom);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ if (VT == MVT::i64)
+ continue;
+
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
+
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
+ }
+
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand);
+ }
+
+ for (MVT VT : MVT::fp_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
@@ -213,13 +203,6 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
}
}
- for (int I = MVT::v1f64; I <= MVT::v8f64; ++I) {
- MVT::SimpleValueType VT = static_cast<MVT::SimpleValueType>(I);
- setOperationAction(ISD::FTRUNC, VT, Expand);
- setOperationAction(ISD::FCEIL, VT, Expand);
- setOperationAction(ISD::FFLOOR, VT, Expand);
- }
-
if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
setOperationAction(ISD::FCEIL, MVT::f64, Legal);
@@ -228,6 +211,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
}
setOperationAction(ISD::FDIV, MVT::f32, Custom);
+ setOperationAction(ISD::FDIV, MVT::f64, Custom);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
@@ -235,7 +219,8 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setTargetDAGCombine(ISD::FMAXNUM);
setTargetDAGCombine(ISD::SELECT_CC);
setTargetDAGCombine(ISD::SETCC);
-
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::UINT_TO_FP);
// All memory operations. Some folding on the pointer operand is done to help
@@ -315,7 +300,7 @@ bool SITargetLowering::isLegalAddressingMode(const AddrMode &AM,
return true;
}
-bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
+bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned AddrSpace,
unsigned Align,
bool *IsFast) const {
@@ -327,9 +312,8 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
if (!VT.isSimple() || VT == MVT::Other)
return false;
- // XXX - CI changes say "Support for unaligned memory accesses" but I don't
- // see what for specifically. The wording everywhere else seems to be the
- // same.
+ // TODO - CI+ supports unaligned memory accesses, but this requires driver
+ // support.
// XXX - The only mention I see of this in the ISA manual is for LDS direct
// reads the "byte address and must be dword aligned". Is it also true for the
@@ -341,12 +325,18 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
return Align % 4 == 0;
}
+ // Smaller than dword value must be aligned.
+ // FIXME: This should be allowed on CI+
+ if (VT.bitsLT(MVT::i32))
+ return false;
+
// 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
// byte-address are ignored, thus forcing Dword alignment.
// This applies to private, global, and constant memory.
if (IsFast)
*IsFast = true;
- return VT.bitsGT(MVT::i32);
+
+ return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}
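Restating the new policy compactly for the non-LDS address spaces (a sketch, not part of the patch; the real hook also special-cases LDS and non-simple types):

// Sketch only: sub-dword accesses must be naturally aligned; anything
// larger than a dword is fast iff it is dword-aligned.
static bool allowsMisalignedSketch(unsigned BitSize, unsigned Align,
                                   bool *IsFast) {
  if (BitSize < 32)
    return false;        // the FIXME above: should become legal on CI+
  if (IsFast)
    *IsFast = true;      // hardware ignores the 2 LSBs of the byte address
  return BitSize > 32 && Align % 4 == 0;
}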
EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
@@ -379,8 +369,8 @@ SITargetLowering::getPreferredVectorAction(EVT VT) const {
bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
return TII->isInlineConstant(Imm);
}
@@ -413,16 +403,11 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
}
SDValue SITargetLowering::LowerFormalArguments(
- SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- const TargetMachine &TM = getTargetMachine();
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo*>(TM.getSubtargetImpl()->getRegisterInfo());
+ static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
MachineFunction &MF = DAG.getMachineFunction();
FunctionType *FType = MF.getFunction()->getFunctionType();
@@ -461,7 +446,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// We REALLY want the ORIGINAL number of vertex elements here, e.g. a
// three or five element vertex only needs three or five registers,
// NOT four or eight.
- Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+ Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
unsigned NumElements = ParamType->getVectorNumElements();
for (unsigned j = 0; j != NumElements; ++j) {
@@ -489,7 +474,10 @@ SDValue SITargetLowering::LowerFormalArguments(
// The pointer to the list of arguments is stored in SGPR0, SGPR1
// The pointer to the scratch buffer is stored in SGPR2, SGPR3
if (Info->getShaderType() == ShaderType::COMPUTE) {
- Info->NumUserSGPRs = 4;
+ if (Subtarget->isAmdHsaOS())
+ Info->NumUserSGPRs = 2; // FIXME: Need to support scratch buffers.
+ else
+ Info->NumUserSGPRs = 4;
unsigned InputPtrReg =
TRI->getPreloadedValue(MF, SIRegisterInfo::INPUT_PTR);
@@ -541,7 +529,7 @@ SDValue SITargetLowering::LowerFormalArguments(
Offset, Ins[i].Flags.isSExt());
const PointerType *ParamTy =
- dyn_cast<PointerType>(FType->getParamType(Ins[i].OrigArgIndex));
+ dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
// On SI local pointers are just offsets into LDS, so they are always
@@ -576,7 +564,7 @@ SDValue SITargetLowering::LowerFormalArguments(
if (Arg.VT.isVector()) {
// Build a vector from the registers
- Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+ Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
unsigned NumElements = ParamType->getVectorNumElements();
SmallVector<SDValue, 4> Regs;
@@ -589,8 +577,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// Fill up the missing vector elements
NumElements = Arg.VT.getVectorNumElements() - NumElements;
- for (unsigned j = 0; j != NumElements; ++j)
- Regs.push_back(DAG.getUNDEF(VT));
+ Regs.append(NumElements, DAG.getUNDEF(VT));
InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
continue;
@@ -598,6 +585,12 @@ SDValue SITargetLowering::LowerFormalArguments(
InVals.push_back(Val);
}
+
+ if (Info->getShaderType() != ShaderType::COMPUTE) {
+ unsigned ScratchIdx = CCInfo.getFirstUnallocated(ArrayRef<MCPhysReg>(
+ AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs()));
+ Info->ScratchOffsetReg = AMDGPU::SGPR_32RegClass.getRegister(ScratchIdx);
+ }
return Chain;
}
@@ -605,25 +598,14 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr * MI, MachineBasicBlock * BB) const {
MachineBasicBlock::iterator I = *MI;
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
switch (MI->getOpcode()) {
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
- case AMDGPU::BRANCH: return BB;
- case AMDGPU::V_SUB_F64: {
- unsigned DestReg = MI->getOperand(0).getReg();
- BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64), DestReg)
- .addImm(0) // SRC0 modifiers
- .addReg(MI->getOperand(1).getReg())
- .addImm(1) // SRC1 modifiers
- .addReg(MI->getOperand(2).getReg())
- .addImm(0) // CLAMP
- .addImm(0); // OMOD
- MI->eraseFromParent();
- break;
- }
+ case AMDGPU::BRANCH:
+ return BB;
case AMDGPU::SI_RegisterStorePseudo: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
@@ -640,17 +622,43 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
return BB;
}
-EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
+ // This currently forces unfolding various combinations of fsub into fma with
+ // free fneg'd operands. As long as we have fast FMA (controlled by
+ // isFMAFasterThanFMulAndFAdd), we should perform these.
+
+// Even when fma is quarter rate, as for f64 (where add / sub are at best
+// half rate), most of these combines appear to be cycle neutral but save
+// on instruction count / code size.
+ return true;
+}
+
+EVT SITargetLowering::getSetCCResultType(LLVMContext &Ctx, EVT VT) const {
if (!VT.isVector()) {
return MVT::i1;
}
- return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}
MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const {
return MVT::i32;
}
+// Answering this is somewhat tricky and depends on the specific device, as
+// devices have different rates for fma and for f64 operations in general.
+//
+// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
+// regardless of which device (although the number of cycles differs between
+// devices), so it is always profitable for f64.
+//
+// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
+// only on full rate devices. Normally, we should prefer selecting v_mad_f32
+// which we can always do even without fused FP ops since it returns the same
+// result as the separate operations and since it is always full
+// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
+// however does not support denormals, so we do report fma as faster if we have
+// a fast fma device and require denormals.
+//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
VT = VT.getScalarType();
@@ -659,7 +667,11 @@ bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
- return false; /* There is V_MAD_F32 for f32 */
+ // FMA is as fast as fmul+fadd on some subtargets. However, we always have
+ // full rate f32 mad available, which returns the same result as the
+ // separate operations and which we should prefer over fma. We can't use
+ // mad if we want to support denormals, so only report fma as faster then.
+ return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
case MVT::f64:
return true;
default:
@@ -755,15 +767,12 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);
// Build the result and
- SmallVector<EVT, 4> Res;
- for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i)
- Res.push_back(Intr->getValueType(i));
+ ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
// operands of the new intrinsic call
SmallVector<SDValue, 4> Ops;
Ops.push_back(BRCOND.getOperand(0));
- for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i)
- Ops.push_back(Intr->getOperand(i));
+ Ops.append(Intr->op_begin() + 1, Intr->op_end());
Ops.push_back(Target);
// build the new intrinsic call
@@ -839,7 +848,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+ static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
EVT VT = Op.getValueType();
SDLoc DL(Op);
@@ -889,13 +898,13 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_Z), VT);
case Intrinsic::r600_read_tidig_x:
- return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_X), VT);
case Intrinsic::r600_read_tidig_y:
- return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Y), VT);
case Intrinsic::r600_read_tidig_z:
- return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Z), VT);
case AMDGPUIntrinsic::SI_load_const: {
SDValue Ops[] = {
@@ -1090,7 +1099,7 @@ SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
const APFloat K1Val(BitsToFloat(0x2f800000));
const SDValue K1 = DAG.getConstantFP(K1Val, MVT::f32);
- const SDValue One = DAG.getTargetConstantFP(1.0, MVT::f32);
+ const SDValue One = DAG.getConstantFP(1.0, MVT::f32);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
@@ -1108,7 +1117,70 @@ SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
}
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
- return SDValue();
+ if (DAG.getTarget().Options.UnsafeFPMath)
+ return LowerFastFDIV(Op, DAG);
+
+ SDLoc SL(Op);
+ SDValue X = Op.getOperand(0);
+ SDValue Y = Op.getOperand(1);
+
+ const SDValue One = DAG.getConstantFP(1.0, MVT::f64);
+
+ SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
+
+ SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
+
+ SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
+
+ SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
+
+ SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
+
+ SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
+
+ SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
+
+ SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
+
+ SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
+ SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
+
+ SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
+ NegDivScale0, Mul, DivScale1);
+
+ SDValue Scale;
+
+ if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ // Workaround a hardware bug on SI where the condition output from div_scale
+ // is not usable.
+
+ const SDValue Hi = DAG.getConstant(1, MVT::i32);
+
+ // Figure out which scale to use for div_fmas.
+ SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
+ SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
+ SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
+ SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
+
+ SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
+ SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
+
+ SDValue Scale0Hi
+ = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
+ SDValue Scale1Hi
+ = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
+
+ SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
+ SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
+ Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
+ } else {
+ Scale = DivScale1.getValue(1);
+ }
+
+ SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
+ Fma4, Fma3, Mul, Scale);
+
+ return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}
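In plain scalar arithmetic, the DAG built above performs two Newton-Raphson refinements of 1/y and then a fused residual correction. A model of the sequence (not part of the patch; it ignores the div_scale exponent scaling and the SI condition-bit workaround, and approx_rcp is a hypothetical stand-in for v_rcp_f64):

// Sketch only: scalar model of LowerFDIV64.
#include <cmath>

double approx_rcp(double);             // hypothetical: hardware 1/y seed

double fdiv64_model(double x, double y) {
  double r = approx_rcp(y);            // Rcp
  double e = std::fma(-y, r, 1.0);     // Fma0: error of the seed
  r = std::fma(r, e, r);               // Fma1: first refinement
  e = std::fma(-y, r, 1.0);            // Fma2: remaining error
  r = std::fma(r, e, r);               // Fma3: second refinement
  double q = x * r;                    // Mul: initial quotient
  double rem = std::fma(-y, q, x);     // Fma4: residual
  return std::fma(rem, r, q);          // DIV_FMAS + DIV_FIXUP: final round
}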
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
@@ -1129,11 +1201,6 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Store->getMemoryVT();
// These stores are legal.
- if (Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
- VT.isVector() && VT.getVectorNumElements() == 2 &&
- VT.getVectorElementType() == MVT::i32)
- return SDValue();
-
if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
if (VT.isVector() && VT.getVectorNumElements() > 4)
return ScalarizeVectorStore(Op, DAG);
@@ -1177,7 +1244,7 @@ SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
- DAGCombinerInfo &DCI) {
+ DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
EVT ScalarVT = VT.getScalarType();
if (ScalarVT != MVT::f32)
@@ -1225,8 +1292,21 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
-
LoadSDNode *Load = cast<LoadSDNode>(Src);
+
+ unsigned AS = Load->getAddressSpace();
+ unsigned Align = Load->getAlignment();
+ Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
+
+ // Don't try to replace the load if we have to expand it due to alignment
+ // problems. Otherwise we will end up scalarizing the load, and trying to
+ // repack into the vector for no real reason.
+ if (Align < ABIAlignment &&
+ !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) {
+ return SDValue();
+ }
+
SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
Load->getChain(),
Load->getBasePtr(),
@@ -1297,8 +1377,8 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
if (!CAdd)
return SDValue();
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
// If the resulting offset is too large, we can't fold it into the addressing
// mode offset.
@@ -1316,6 +1396,102 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}
+SDValue SITargetLowering::performAndCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+
+ // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
+ // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ if (LHS.getOpcode() == ISD::SETCC &&
+ RHS.getOpcode() == ISD::SETCC) {
+ ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+ ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
+
+ SDValue X = LHS.getOperand(0);
+ SDValue Y = RHS.getOperand(0);
+ if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
+ return SDValue();
+
+ if (LCC == ISD::SETO) {
+ if (X != LHS.getOperand(1))
+ return SDValue();
+
+ if (RCC == ISD::SETUNE) {
+ const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
+ if (!C1 || !C1->isInfinity() || C1->isNegative())
+ return SDValue();
+
+ const uint32_t Mask = SIInstrFlags::N_NORMAL |
+ SIInstrFlags::N_SUBNORMAL |
+ SIInstrFlags::N_ZERO |
+ SIInstrFlags::P_ZERO |
+ SIInstrFlags::P_SUBNORMAL |
+ SIInstrFlags::P_NORMAL;
+
+ static_assert(((~(SIInstrFlags::S_NAN |
+ SIInstrFlags::Q_NAN |
+ SIInstrFlags::N_INFINITY |
+ SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
+ "mask not equal");
+
+ return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1,
+ X, DAG.getConstant(Mask, MVT::i32));
+ }
+ }
+ }
+
+ return SDValue();
+}
+
+SDValue SITargetLowering::performOrCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
+ if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
+ RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
+ SDValue Src = LHS.getOperand(0);
+ if (Src != RHS.getOperand(0))
+ return SDValue();
+
+ const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
+ const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
+ if (!CLHS || !CRHS)
+ return SDValue();
+
+ // Only 10 bits are used.
+ static const uint32_t MaxMask = 0x3ff;
+
+ uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
+ return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1,
+ Src, DAG.getConstant(NewMask, MVT::i32));
+ }
+
+ return SDValue();
+}
+
+SDValue SITargetLowering::performClassCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue Mask = N->getOperand(1);
+
+ // fp_class x, 0 -> false
+ if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
+ if (CMask->isNullValue())
+ return DAG.getConstant(0, MVT::i1);
+ }
+
+ return SDValue();
+}
+
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
switch (Opc) {
case ISD::FMAXNUM:
@@ -1371,33 +1547,47 @@ SDValue SITargetLowering::performMin3Max3Combine(SDNode *N,
return SDValue();
}
+SDValue SITargetLowering::performSetCCCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc SL(N);
+
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ EVT VT = LHS.getValueType();
+
+ if (VT != MVT::f32 && VT != MVT::f64)
+ return SDValue();
+
+ // Match isinf pattern
+ // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
+ ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
+ const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
+ if (!CRHS)
+ return SDValue();
+
+ const APFloat &APF = CRHS->getValueAPF();
+ if (APF.isInfinity() && !APF.isNegative()) {
+ unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
+ return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1,
+ LHS.getOperand(0), DAG.getConstant(Mask, MVT::i32));
+ }
+ }
+
+ return SDValue();
+}
+
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
- EVT VT = N->getValueType(0);
switch (N->getOpcode()) {
- default: return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
- case ISD::SETCC: {
- SDValue Arg0 = N->getOperand(0);
- SDValue Arg1 = N->getOperand(1);
- SDValue CC = N->getOperand(2);
- ConstantSDNode * C = nullptr;
- ISD::CondCode CCOp = dyn_cast<CondCodeSDNode>(CC)->get();
-
- // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
- if (VT == MVT::i1
- && Arg0.getOpcode() == ISD::SIGN_EXTEND
- && Arg0.getOperand(0).getValueType() == MVT::i1
- && (C = dyn_cast<ConstantSDNode>(Arg1))
- && C->isNullValue()
- && CCOp == ISD::SETNE) {
- return SimplifySetCC(VT, Arg0.getOperand(0),
- DAG.getConstant(0, MVT::i1), CCOp, true, DCI, DL);
- }
- break;
- }
+ default:
+ return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
+ case ISD::SETCC:
+ return performSetCCCombine(N, DCI);
case ISD::FMAXNUM: // TODO: What about fmax_legacy?
case ISD::FMINNUM:
case AMDGPUISD::SMAX:
@@ -1442,6 +1632,11 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (VT != MVT::f32)
break;
+ // Only do this if we are not trying to support denormals. v_mad_f32 does
+ // not support denormals ever.
+ if (Subtarget->hasFP32Denormals())
+ break;
+
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -1452,8 +1647,8 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (LHS.getOpcode() == ISD::FADD) {
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
- const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, RHS);
+ const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
+ return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
}
}
@@ -1461,12 +1656,12 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (RHS.getOpcode() == ISD::FADD) {
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
- const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, LHS);
+ const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
+ return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
}
}
- break;
+ return SDValue();
}
case ISD::FSUB: {
if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
@@ -1476,39 +1671,22 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
// Try to get the fneg to fold into the source modifier. This undoes generic
// DAG combines and folds them into the mad.
- if (VT == MVT::f32) {
+ //
+ // Only do this if we are not trying to support denormals. v_mad_f32 does
+ // not support denormals ever.
+ if (VT == MVT::f32 &&
+ !Subtarget->hasFP32Denormals()) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
-
- if (LHS.getOpcode() == ISD::FMUL) {
- // (fsub (fmul a, b), c) -> mad a, b, (fneg c)
-
- SDValue A = LHS.getOperand(0);
- SDValue B = LHS.getOperand(1);
- SDValue C = DAG.getNode(ISD::FNEG, DL, VT, RHS);
-
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, A, B, C);
- }
-
- if (RHS.getOpcode() == ISD::FMUL) {
- // (fsub c, (fmul a, b)) -> mad (fneg a), b, c
-
- SDValue A = DAG.getNode(ISD::FNEG, DL, VT, RHS.getOperand(0));
- SDValue B = RHS.getOperand(1);
- SDValue C = LHS;
-
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, A, B, C);
- }
-
if (LHS.getOpcode() == ISD::FADD) {
// (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
- const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
+ const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, NegRHS);
+ return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
}
}
@@ -1517,10 +1695,12 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
- const SDValue NegTwo = DAG.getTargetConstantFP(-2.0, MVT::f32);
- return DAG.getNode(AMDGPUISD::MAD, DL, VT, NegTwo, A, LHS);
+ const SDValue NegTwo = DAG.getConstantFP(-2.0, MVT::f32);
+ return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
}
}
+
+ return SDValue();
}
break;
@@ -1554,9 +1734,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
if (NewPtr) {
- SmallVector<SDValue, 8> NewOps;
- for (unsigned I = 0, E = MemNode->getNumOperands(); I != E; ++I)
- NewOps.push_back(MemNode->getOperand(I));
+ SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end());
NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
@@ -1564,287 +1742,44 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case ISD::AND:
+ return performAndCombine(N, DCI);
+ case ISD::OR:
+ return performOrCombine(N, DCI);
+ case AMDGPUISD::FP_CLASS:
+ return performClassCombine(N, DCI);
}
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
-/// \brief Test if RegClass is one of the VSrc classes
-static bool isVSrc(unsigned RegClass) {
- switch(RegClass) {
- default: return false;
- case AMDGPU::VSrc_32RegClassID:
- case AMDGPU::VCSrc_32RegClassID:
- case AMDGPU::VSrc_64RegClassID:
- case AMDGPU::VCSrc_64RegClassID:
- return true;
- }
-}
-
-/// \brief Test if RegClass is one of the SSrc classes
-static bool isSSrc(unsigned RegClass) {
- return AMDGPU::SSrc_32RegClassID == RegClass ||
- AMDGPU::SSrc_64RegClassID == RegClass;
-}
-
/// \brief Analyze the possible immediate value Op
///
/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate
/// and the immediate value if it's a literal immediate
int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
- union {
- int32_t I;
- float F;
- } Imm;
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) {
- if (Node->getZExtValue() >> 32) {
- return -1;
- }
- Imm.I = Node->getSExtValue();
- } else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) {
- if (N->getValueType(0) != MVT::f32)
- return -1;
- Imm.F = Node->getValueAPF().convertToFloat();
- } else
- return -1; // It isn't an immediate
-
- if ((Imm.I >= -16 && Imm.I <= 64) ||
- Imm.F == 0.5f || Imm.F == -0.5f ||
- Imm.F == 1.0f || Imm.F == -1.0f ||
- Imm.F == 2.0f || Imm.F == -2.0f ||
- Imm.F == 4.0f || Imm.F == -4.0f)
- return 0; // It's an inline immediate
-
- return Imm.I; // It's a literal immediate
-}
-
-/// \brief Try to fold an immediate directly into an instruction
-bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
- bool &ScalarSlotUsed) const {
-
- MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
- if (!Mov || !TII->isMov(Mov->getMachineOpcode()))
- return false;
-
- const SDValue &Op = Mov->getOperand(0);
- int32_t Value = analyzeImmediate(Op.getNode());
- if (Value == -1) {
- // Not an immediate at all
- return false;
-
- } else if (Value == 0) {
- // Inline immediates can always be fold
- Operand = Op;
- return true;
-
- } else if (Value == Immediate) {
- // Already fold literal immediate
- Operand = Op;
- return true;
-
- } else if (!ScalarSlotUsed && !Immediate) {
- // Fold this literal immediate
- ScalarSlotUsed = true;
- Immediate = Value;
- Operand = Op;
- return true;
+ if (TII->isInlineConstant(Node->getAPIntValue()))
+ return 0;
+ uint64_t Val = Node->getZExtValue();
+ return isUInt<32>(Val) ? Val : -1;
}
- return false;
-}
+ if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) {
+ if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt()))
+ return 0;
-const TargetRegisterClass *SITargetLowering::getRegClassForNode(
- SelectionDAG &DAG, const SDValue &Op) const {
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
- const SIRegisterInfo &TRI = TII->getRegisterInfo();
-
- if (!Op->isMachineOpcode()) {
- switch(Op->getOpcode()) {
- case ISD::CopyFromReg: {
- MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
- unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- return MRI.getRegClass(Reg);
- }
- return TRI.getPhysRegClass(Reg);
- }
- default: return nullptr;
- }
- }
- const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode());
- int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
- if (OpClassID != -1) {
- return TRI.getRegClass(OpClassID);
- }
- switch(Op.getMachineOpcode()) {
- case AMDGPU::COPY_TO_REGCLASS:
- // Operand 1 is the register class id for COPY_TO_REGCLASS instructions.
- OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
-
- // If the COPY_TO_REGCLASS instruction is copying to a VSrc register
- // class, then the register class for the value could be either a
- // VReg or and SReg. In order to get a more accurate
- if (isVSrc(OpClassID))
- return getRegClassForNode(DAG, Op.getOperand(0));
-
- return TRI.getRegClass(OpClassID);
- case AMDGPU::EXTRACT_SUBREG: {
- int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- const TargetRegisterClass *SuperClass =
- getRegClassForNode(DAG, Op.getOperand(0));
- return TRI.getSubClassWithSubReg(SuperClass, SubIdx);
- }
- case AMDGPU::REG_SEQUENCE:
- // Operand 0 is the register class id for REG_SEQUENCE instructions.
- return TRI.getRegClass(
- cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue());
- default:
- return getRegClassFor(Op.getSimpleValueType());
- }
-}
+ if (Node->getValueType(0) == MVT::f32)
+ return FloatToBits(Node->getValueAPF().convertToFloat());
-/// \brief Does "Op" fit into register class "RegClass" ?
-bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
- unsigned RegClass) const {
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const TargetRegisterClass *RC = getRegClassForNode(DAG, Op);
- if (!RC) {
- return false;
+ return -1;
}
- return TRI->getRegClass(RegClass)->hasSubClassEq(RC);
-}
-/// \returns true if \p Node's operands are different from the SDValue list
-/// \p Ops
-static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) {
- for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) {
- if (Ops[i].getNode() != Node->getOperand(i).getNode()) {
- return true;
- }
- }
- return false;
-}
-
-/// TODO: This needs to be removed. It's current primary purpose is to fold
-/// immediates into operands when legal. The legalization parts are redundant
-/// with SIInstrInfo::legalizeOperands which is called in a post-isel hook.
-SDNode *SITargetLowering::legalizeOperands(MachineSDNode *Node,
- SelectionDAG &DAG) const {
- // Original encoding (either e32 or e64)
- int Opcode = Node->getMachineOpcode();
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
- const MCInstrDesc *Desc = &TII->get(Opcode);
-
- unsigned NumDefs = Desc->getNumDefs();
- unsigned NumOps = Desc->getNumOperands();
-
- // Commuted opcode if available
- int OpcodeRev = Desc->isCommutable() ? TII->commuteOpcode(Opcode) : -1;
- const MCInstrDesc *DescRev = OpcodeRev == -1 ? nullptr : &TII->get(OpcodeRev);
-
- assert(!DescRev || DescRev->getNumDefs() == NumDefs);
- assert(!DescRev || DescRev->getNumOperands() == NumOps);
-
- int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
- bool HaveVSrc = false, HaveSSrc = false;
-
- // First figure out what we already have in this instruction.
- for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
- i != e && Op < NumOps; ++i, ++Op) {
-
- unsigned RegClass = Desc->OpInfo[Op].RegClass;
- if (isVSrc(RegClass))
- HaveVSrc = true;
- else if (isSSrc(RegClass))
- HaveSSrc = true;
- else
- continue;
-
- int32_t Imm = analyzeImmediate(Node->getOperand(i).getNode());
- if (Imm != -1 && Imm != 0) {
- // Literal immediate
- Immediate = Imm;
- }
- }
-
- // If we neither have VSrc nor SSrc, it makes no sense to continue.
- if (!HaveVSrc && !HaveSSrc)
- return Node;
-
- // No scalar allowed when we have both VSrc and SSrc
- bool ScalarSlotUsed = HaveVSrc && HaveSSrc;
-
- // If this instruction has an implicit use of VCC, then it can't use the
- // constant bus.
- for (unsigned i = 0, e = Desc->getNumImplicitUses(); i != e; ++i) {
- if (Desc->ImplicitUses[i] == AMDGPU::VCC) {
- ScalarSlotUsed = true;
- break;
- }
- }
-
- // Second go over the operands and try to fold them
- std::vector<SDValue> Ops;
- for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
- i != e && Op < NumOps; ++i, ++Op) {
-
- const SDValue &Operand = Node->getOperand(i);
- Ops.push_back(Operand);
-
- // Already folded immediate?
- if (isa<ConstantSDNode>(Operand.getNode()) ||
- isa<ConstantFPSDNode>(Operand.getNode()))
- continue;
-
- // Is this a VSrc or SSrc operand?
- unsigned RegClass = Desc->OpInfo[Op].RegClass;
- if (isVSrc(RegClass) || isSSrc(RegClass)) {
- // Try to fold the immediates. If this ends up with multiple constant bus
- // uses, it will be legalized later.
- foldImm(Ops[i], Immediate, ScalarSlotUsed);
- continue;
- }
-
- if (i == 1 && DescRev && fitsRegClass(DAG, Ops[0], RegClass)) {
-
- unsigned OtherRegClass = Desc->OpInfo[NumDefs].RegClass;
- assert(isVSrc(OtherRegClass) || isSSrc(OtherRegClass));
-
- // Test if it makes sense to swap operands
- if (foldImm(Ops[1], Immediate, ScalarSlotUsed) ||
- (!fitsRegClass(DAG, Ops[1], RegClass) &&
- fitsRegClass(DAG, Ops[1], OtherRegClass))) {
-
- // Swap commutable operands
- std::swap(Ops[0], Ops[1]);
-
- Desc = DescRev;
- DescRev = nullptr;
- continue;
- }
- }
- }
-
- // Add optional chain and glue
- for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
- Ops.push_back(Node->getOperand(i));
-
- // Nodes that have a glue result are not CSE'd by getMachineNode(), so in
- // this case a brand new node is always be created, even if the operands
- // are the same as before. So, manually check if anything has been changed.
- if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) {
- return Node;
- }
-
- // Create a complete new instruction
- return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ return -1;
}
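With the rewrite above, analyzeImmediate's return convention is: 0 for an inline constant, a non-negative value for a 32-bit literal, and -1 for anything unencodable. A sketch of the integer path (not part of the patch; the -16..64 inline range is the usual SI rule and stands in for SIInstrInfo::isInlineConstant):

// Sketch only: integer classification mirroring analyzeImmediate.
#include <cstdint>

static int64_t classifyImm(int64_t SExtVal, uint64_t ZExtVal) {
  if (SExtVal >= -16 && SExtVal <= 64) // assumed inline-constant range
    return 0;                          // free to encode
  return ZExtVal <= UINT32_MAX ? (int64_t)ZExtVal : -1;
}

// classifyImm: 7 -> 0 (inline); 1234 -> 1234 (literal);
// (1ULL << 40) -> -1 (does not fit in 32 bits).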
/// \brief Helper function for adjustWritemask
@@ -1904,14 +1839,13 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
// Adjust the writemask in the node
std::vector<SDValue> Ops;
Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
- for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
- Ops.push_back(Node->getOperand(i));
+ Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end());
Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
// If we only got one lane, replace it with a copy
// (if NewDmask has only one bit set...)
if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
- SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32);
+ SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, MVT::i32);
SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
SDLoc(), Users[Lane]->getValueType(0),
SDValue(Node, 0), RC);
@@ -1963,9 +1897,8 @@ void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
- Node = AdjustRegClass(Node, DAG);
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
if (TII->isMIMG(Node->getMachineOpcode()))
adjustWritemask(Node, DAG);
@@ -1975,17 +1908,17 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
legalizeTargetIndependentNode(Node, DAG);
return Node;
}
-
- return legalizeOperands(Node, DAG);
+ return Node;
}
/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
- const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
- getTargetMachine().getSubtargetImpl()->getInstrInfo());
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
TII->legalizeOperands(MI);
if (TII->isMIMG(MI->getOpcode())) {
@@ -1998,14 +1931,13 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
const TargetRegisterClass *RC;
switch (BitsSet) {
default: return;
- case 1: RC = &AMDGPU::VReg_32RegClass; break;
+ case 1: RC = &AMDGPU::VGPR_32RegClass; break;
case 2: RC = &AMDGPU::VReg_64RegClass; break;
case 3: RC = &AMDGPU::VReg_96RegClass; break;
}
unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
MI->setDesc(TII->get(NewOpcode));
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
MRI.setRegClass(VReg, RC);
return;
}
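
The switch above maps the writemask's population count to a register class (1 lane to VGPR_32, 2 to VReg_64, 3 to VReg_96). A sketch of the same mapping in isolation, with plain lane counts standing in for the generated AMDGPU register classes:

#include <bitset>
#include <cstdint>
#include <cstdio>

// Pick a register width (in 32-bit lanes) from a 4-bit MIMG writemask.
static int lanesForDmask(uint32_t Dmask) {
  int BitsSet = std::bitset<4>(Dmask).count();
  switch (BitsSet) {
  case 1: return 1;  // VGPR_32
  case 2: return 2;  // VReg_64
  case 3: return 3;  // VReg_96
  default: return 4; // full VReg_128, nothing to shrink
  }
}

int main() {
  printf("dmask 0xB -> %d lanes\n", lanesForDmask(0xB)); // 3 bits set
  return 0;
}
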
@@ -2030,6 +1962,8 @@ static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) {
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
SDLoc DL,
SDValue Ptr) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
#if 1
// XXX - Workaround for moveToVALU not handling different register class
// inserts for REG_SEQUENCE.
@@ -2039,7 +1973,7 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, MVT::i32),
buildSMovImm32(DAG, DL, 0),
DAG.getTargetConstant(AMDGPU::sub0, MVT::i32),
- buildSMovImm32(DAG, DL, AMDGPU::RSRC_DATA_FORMAT >> 32),
+ buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
DAG.getTargetConstant(AMDGPU::sub1, MVT::i32)
};
@@ -2063,7 +1997,7 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
DAG.getTargetConstant(AMDGPU::sub0_sub1, MVT::i32),
buildSMovImm32(DAG, DL, 0),
DAG.getTargetConstant(AMDGPU::sub2, MVT::i32),
- buildSMovImm32(DAG, DL, AMDGPU::RSRC_DATA_FORMAT >> 32),
+ buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
DAG.getTargetConstant(AMDGPU::sub3, MVT::i32)
};
@@ -2110,57 +2044,14 @@ MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
MachineSDNode *SITargetLowering::buildScratchRSRC(SelectionDAG &DAG,
SDLoc DL,
SDValue Ptr) const {
- uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ uint64_t Rsrc = TII->getDefaultRsrcDataFormat() | AMDGPU::RSRC_TID_ENABLE |
0xffffffff; // Size
return buildRSRC(DAG, DL, Ptr, 0, Rsrc);
}
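
buildScratchRSRC composes the high 64 bits of the scratch resource descriptor by ORing the default data format, the TID-enable bit, and an all-ones size field. A sketch of that composition; the two named constants are placeholders, not the real generated values:

#include <cstdint>
#include <cstdio>

// Placeholder values; the real constants come from the AMDGPU backend.
static const uint64_t DEFAULT_RSRC_DATA_FORMAT = 0xf00000000000ULL;
static const uint64_t RSRC_TID_ENABLE = 1ULL << 55;

int main() {
  // Words 2-3 of the scratch resource descriptor:
  // data format | per-thread indexing | maximum size.
  uint64_t Rsrc = DEFAULT_RSRC_DATA_FORMAT | RSRC_TID_ENABLE | 0xffffffffULL;
  printf("rsrc words 2-3: 0x%016llx\n", (unsigned long long)Rsrc);
  return 0;
}
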
-MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
- SelectionDAG &DAG) const {
-
- SDLoc DL(N);
- unsigned NewOpcode = N->getMachineOpcode();
-
- switch (N->getMachineOpcode()) {
- default: return N;
- case AMDGPU::S_LOAD_DWORD_IMM:
- NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
- // Fall-through
- case AMDGPU::S_LOAD_DWORDX2_SGPR:
- if (NewOpcode == N->getMachineOpcode()) {
- NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
- }
- // Fall-through
- case AMDGPU::S_LOAD_DWORDX4_IMM:
- case AMDGPU::S_LOAD_DWORDX4_SGPR: {
- if (NewOpcode == N->getMachineOpcode()) {
- NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
- }
- if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
- return N;
- }
- ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1));
-
- const SDValue Zero64 = DAG.getTargetConstant(0, MVT::i64);
- SDValue Ptr(DAG.getMachineNode(AMDGPU::S_MOV_B64, DL, MVT::i64, Zero64), 0);
- MachineSDNode *RSrc = wrapAddr64Rsrc(DAG, DL, Ptr);
-
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(SDValue(RSrc, 0));
- Ops.push_back(N->getOperand(0));
- Ops.push_back(DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32));
-
- // Copy remaining operands so we keep any chain and glue nodes that follow
- // the normal operands.
- for (unsigned I = 2, E = N->getNumOperands(); I != E; ++I)
- Ops.push_back(N->getOperand(I));
-
- return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops);
- }
- }
-}
-
SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
const TargetRegisterClass *RC,
unsigned Reg, EVT VT) const {
diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
index 7bf406e..92f5847 100644
--- a/lib/Target/R600/SIISelLowering.h
+++ b/lib/Target/R600/SIISelLowering.h
@@ -42,27 +42,22 @@ class SITargetLowering : public AMDGPUTargetLowering {
SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
- bool foldImm(SDValue &Operand, int32_t &Immediate,
- bool &ScalarSlotUsed) const;
- const TargetRegisterClass *getRegClassForNode(SelectionDAG &DAG,
- const SDValue &Op) const;
- bool fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
- unsigned RegClass) const;
-
- SDNode *legalizeOperands(MachineSDNode *N, SelectionDAG &DAG) const;
void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
- MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const;
- static SDValue performUCharToFloatCombine(SDNode *N,
- DAGCombinerInfo &DCI);
+ SDValue performUCharToFloatCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const;
SDValue performSHLPtrCombine(SDNode *N,
unsigned AS,
DAGCombinerInfo &DCI) const;
+ SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMin3Max3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
public:
- SITargetLowering(TargetMachine &tm);
+ SITargetLowering(TargetMachine &tm, const AMDGPUSubtarget &STI);
bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT /*VT*/) const override;
@@ -94,6 +89,7 @@ public:
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
MachineBasicBlock * BB) const override;
+ bool enableAggressiveFMAFusion(EVT VT) const override;
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
MVT getScalarShiftAmountTy(EVT VT) const override;
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp
index 712d97d..50f20ac 100644
--- a/lib/Target/R600/SIInsertWaits.cpp
+++ b/lib/Target/R600/SIInsertWaits.cpp
@@ -41,6 +41,12 @@ typedef union {
} Counters;
+typedef enum {
+ OTHER,
+ SMEM,
+ VMEM
+} InstType;
+
typedef Counters RegCounters[512];
typedef std::pair<unsigned, unsigned> RegInterval;
@@ -73,6 +79,11 @@ private:
/// \brief Different export instruction types seen since last wait.
unsigned ExpInstrTypesSeen;
+ /// \brief Type of the last opcode.
+ InstType LastOpcodeType;
+
+ bool LastInstWritesM0;
+
/// \brief Get increment/decrement amount for this instruction.
Counters getHwCounts(MachineInstr &MI);
@@ -83,7 +94,8 @@ private:
RegInterval getRegInterval(MachineOperand &Op);
/// \brief Handle instructions async components
- void pushInstruction(MachineInstr &MI);
+ void pushInstruction(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I);
/// \brief Insert the actual wait instruction
bool insertWait(MachineBasicBlock &MBB,
@@ -96,6 +108,9 @@ private:
/// \brief Resolve all operand dependencies to counter requirements
Counters handleOperands(MachineInstr &MI);
+ /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
+ void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);
+
public:
SIInsertWaits(TargetMachine &tm) :
MachineFunctionPass(ID),
@@ -176,6 +191,29 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
if (!MI.getDesc().mayStore())
return false;
+ // Check if this operand is the value being stored.
+ // Special case for DS instructions, since the address
+ // operand comes before the value operand and it may have
+ // multiple data operands.
+
+ if (TII->isDS(MI.getOpcode())) {
+ MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data);
+ if (Data && Op.isIdenticalTo(*Data))
+ return true;
+
+ MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
+ if (Data0 && Op.isIdenticalTo(*Data0))
+ return true;
+
+ MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
+ if (Data1 && Op.isIdenticalTo(*Data1))
+ return true;
+
+ return false;
+ }
+
+ // NOTE: This assumes that the value operand is before the
+ // address operand, and that there is only one value operand.
for (MachineInstr::mop_iterator I = MI.operands_begin(),
E = MI.operands_end(); I != E; ++I) {
@@ -203,10 +241,11 @@ RegInterval SIInsertWaits::getRegInterval(MachineOperand &Op) {
return Result;
}
-void SIInsertWaits::pushInstruction(MachineInstr &MI) {
+void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
// Get the hardware counter increments and sum them up
- Counters Increment = getHwCounts(MI);
+ Counters Increment = getHwCounts(*I);
unsigned Sum = 0;
for (unsigned i = 0; i < 3; ++i) {
@@ -215,17 +254,43 @@ void SIInsertWaits::pushInstruction(MachineInstr &MI) {
}
// If we don't increase anything then that's it
- if (Sum == 0)
+ if (Sum == 0) {
+ LastOpcodeType = OTHER;
return;
+ }
+
+ if (TRI->ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
+ // or SMEM clause, respectively.
+ //
+ // The temporary workaround is to break the clauses with S_NOP.
+ //
+ // The proper solution would be to allocate registers such that all source
+ // and destination registers don't overlap, e.g. this is illegal:
+ // r0 = load r2
+ // r2 = load r0
+ if ((LastOpcodeType == SMEM && TII->isSMRD(I->getOpcode())) ||
+ (LastOpcodeType == VMEM && Increment.Named.VM)) {
+ // Insert a NOP to break the clause.
+ BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
+ .addImm(0);
+ LastInstWritesM0 = false;
+ }
+
+ if (TII->isSMRD(I->getOpcode()))
+ LastOpcodeType = SMEM;
+ else if (Increment.Named.VM)
+ LastOpcodeType = VMEM;
+ }
// Remember which export instructions we have seen
if (Increment.Named.EXP) {
- ExpInstrTypesSeen |= MI.getOpcode() == AMDGPU::EXP ? 1 : 2;
+ ExpInstrTypesSeen |= I->getOpcode() == AMDGPU::EXP ? 1 : 2;
}
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI.getOperand(i);
+ MachineOperand &Op = I->getOperand(i);
if (!isOpRelevant(Op))
continue;
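
The clause-breaking code above amounts to a one-element state machine: remember whether the previous instruction was SMEM or VMEM, and emit an S_NOP before a consecutive instruction of the same kind. A minimal sketch of that bookkeeping, detached from the MachineInstr API:

#include <cstdio>

enum InstType { OTHER, SMEM, VMEM };

struct ClauseBreaker {
  InstType Last = OTHER;
  // Returns true if an S_NOP must be inserted before an instruction
  // of type Cur to break a forming clause.
  bool step(InstType Cur) {
    bool NeedNop = (Cur != OTHER && Cur == Last);
    Last = Cur;
    return NeedNop;
  }
};

int main() {
  ClauseBreaker CB;
  InstType Prog[] = {SMEM, SMEM, VMEM, VMEM, OTHER, VMEM};
  for (InstType T : Prog)
    printf("%s", CB.step(T) ? "S_NOP; inst\n" : "inst\n");
  return 0;
}
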
@@ -302,6 +367,8 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
((Counts.Named.EXP & 0x7) << 4) |
((Counts.Named.LGKM & 0x7) << 8));
+ LastOpcodeType = OTHER;
+ LastInstWritesM0 = false;
return true;
}
@@ -343,6 +410,30 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
return Result;
}
+void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ if (TRI->ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ return;
+
+ // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
+ if (LastInstWritesM0 && I->getOpcode() == AMDGPU::S_SENDMSG) {
+ BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
+ LastInstWritesM0 = false;
+ return;
+ }
+
+ // Set whether this instruction sets M0
+ LastInstWritesM0 = false;
+
+ unsigned NumOperands = I->getNumOperands();
+ for (unsigned i = 0; i < NumOperands; i++) {
+ const MachineOperand &Op = I->getOperand(i);
+
+ if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
+ LastInstWritesM0 = true;
+ }
+}
+
// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
// around other non-memory instructions.
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
@@ -356,6 +447,8 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
WaitedOn = ZeroCounts;
LastIssued = ZeroCounts;
+ LastOpcodeType = OTHER;
+ LastInstWritesM0 = false;
memset(&UsedRegs, 0, sizeof(UsedRegs));
memset(&DefinedRegs, 0, sizeof(DefinedRegs));
@@ -367,8 +460,14 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
- Changes |= insertWait(MBB, I, handleOperands(*I));
- pushInstruction(*I);
+ // Wait for everything before a barrier.
+ if (I->getOpcode() == AMDGPU::S_BARRIER)
+ Changes |= insertWait(MBB, I, LastIssued);
+ else
+ Changes |= insertWait(MBB, I, handleOperands(*I));
+
+ pushInstruction(MBB, I);
+ handleSendMsg(MBB, I);
}
// Wait for everything at the end of the MBB
diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td
index 10e0a3f..c90c741 100644
--- a/lib/Target/R600/SIInstrFormats.td
+++ b/lib/Target/R600/SIInstrFormats.td
@@ -17,65 +17,109 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
field bits<1> VM_CNT = 0;
field bits<1> EXP_CNT = 0;
field bits<1> LGKM_CNT = 0;
- field bits<1> MIMG = 0;
- field bits<1> SMRD = 0;
+
+ field bits<1> SALU = 0;
+ field bits<1> VALU = 0;
+
+ field bits<1> SOP1 = 0;
+ field bits<1> SOP2 = 0;
+ field bits<1> SOPC = 0;
+ field bits<1> SOPK = 0;
+ field bits<1> SOPP = 0;
+
field bits<1> VOP1 = 0;
field bits<1> VOP2 = 0;
field bits<1> VOP3 = 0;
field bits<1> VOPC = 0;
- field bits<1> SALU = 0;
+
field bits<1> MUBUF = 0;
field bits<1> MTBUF = 0;
+ field bits<1> SMRD = 0;
+ field bits<1> DS = 0;
+ field bits<1> MIMG = 0;
field bits<1> FLAT = 0;
+ field bits<1> WQM = 0;
// These need to be kept in sync with the enum in SIInstrFlags.
let TSFlags{0} = VM_CNT;
let TSFlags{1} = EXP_CNT;
let TSFlags{2} = LGKM_CNT;
- let TSFlags{3} = MIMG;
- let TSFlags{4} = SMRD;
- let TSFlags{5} = VOP1;
- let TSFlags{6} = VOP2;
- let TSFlags{7} = VOP3;
- let TSFlags{8} = VOPC;
- let TSFlags{9} = SALU;
- let TSFlags{10} = MUBUF;
- let TSFlags{11} = MTBUF;
- let TSFlags{12} = FLAT;
+
+ let TSFlags{3} = SALU;
+ let TSFlags{4} = VALU;
+
+ let TSFlags{5} = SOP1;
+ let TSFlags{6} = SOP2;
+ let TSFlags{7} = SOPC;
+ let TSFlags{8} = SOPK;
+ let TSFlags{9} = SOPP;
+
+ let TSFlags{10} = VOP1;
+ let TSFlags{11} = VOP2;
+ let TSFlags{12} = VOP3;
+ let TSFlags{13} = VOPC;
+
+ let TSFlags{14} = MUBUF;
+ let TSFlags{15} = MTBUF;
+ let TSFlags{16} = SMRD;
+ let TSFlags{17} = DS;
+ let TSFlags{18} = MIMG;
+ let TSFlags{19} = FLAT;
+ let TSFlags{20} = WQM;
// Most instructions require adjustments after selection to satisfy
// operand requirements.
let hasPostISelHook = 1;
+ let SchedRW = [Write32Bit];
}
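
The comment above says these bit positions must be kept in sync with the enum in SIInstrFlags. A hedged sketch of what the matching C++ side would look like after this renumbering; the positions are inferred from the let TSFlags{N} assignments above, not copied from the real header:

#include <cstdint>

// Inferred mirror of the TableGen TSFlags layout above; the
// authoritative definition lives in SIInstrFlags (SIDefines.h).
namespace SIInstrFlags {
enum : uint64_t {
  VM_CNT   = 1ULL << 0,
  EXP_CNT  = 1ULL << 1,
  LGKM_CNT = 1ULL << 2,
  SALU     = 1ULL << 3,
  VALU     = 1ULL << 4,
  SOP1     = 1ULL << 5,
  SOP2     = 1ULL << 6,
  SOPC     = 1ULL << 7,
  SOPK     = 1ULL << 8,
  SOPP     = 1ULL << 9,
  VOP1     = 1ULL << 10,
  VOP2     = 1ULL << 11,
  VOP3     = 1ULL << 12,
  VOPC     = 1ULL << 13,
  MUBUF    = 1ULL << 14,
  MTBUF    = 1ULL << 15,
  SMRD     = 1ULL << 16,
  DS       = 1ULL << 17,
  MIMG     = 1ULL << 18,
  FLAT     = 1ULL << 19,
  WQM      = 1ULL << 20
};
}

// Example predicate in the style of the SIInstrInfo::is* helpers.
static bool isDS(uint64_t TSFlags) { return TSFlags & SIInstrFlags::DS; }

int main() { return isDS(SIInstrFlags::DS) ? 0 : 1; }
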
class Enc32 {
-
field bits<32> Inst;
int Size = 4;
}
class Enc64 {
-
field bits<64> Inst;
int Size = 8;
}
-class VOP1Common <dag outs, dag ins, string asm, list<dag> pattern> :
+let Uses = [EXEC] in {
+
+class VOPAnyCommon <dag outs, dag ins, string asm, list<dag> pattern> :
InstSI <outs, ins, asm, pattern> {
+
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let UseNamedOperandTable = 1;
+ let VALU = 1;
+}
+
+class VOPCCommon <dag ins, string asm, list<dag> pattern> :
+ VOPAnyCommon <(outs VCCReg:$dst), ins, asm, pattern> {
+
+ let DisableEncoding = "$dst";
+ let VOPC = 1;
+ let Size = 4;
+}
+
+class VOP1Common <dag outs, dag ins, string asm, list<dag> pattern> :
+ VOPAnyCommon <outs, ins, asm, pattern> {
+
let VOP1 = 1;
+ let Size = 4;
+}
+
+class VOP2Common <dag outs, dag ins, string asm, list<dag> pattern> :
+ VOPAnyCommon <outs, ins, asm, pattern> {
+
+ let VOP2 = 1;
+ let Size = 4;
}
class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern> {
+ VOPAnyCommon <outs, ins, asm, pattern> {
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
// Using complex patterns gives VOP3 patterns a very high complexity rating,
// but standalone patterns are almost always preferred, so we need to adjust the
// priority lower. The goal is to use a high number to reduce complexity to
@@ -83,63 +127,58 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
let AddedComplexity = -1000;
let VOP3 = 1;
-
int Size = 8;
- let Uses = [EXEC];
}
+} // End Uses = [EXEC]
+
//===----------------------------------------------------------------------===//
// Scalar operations
//===----------------------------------------------------------------------===//
class SOP1e <bits<8> op> : Enc32 {
+ bits<7> sdst;
+ bits<8> ssrc0;
- bits<7> SDST;
- bits<8> SSRC0;
-
- let Inst{7-0} = SSRC0;
+ let Inst{7-0} = ssrc0;
let Inst{15-8} = op;
- let Inst{22-16} = SDST;
+ let Inst{22-16} = sdst;
let Inst{31-23} = 0x17d; //encoding;
}
class SOP2e <bits<7> op> : Enc32 {
+ bits<7> sdst;
+ bits<8> ssrc0;
+ bits<8> ssrc1;
- bits<7> SDST;
- bits<8> SSRC0;
- bits<8> SSRC1;
-
- let Inst{7-0} = SSRC0;
- let Inst{15-8} = SSRC1;
- let Inst{22-16} = SDST;
+ let Inst{7-0} = ssrc0;
+ let Inst{15-8} = ssrc1;
+ let Inst{22-16} = sdst;
let Inst{29-23} = op;
let Inst{31-30} = 0x2; // encoding
}
class SOPCe <bits<7> op> : Enc32 {
+ bits<8> ssrc0;
+ bits<8> ssrc1;
- bits<8> SSRC0;
- bits<8> SSRC1;
-
- let Inst{7-0} = SSRC0;
- let Inst{15-8} = SSRC1;
+ let Inst{7-0} = ssrc0;
+ let Inst{15-8} = ssrc1;
let Inst{22-16} = op;
let Inst{31-23} = 0x17e;
}
class SOPKe <bits<5> op> : Enc32 {
+ bits <7> sdst;
+ bits <16> simm16;
- bits <7> SDST;
- bits <16> SIMM16;
-
- let Inst{15-0} = SIMM16;
- let Inst{22-16} = SDST;
+ let Inst{15-0} = simm16;
+ let Inst{22-16} = sdst;
let Inst{27-23} = op;
let Inst{31-28} = 0xb; //encoding
}
class SOPPe <bits<7> op> : Enc32 {
-
bits <16> simm16;
let Inst{15-0} = simm16;
@@ -148,35 +187,36 @@ class SOPPe <bits<7> op> : Enc32 {
}
class SMRDe <bits<5> op, bits<1> imm> : Enc32 {
+ bits<7> sdst;
+ bits<7> sbase;
+ bits<8> offset;
- bits<7> SDST;
- bits<7> SBASE;
- bits<8> OFFSET;
-
- let Inst{7-0} = OFFSET;
+ let Inst{7-0} = offset;
let Inst{8} = imm;
- let Inst{14-9} = SBASE{6-1};
- let Inst{21-15} = SDST;
+ let Inst{14-9} = sbase{6-1};
+ let Inst{21-15} = sdst;
let Inst{26-22} = op;
let Inst{31-27} = 0x18; //encoding
}
-class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI<outs, ins, asm, pattern>, SOP1e <op> {
-
+let SchedRW = [WriteSALU] in {
+class SOP1 <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let SALU = 1;
+ let SOP1 = 1;
}
-class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern>, SOP2e<op> {
+class SOP2 <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let SALU = 1;
+ let SOP2 = 1;
let UseNamedOperandTable = 1;
}
@@ -189,17 +229,19 @@ class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayStore = 0;
let hasSideEffects = 0;
let SALU = 1;
+ let SOPC = 1;
let UseNamedOperandTable = 1;
}
-class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins , asm, pattern>, SOPKe<op> {
+class SOPK <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins , asm, pattern> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let SALU = 1;
+ let SOPK = 1;
let UseNamedOperandTable = 1;
}
@@ -210,12 +252,14 @@ class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern = []> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let isCodeGenOnly = 0;
let SALU = 1;
+ let SOPP = 1;
let UseNamedOperandTable = 1;
}
+} // let SchedRW = [WriteSALU]
+
class SMRD <dag outs, dag ins, string asm, list<dag> pattern> :
InstSI<outs, ins, asm, pattern> {
@@ -225,6 +269,7 @@ class SMRD <dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 1;
let hasSideEffects = 0;
let UseNamedOperandTable = 1;
+ let SchedRW = [WriteSMEM];
}
//===----------------------------------------------------------------------===//
@@ -232,32 +277,44 @@ class SMRD <dag outs, dag ins, string asm, list<dag> pattern> :
//===----------------------------------------------------------------------===//
class VOP1e <bits<8> op> : Enc32 {
+ bits<8> vdst;
+ bits<9> src0;
- bits<8> VDST;
- bits<9> SRC0;
-
- let Inst{8-0} = SRC0;
+ let Inst{8-0} = src0;
let Inst{16-9} = op;
- let Inst{24-17} = VDST;
+ let Inst{24-17} = vdst;
let Inst{31-25} = 0x3f; //encoding
}
class VOP2e <bits<6> op> : Enc32 {
+ bits<8> vdst;
+ bits<9> src0;
+ bits<8> src1;
- bits<8> VDST;
- bits<9> SRC0;
- bits<8> VSRC1;
-
- let Inst{8-0} = SRC0;
- let Inst{16-9} = VSRC1;
- let Inst{24-17} = VDST;
+ let Inst{8-0} = src0;
+ let Inst{16-9} = src1;
+ let Inst{24-17} = vdst;
let Inst{30-25} = op;
let Inst{31} = 0x0; //encoding
}
-class VOP3e <bits<9> op> : Enc64 {
+class VOP2_MADKe <bits<6> op> : Enc64 {
+
+ bits<8> vdst;
+ bits<9> src0;
+ bits<8> vsrc1;
+ bits<32> src2;
- bits<8> dst;
+ let Inst{8-0} = src0;
+ let Inst{16-9} = vsrc1;
+ let Inst{24-17} = vdst;
+ let Inst{30-25} = op;
+ let Inst{31} = 0x0; // encoding
+ let Inst{63-32} = src2;
+}
+
+class VOP3e <bits<9> op> : Enc64 {
+ bits<8> vdst;
bits<2> src0_modifiers;
bits<9> src0;
bits<2> src1_modifiers;
@@ -267,7 +324,7 @@ class VOP3e <bits<9> op> : Enc64 {
bits<1> clamp;
bits<2> omod;
- let Inst{7-0} = dst;
+ let Inst{7-0} = vdst;
let Inst{8} = src0_modifiers{1};
let Inst{9} = src1_modifiers{1};
let Inst{10} = src2_modifiers{1};
@@ -284,8 +341,7 @@ class VOP3e <bits<9> op> : Enc64 {
}
class VOP3be <bits<9> op> : Enc64 {
-
- bits<8> dst;
+ bits<8> vdst;
bits<2> src0_modifiers;
bits<9> src0;
bits<2> src1_modifiers;
@@ -295,7 +351,7 @@ class VOP3be <bits<9> op> : Enc64 {
bits<7> sdst;
bits<2> omod;
- let Inst{7-0} = dst;
+ let Inst{7-0} = vdst;
let Inst{14-8} = sdst;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
@@ -309,33 +365,30 @@ class VOP3be <bits<9> op> : Enc64 {
}
class VOPCe <bits<8> op> : Enc32 {
+ bits<9> src0;
+ bits<8> vsrc1;
- bits<9> SRC0;
- bits<8> VSRC1;
-
- let Inst{8-0} = SRC0;
- let Inst{16-9} = VSRC1;
+ let Inst{8-0} = src0;
+ let Inst{16-9} = vsrc1;
let Inst{24-17} = op;
let Inst{31-25} = 0x3e;
}
class VINTRPe <bits<2> op> : Enc32 {
+ bits<8> vdst;
+ bits<8> vsrc;
+ bits<2> attrchan;
+ bits<6> attr;
- bits<8> VDST;
- bits<8> VSRC;
- bits<2> ATTRCHAN;
- bits<6> ATTR;
-
- let Inst{7-0} = VSRC;
- let Inst{9-8} = ATTRCHAN;
- let Inst{15-10} = ATTR;
+ let Inst{7-0} = vsrc;
+ let Inst{9-8} = attrchan;
+ let Inst{15-10} = attr;
let Inst{17-16} = op;
- let Inst{25-18} = VDST;
+ let Inst{25-18} = vdst;
let Inst{31-26} = 0x32; // encoding
}
class DSe <bits<8> op> : Enc64 {
-
bits<8> vdst;
bits<1> gds;
bits<8> addr;
@@ -356,7 +409,6 @@ class DSe <bits<8> op> : Enc64 {
}
class MUBUFe <bits<7> op> : Enc64 {
-
bits<12> offset;
bits<1> offen;
bits<1> idxen;
@@ -387,67 +439,65 @@ class MUBUFe <bits<7> op> : Enc64 {
}
class MTBUFe <bits<3> op> : Enc64 {
+ bits<8> vdata;
+ bits<12> offset;
+ bits<1> offen;
+ bits<1> idxen;
+ bits<1> glc;
+ bits<1> addr64;
+ bits<4> dfmt;
+ bits<3> nfmt;
+ bits<8> vaddr;
+ bits<7> srsrc;
+ bits<1> slc;
+ bits<1> tfe;
+ bits<8> soffset;
- bits<8> VDATA;
- bits<12> OFFSET;
- bits<1> OFFEN;
- bits<1> IDXEN;
- bits<1> GLC;
- bits<1> ADDR64;
- bits<4> DFMT;
- bits<3> NFMT;
- bits<8> VADDR;
- bits<7> SRSRC;
- bits<1> SLC;
- bits<1> TFE;
- bits<8> SOFFSET;
-
- let Inst{11-0} = OFFSET;
- let Inst{12} = OFFEN;
- let Inst{13} = IDXEN;
- let Inst{14} = GLC;
- let Inst{15} = ADDR64;
+ let Inst{11-0} = offset;
+ let Inst{12} = offen;
+ let Inst{13} = idxen;
+ let Inst{14} = glc;
+ let Inst{15} = addr64;
let Inst{18-16} = op;
- let Inst{22-19} = DFMT;
- let Inst{25-23} = NFMT;
+ let Inst{22-19} = dfmt;
+ let Inst{25-23} = nfmt;
let Inst{31-26} = 0x3a; //encoding
- let Inst{39-32} = VADDR;
- let Inst{47-40} = VDATA;
- let Inst{52-48} = SRSRC{6-2};
- let Inst{54} = SLC;
- let Inst{55} = TFE;
- let Inst{63-56} = SOFFSET;
+ let Inst{39-32} = vaddr;
+ let Inst{47-40} = vdata;
+ let Inst{52-48} = srsrc{6-2};
+ let Inst{54} = slc;
+ let Inst{55} = tfe;
+ let Inst{63-56} = soffset;
}
class MIMGe <bits<7> op> : Enc64 {
-
- bits<8> VDATA;
- bits<4> DMASK;
- bits<1> UNORM;
- bits<1> GLC;
- bits<1> DA;
- bits<1> R128;
- bits<1> TFE;
- bits<1> LWE;
- bits<1> SLC;
- bits<8> VADDR;
- bits<7> SRSRC;
- bits<7> SSAMP;
-
- let Inst{11-8} = DMASK;
- let Inst{12} = UNORM;
- let Inst{13} = GLC;
- let Inst{14} = DA;
- let Inst{15} = R128;
- let Inst{16} = TFE;
- let Inst{17} = LWE;
+ bits<8> vdata;
+ bits<4> dmask;
+ bits<1> unorm;
+ bits<1> glc;
+ bits<1> da;
+ bits<1> r128;
+ bits<1> tfe;
+ bits<1> lwe;
+ bits<1> slc;
+ bits<8> vaddr;
+ bits<7> srsrc;
+ bits<7> ssamp;
+
+ let Inst{11-8} = dmask;
+ let Inst{12} = unorm;
+ let Inst{13} = glc;
+ let Inst{14} = da;
+ let Inst{15} = r128;
+ let Inst{16} = tfe;
+ let Inst{17} = lwe;
let Inst{24-18} = op;
- let Inst{25} = SLC;
+ let Inst{25} = slc;
let Inst{31-26} = 0x3c;
- let Inst{39-32} = VADDR;
- let Inst{47-40} = VDATA;
- let Inst{52-48} = SRSRC{6-2};
- let Inst{57-53} = SSAMP{6-2};
+ let Inst{39-32} = vaddr;
+ let Inst{47-40} = vdata;
+ let Inst{52-48} = srsrc{6-2};
+ let Inst{57-53} = ssamp{6-2};
}
class FLATe<bits<7> op> : Enc64 {
@@ -471,26 +521,26 @@ class FLATe<bits<7> op> : Enc64 {
}
class EXPe : Enc64 {
- bits<4> EN;
- bits<6> TGT;
- bits<1> COMPR;
- bits<1> DONE;
- bits<1> VM;
- bits<8> VSRC0;
- bits<8> VSRC1;
- bits<8> VSRC2;
- bits<8> VSRC3;
-
- let Inst{3-0} = EN;
- let Inst{9-4} = TGT;
- let Inst{10} = COMPR;
- let Inst{11} = DONE;
- let Inst{12} = VM;
+ bits<4> en;
+ bits<6> tgt;
+ bits<1> compr;
+ bits<1> done;
+ bits<1> vm;
+ bits<8> vsrc0;
+ bits<8> vsrc1;
+ bits<8> vsrc2;
+ bits<8> vsrc3;
+
+ let Inst{3-0} = en;
+ let Inst{9-4} = tgt;
+ let Inst{10} = compr;
+ let Inst{11} = done;
+ let Inst{12} = vm;
let Inst{31-26} = 0x3e;
- let Inst{39-32} = VSRC0;
- let Inst{47-40} = VSRC1;
- let Inst{55-48} = VSRC2;
- let Inst{63-56} = VSRC3;
+ let Inst{39-32} = vsrc0;
+ let Inst{47-40} = vsrc1;
+ let Inst{55-48} = vsrc2;
+ let Inst{63-56} = vsrc3;
}
let Uses = [EXEC] in {
@@ -500,34 +550,13 @@ class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
VOP1e<op>;
class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern>, VOP2e<op> {
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOP2 = 1;
-}
-
-class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
- VOP3Common <outs, ins, asm, pattern>, VOP3e<op>;
-
-class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
- VOP3Common <outs, ins, asm, pattern>, VOP3be<op>;
+ VOP2Common <outs, ins, asm, pattern>, VOP2e<op>;
class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
- InstSI <(outs VCCReg:$dst), ins, asm, pattern>, VOPCe <op> {
-
- let DisableEncoding = "$dst";
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOPC = 1;
-}
+ VOPCCommon <ins, asm, pattern>, VOPCe <op>;
-class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern>, VINTRPe<op> {
+class VINTRPCommon <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -541,15 +570,18 @@ class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Uses = [EXEC] in {
-class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern> , DSe<op> {
+class DS <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> {
let LGKM_CNT = 1;
+ let DS = 1;
let UseNamedOperandTable = 1;
+ let DisableEncoding = "$m0";
+ let SchedRW = [WriteLDS];
}
-class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI<outs, ins, asm, pattern>, MUBUFe <op> {
+class MUBUF <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern> {
let VM_CNT = 1;
let EXP_CNT = 1;
@@ -557,6 +589,7 @@ class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let hasSideEffects = 0;
let UseNamedOperandTable = 1;
+ let SchedRW = [WriteVMEM];
}
class MTBUF <dag outs, dag ins, string asm, list<dag> pattern> :
@@ -566,8 +599,9 @@ class MTBUF <dag outs, dag ins, string asm, list<dag> pattern> :
let EXP_CNT = 1;
let MTBUF = 1;
- let neverHasSideEffects = 1;
+ let hasSideEffects = 0;
let UseNamedOperandTable = 1;
+ let SchedRW = [WriteVMEM];
}
class FLAT <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -596,5 +630,4 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
}
-
} // End Uses = [EXEC]
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index 8343362..4f1e5ad 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -28,8 +28,7 @@
using namespace llvm;
SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
- : AMDGPUInstrInfo(st),
- RI(st) { }
+ : AMDGPUInstrInfo(st), RI(st) {}
//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
@@ -326,26 +325,6 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned Opcode;
const int16_t *SubIndices;
- if (AMDGPU::M0 == DestReg) {
- // Check if M0 isn't already set to this value
- for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
- I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {
-
- if (!I->definesRegister(AMDGPU::M0))
- continue;
-
- unsigned Opc = I->getOpcode();
- if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
- break;
-
- if (!I->readsRegister(SrcReg))
- break;
-
- // The copy isn't necessary
- return;
- }
- }
-
if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
@@ -353,6 +332,21 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
} else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
+ if (DestReg == AMDGPU::VCC) {
+ if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ // FIXME: Hack until VReg_1 removed.
+ assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32), AMDGPU::VCC)
+ .addImm(0)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+
+ return;
+ }
+
assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
@@ -373,8 +367,8 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opcode = AMDGPU::S_MOV_B32;
SubIndices = Sub0_15;
- } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
- assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
+ } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
+ assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
AMDGPU::SReg_32RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
@@ -428,27 +422,30 @@ unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
int NewOpc;
// Try to map original to commuted opcode
- if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
+ NewOpc = AMDGPU::getCommuteRev(Opcode);
+ // Check if the commuted (REV) opcode exists on the target.
+ if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
return NewOpc;
// Try to map commuted to original opcode
- if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
+ NewOpc = AMDGPU::getCommuteOrig(Opcode);
+ // Check if the original (non-REV) opcode exists on the target.
+ if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
return NewOpc;
return Opcode;
}
-static bool shouldTryToSpillVGPRs(MachineFunction *MF) {
-
- SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
- const TargetMachine &TM = MF->getTarget();
-
- // FIXME: Even though it can cause problems, we need to enable
- // spilling at -O0, since the fast register allocator always
- // spills registers that are live at the end of blocks.
- return MFI->getShaderType() == ShaderType::COMPUTE &&
- TM.getOptLevel() == CodeGenOpt::None;
+unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
+ if (DstRC->getSize() == 4) {
+ return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
+ } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
+ return AMDGPU::S_MOV_B64;
+ } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
+ return AMDGPU::V_MOV_B64_PSEUDO;
+ }
+ return AMDGPU::COPY;
}
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -458,6 +455,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
MachineFunction *MF = MBB.getParent();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo *FrameInfo = MF->getFrameInfo();
DebugLoc DL = MBB.findDebugLoc(MI);
int Opcode = -1;
@@ -473,7 +471,9 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
}
- } else if(shouldTryToSpillVGPRs(MF) && RI.hasVGPRs(RC)) {
+ } else if(RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
+ MFI->setHasSpilledVGPRs();
+
switch(RC->getSize() * 8) {
case 32: Opcode = AMDGPU::SI_SPILL_V32_SAVE; break;
case 64: Opcode = AMDGPU::SI_SPILL_V64_SAVE; break;
@@ -488,12 +488,16 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
FrameInfo->setObjectAlignment(FrameIndex, 4);
BuildMI(MBB, MI, DL, get(Opcode))
.addReg(SrcReg)
- .addFrameIndex(FrameIndex);
+ .addFrameIndex(FrameIndex)
+ // Placeholder registers; these will be filled in by
+ // SIPrepareScratchRegs.
+ .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
+ .addReg(AMDGPU::SGPR0, RegState::Undef);
} else {
LLVMContext &Ctx = MF->getFunction()->getContext();
Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
" spill register");
- BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0)
+ BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
.addReg(SrcReg);
}
}
@@ -504,6 +508,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
MachineFunction *MF = MBB.getParent();
+ const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo *FrameInfo = MF->getFrameInfo();
DebugLoc DL = MBB.findDebugLoc(MI);
int Opcode = -1;
@@ -516,7 +521,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
}
- } else if(shouldTryToSpillVGPRs(MF) && RI.hasVGPRs(RC)) {
+ } else if(RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
switch(RC->getSize() * 8) {
case 32: Opcode = AMDGPU::SI_SPILL_V32_RESTORE; break;
case 64: Opcode = AMDGPU::SI_SPILL_V64_RESTORE; break;
@@ -530,13 +535,17 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
if (Opcode != -1) {
FrameInfo->setObjectAlignment(FrameIndex, 4);
BuildMI(MBB, MI, DL, get(Opcode), DestReg)
- .addFrameIndex(FrameIndex);
+ .addFrameIndex(FrameIndex)
+ // Placeholder registers; these will be filled in by
+ // SIPrepareScratchRegs.
+ .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
+ .addReg(AMDGPU::SGPR0, RegState::Undef);
+
} else {
LLVMContext &Ctx = MF->getFunction()->getContext();
Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
" restore register");
- BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
- .addReg(AMDGPU::VGPR0);
+ BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
}
}
@@ -548,7 +557,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
unsigned Size) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
- const AMDGPUSubtarget &ST = MF->getTarget().getSubtarget<AMDGPUSubtarget>();
+ const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
const SIRegisterInfo *TRI =
static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
DebugLoc DL = MBB.findDebugLoc(MI);
@@ -561,7 +570,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
MachineBasicBlock::iterator Insert = Entry.front();
DebugLoc DL = Insert->getDebugLoc();
- TIDReg = RI.findUnusedVGPR(MF->getRegInfo());
+ TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
if (TIDReg == AMDGPU::NoRegister)
return TIDReg;
@@ -616,7 +625,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
.addImm(-1)
.addImm(0);
- BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e32),
+ BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
TIDReg)
.addImm(-1)
.addReg(TIDReg);
@@ -682,12 +691,42 @@ bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
// This is just a placeholder for register allocation.
MI->eraseFromParent();
break;
+
+ case AMDGPU::V_MOV_B64_PSEUDO: {
+ unsigned Dst = MI->getOperand(0).getReg();
+ unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
+ unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
+
+ const MachineOperand &SrcOp = MI->getOperand(1);
+ // FIXME: Will this work for 64-bit floating point immediates?
+ assert(!SrcOp.isFPImm());
+ if (SrcOp.isImm()) {
+ APInt Imm(64, SrcOp.getImm());
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
+ .addImm(Imm.getLoBits(32).getZExtValue())
+ .addReg(Dst, RegState::Implicit);
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
+ .addImm(Imm.getHiBits(32).getZExtValue())
+ .addReg(Dst, RegState::Implicit);
+ } else {
+ assert(SrcOp.isReg());
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
+ .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
+ .addReg(Dst, RegState::Implicit);
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
+ .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
+ .addReg(Dst, RegState::Implicit);
+ }
+ MI->eraseFromParent();
+ break;
+ }
}
return true;
}
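
V_MOV_B64_PSEUDO is expanded by splitting the 64-bit immediate into two 32-bit halves (getLoBits/getHiBits above) and emitting one V_MOV_B32 per half. The same split without APInt, as a plain sketch:

#include <cassert>
#include <cstdint>

// Split a 64-bit immediate into the two 32-bit halves moved
// into sub0 and sub1 of the destination VReg_64.
static void split64(uint64_t Imm, uint32_t &Lo, uint32_t &Hi) {
  Lo = static_cast<uint32_t>(Imm & 0xffffffffULL);
  Hi = static_cast<uint32_t>(Imm >> 32);
}

int main() {
  uint32_t Lo, Hi;
  split64(0x123456789abcdef0ULL, Lo, Hi);
  assert(Lo == 0x9abcdef0u && Hi == 0x12345678u);
  return 0;
}
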
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
+
if (MI->getNumOperands() < 3)
return nullptr;
@@ -709,12 +748,13 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
// Make sure it's legal to commute operands for VOP2.
if (isVOP2(MI->getOpcode()) &&
(!isOperandLegal(MI, Src0Idx, &Src1) ||
- !isOperandLegal(MI, Src1Idx, &Src0)))
+ !isOperandLegal(MI, Src1Idx, &Src0))) {
return nullptr;
+ }
if (!Src1.isReg()) {
- // Allow commuting instructions with Imm or FPImm operands.
- if (NewMI || (!Src1.isImm() && !Src1.isFPImm()) ||
+ // Allow commuting instructions with Imm operands.
+ if (NewMI || !Src1.isImm() ||
(!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
return nullptr;
}
@@ -742,8 +782,6 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
unsigned SubReg = Src0.getSubReg();
if (Src1.isImm())
Src0.ChangeToImmediate(Src1.getImm());
- else if (Src1.isFPImm())
- Src0.ChangeToFPImmediate(Src1.getFPImm());
else
llvm_unreachable("Should only have immediates");
@@ -821,6 +859,131 @@ SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
return RC != &AMDGPU::EXECRegRegClass;
}
+static void removeModOperands(MachineInstr &MI) {
+ unsigned Opc = MI.getOpcode();
+ int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+ AMDGPU::OpName::src0_modifiers);
+ int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+ AMDGPU::OpName::src1_modifiers);
+ int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+ AMDGPU::OpName::src2_modifiers);
+
+ MI.RemoveOperand(Src2ModIdx);
+ MI.RemoveOperand(Src1ModIdx);
+ MI.RemoveOperand(Src0ModIdx);
+}
+
+bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const {
+ if (!MRI->hasOneNonDBGUse(Reg))
+ return false;
+
+ unsigned Opc = UseMI->getOpcode();
+ if (Opc == AMDGPU::V_MAD_F32) {
+ // Don't fold if we are using source modifiers. The new VOP2 instructions
+ // don't have them.
+ if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
+ hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
+ hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
+ return false;
+ }
+
+ MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
+ MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
+ MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);
+
+ // Multiplied part is the constant: Use v_madmk_f32
+ // We should only expect these to be on src0 due to canonicalizations.
+ if (Src0->isReg() && Src0->getReg() == Reg) {
+ if (!Src1->isReg() ||
+ (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ return false;
+
+ if (!Src2->isReg() ||
+ (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
+ return false;
+
+ // We need to do some weird looking operand shuffling since the madmk
+ // operands are out of the normal expected order with the multiplied
+ // constant as the last operand.
+ //
+ // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
+ // src0 -> src2 K
+ // src1 -> src0
+ // src2 -> src1
+
+ const int64_t Imm = DefMI->getOperand(1).getImm();
+
+ // FIXME: This would be a lot easier if we could return a new instruction
+ // instead of having to modify in place.
+
+ // Remove these first since they are at the end.
+ UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+ AMDGPU::OpName::omod));
+ UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+ AMDGPU::OpName::clamp));
+
+ unsigned Src1Reg = Src1->getReg();
+ unsigned Src1SubReg = Src1->getSubReg();
+ unsigned Src2Reg = Src2->getReg();
+ unsigned Src2SubReg = Src2->getSubReg();
+ Src0->setReg(Src1Reg);
+ Src0->setSubReg(Src1SubReg);
+ Src1->setReg(Src2Reg);
+ Src1->setSubReg(Src2SubReg);
+
+ Src2->ChangeToImmediate(Imm);
+
+ removeModOperands(*UseMI);
+ UseMI->setDesc(get(AMDGPU::V_MADMK_F32));
+
+ bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
+ if (DeleteDef)
+ DefMI->eraseFromParent();
+
+ return true;
+ }
+
+ // Added part is the constant: Use v_madak_f32
+ if (Src2->isReg() && Src2->getReg() == Reg) {
+ // Not allowed to use constant bus for another operand.
+ // We can however allow an inline immediate as src0.
+ if (!Src0->isImm() &&
+ (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
+ return false;
+
+ if (!Src1->isReg() ||
+ (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ return false;
+
+ const int64_t Imm = DefMI->getOperand(1).getImm();
+
+ // FIXME: This would be a lot easier if we could return a new instruction
+ // instead of having to modify in place.
+
+ // Remove these first since they are at the end.
+ UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+ AMDGPU::OpName::omod));
+ UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+ AMDGPU::OpName::clamp));
+
+ Src2->ChangeToImmediate(Imm);
+
+ // These come before src2.
+ removeModOperands(*UseMI);
+ UseMI->setDesc(get(AMDGPU::V_MADAK_F32));
+
+ bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
+ if (DeleteDef)
+ DefMI->eraseFromParent();
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
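
The v_madmk_f32 rewrite above rotates the sources so the folded constant lands in the trailing position the encoding expects: src1 moves to src0, src2 to src1, and the immediate becomes src2. The permutation on plain values:

#include <cassert>
#include <cstdint>

struct MadOps { int64_t Src0, Src1, Src2; };

// v_mad_f32 src0, src1, src2 with src0 holding the constant K becomes
// v_madmk_f32: shift the surviving sources left and put K last.
static MadOps foldToMadmk(int64_t K, const MadOps &Mad) {
  return MadOps{Mad.Src1, Mad.Src2, K};
}

int main() {
  MadOps Mad{/*Src0: K slot*/ 0, /*Src1*/ 11, /*Src2*/ 22};
  MadOps Madmk = foldToMadmk(42, Mad);
  assert(Madmk.Src0 == 11 && Madmk.Src1 == 22 && Madmk.Src2 == 42);
  return 0;
}
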
bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA) const {
@@ -915,63 +1078,24 @@ bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
return false;
}
-namespace llvm {
-namespace AMDGPU {
-// Helper function generated by tablegen. We are wrapping this with
-// an SIInstrInfo function that returns bool rather than int.
-int isDS(uint16_t Opcode);
-}
-}
-
-bool SIInstrInfo::isDS(uint16_t Opcode) const {
- return ::AMDGPU::isDS(Opcode) != -1;
-}
-
-bool SIInstrInfo::isMIMG(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::MIMG;
-}
-
-bool SIInstrInfo::isSMRD(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::SMRD;
-}
-
-bool SIInstrInfo::isMUBUF(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
-}
-
-bool SIInstrInfo::isMTBUF(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
-}
-
-bool SIInstrInfo::isFLAT(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::FLAT;
-}
-
-bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::VOP1;
-}
-
-bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::VOP2;
-}
-
-bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::VOP3;
-}
-
-bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
- return get(Opcode).TSFlags & SIInstrFlags::VOPC;
-}
-
-bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
- return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
-}
-
bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
- int32_t Val = Imm.getSExtValue();
- if (Val >= -16 && Val <= 64)
+ int64_t SVal = Imm.getSExtValue();
+ if (SVal >= -16 && SVal <= 64)
return true;
+ if (Imm.getBitWidth() == 64) {
+ uint64_t Val = Imm.getZExtValue();
+ return (DoubleToBits(0.0) == Val) ||
+ (DoubleToBits(1.0) == Val) ||
+ (DoubleToBits(-1.0) == Val) ||
+ (DoubleToBits(0.5) == Val) ||
+ (DoubleToBits(-0.5) == Val) ||
+ (DoubleToBits(2.0) == Val) ||
+ (DoubleToBits(-2.0) == Val) ||
+ (DoubleToBits(4.0) == Val) ||
+ (DoubleToBits(-4.0) == Val);
+ }
+
// The actual type of the operand does not seem to matter as long
// as the bits match one of the inline immediate values. For example:
//
@@ -980,32 +1104,38 @@ bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
//
// 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
// floating-point, so it is a legal inline immediate.
-
- return (APInt::floatToBits(0.0f) == Imm) ||
- (APInt::floatToBits(1.0f) == Imm) ||
- (APInt::floatToBits(-1.0f) == Imm) ||
- (APInt::floatToBits(0.5f) == Imm) ||
- (APInt::floatToBits(-0.5f) == Imm) ||
- (APInt::floatToBits(2.0f) == Imm) ||
- (APInt::floatToBits(-2.0f) == Imm) ||
- (APInt::floatToBits(4.0f) == Imm) ||
- (APInt::floatToBits(-4.0f) == Imm);
-}
-
-bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
- if (MO.isImm())
- return isInlineConstant(APInt(32, MO.getImm(), true));
-
- if (MO.isFPImm()) {
- APFloat FpImm = MO.getFPImm()->getValueAPF();
- return isInlineConstant(FpImm.bitcastToAPInt());
+ uint32_t Val = Imm.getZExtValue();
+
+ return (FloatToBits(0.0f) == Val) ||
+ (FloatToBits(1.0f) == Val) ||
+ (FloatToBits(-1.0f) == Val) ||
+ (FloatToBits(0.5f) == Val) ||
+ (FloatToBits(-0.5f) == Val) ||
+ (FloatToBits(2.0f) == Val) ||
+ (FloatToBits(-2.0f) == Val) ||
+ (FloatToBits(4.0f) == Val) ||
+ (FloatToBits(-4.0f) == Val);
+}
+
+bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
+ unsigned OpSize) const {
+ if (MO.isImm()) {
+ // MachineOperand provides no way to tell the true operand size, since it
+ // only records a 64-bit value. We need to know the size to determine if a
+ // 32-bit floating point immediate bit pattern is legal for an integer
+ // immediate. It would be for any 32-bit integer operand, but would not be
+ // for a 64-bit one.
+
+ unsigned BitSize = 8 * OpSize;
+ return isInlineConstant(APInt(BitSize, MO.getImm(), true));
}
return false;
}
-bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
- return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
+bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
+ unsigned OpSize) const {
+ return MO.isImm() && !isInlineConstant(MO, OpSize);
}
static bool compareMachineOp(const MachineOperand &Op0,
@@ -1018,8 +1148,6 @@ static bool compareMachineOp(const MachineOperand &Op0,
return Op0.getReg() == Op1.getReg();
case MachineOperand::MO_Immediate:
return Op0.getImm() == Op1.getImm();
- case MachineOperand::MO_FPImmediate:
- return Op0.getFPImm() == Op1.getFPImm();
default:
llvm_unreachable("Didn't expect to be comparing these operand types");
}
@@ -1029,7 +1157,7 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO) const {
const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
- assert(MO.isImm() || MO.isFPImm() || MO.isTargetIndex() || MO.isFI());
+ assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
return true;
@@ -1037,21 +1165,26 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
if (OpInfo.RegClass < 0)
return false;
- if (isLiteralConstant(MO))
- return RI.regClassCanUseLiteralConstant(OpInfo.RegClass);
+ unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
+ if (isLiteralConstant(MO, OpSize))
+ return RI.opCanUseLiteralConstant(OpInfo.OperandType);
- return RI.regClassCanUseInlineConstant(OpInfo.RegClass);
+ return RI.opCanUseInlineConstant(OpInfo.OperandType);
}
-bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) {
+bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) const {
switch (AS) {
case AMDGPUAS::GLOBAL_ADDRESS: {
// MUBUF instructions have a 12-bit offset in bytes.
return isUInt<12>(OffsetSize);
}
case AMDGPUAS::CONSTANT_ADDRESS: {
- // SMRD instructions have an 8-bit offset in dwords.
- return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
+ // SMRD instructions have an 8-bit offset in dwords on SI and
+ // a 20-bit offset in bytes on VI.
+ if (RI.ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ return isUInt<20>(OffsetSize);
+ else
+ return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
}
case AMDGPUAS::LOCAL_ADDRESS:
case AMDGPUAS::REGION_ADDRESS: {
@@ -1066,7 +1199,11 @@ bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) {
}
bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
- return AMDGPU::getVOPe32(Opcode) != -1;
+ int Op32 = AMDGPU::getVOPe32(Opcode);
+ if (Op32 == -1)
+ return false;
+
+ return pseudoToMCOpcode(Op32) != -1;
}
bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
@@ -1084,9 +1221,10 @@ bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
}
bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
- const MachineOperand &MO) const {
+ const MachineOperand &MO,
+ unsigned OpSize) const {
// Literal constants use the constant bus.
- if (isLiteralConstant(MO))
+ if (isLiteralConstant(MO, OpSize))
return true;
if (!MO.isReg() || !MO.isUse())
@@ -1132,21 +1270,35 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Make sure the register classes are correct
for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
+ if (MI->getOperand(i).isFPImm()) {
+ ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
+ "all fp values to integers.";
+ return false;
+ }
+
+ int RegClass = Desc.OpInfo[i].RegClass;
+
switch (Desc.OpInfo[i].OperandType) {
- case MCOI::OPERAND_REGISTER: {
- if ((MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm()) &&
- !isImmOperandLegal(MI, i, MI->getOperand(i))) {
- ErrInfo = "Illegal immediate value for operand.";
- return false;
- }
+ case MCOI::OPERAND_REGISTER:
+ if (MI->getOperand(i).isImm()) {
+ ErrInfo = "Illegal immediate value for operand.";
+ return false;
+ }
+ break;
+ case AMDGPU::OPERAND_REG_IMM32:
+ break;
+ case AMDGPU::OPERAND_REG_INLINE_C:
+ if (isLiteralConstant(MI->getOperand(i),
+ RI.getRegClass(RegClass)->getSize())) {
+ ErrInfo = "Illegal immediate value for operand.";
+ return false;
}
break;
case MCOI::OPERAND_IMMEDIATE:
// Check if this operand is an immediate.
// FrameIndex operands will be replaced by immediates, so they are
// allowed.
- if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm() &&
- !MI->getOperand(i).isFI()) {
+ if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
@@ -1158,7 +1310,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
if (!MI->getOperand(i).isReg())
continue;
- int RegClass = Desc.OpInfo[i].RegClass;
if (RegClass != -1) {
unsigned Reg = MI->getOperand(i).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
@@ -1175,11 +1326,18 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Verify VOP*
if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
+ // Only look at the true operands. Only a real operand can use the constant
+ // bus, and we don't want to check pseudo-operands like the source modifier
+ // flags.
+ const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
+
unsigned ConstantBusCount = 0;
unsigned SGPRUsed = AMDGPU::NoRegister;
- for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (usesConstantBus(MRI, MO)) {
+ for (int OpIdx : OpIndices) {
+ if (OpIdx == -1)
+ break;
+ const MachineOperand &MO = MI->getOperand(OpIdx);
+ if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
if (MO.isReg()) {
if (MO.getReg() != SGPRUsed)
++ConstantBusCount;
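
The verifier loop above counts constant-bus users among the true sources: every literal takes a slot, and SGPR reads are deduplicated because a single SGPR value can be broadcast to all lanes. A sketch of the counting rule:

#include <cassert>

enum class SrcKind { VGPR, SGPR, Inline, Literal };

// Count constant-bus uses over up to three sources; repeated reads
// of the same SGPR share one slot, while each distinct SGPR or
// literal takes its own.
static unsigned constantBusUses(const SrcKind *Srcs, const int *SgprIds,
                                int N) {
  unsigned Count = 0;
  int SgprUsed = -1;
  for (int i = 0; i < N; ++i) {
    if (Srcs[i] == SrcKind::Literal)
      ++Count;
    else if (Srcs[i] == SrcKind::SGPR && SgprIds[i] != SgprUsed) {
      ++Count;
      SgprUsed = SgprIds[i];
    }
  }
  return Count;
}

int main() {
  SrcKind S[] = {SrcKind::SGPR, SrcKind::SGPR, SrcKind::VGPR};
  int Ids[] = {3, 3, -1};
  assert(constantBusUses(S, Ids, 3) == 1); // same SGPR twice: one slot
  return 0;
}
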
@@ -1195,31 +1353,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
}
}
- // Verify SRC1 for VOP2 and VOPC
- if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
- const MachineOperand &Src1 = MI->getOperand(Src1Idx);
- if (Src1.isImm() || Src1.isFPImm()) {
- ErrInfo = "VOP[2C] src1 cannot be an immediate.";
- return false;
- }
- }
-
- // Verify VOP3
- if (isVOP3(Opcode)) {
- if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
- ErrInfo = "VOP3 src0 cannot be a literal constant.";
- return false;
- }
- if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
- ErrInfo = "VOP3 src1 cannot be a literal constant.";
- return false;
- }
- if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
- ErrInfo = "VOP3 src2 cannot be a literal constant.";
- return false;
- }
- }
-
// Verify misc. restrictions on specific instructions.
if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
@@ -1287,7 +1420,7 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
case AMDGPU::S_LOAD_DWORDX4_IMM:
case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
- case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
+ case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
}
@@ -1302,8 +1435,13 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
const MCInstrDesc &Desc = get(MI.getOpcode());
if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
- Desc.OpInfo[OpNo].RegClass == -1)
- return MRI.getRegClass(MI.getOperand(OpNo).getReg());
+ Desc.OpInfo[OpNo].RegClass == -1) {
+ unsigned Reg = MI.getOperand(OpNo).getReg();
+
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ return MRI.getRegClass(Reg);
+ return RI.getPhysRegClass(Reg);
+ }
unsigned RCID = Desc.OpInfo[OpNo].RegClass;
return RI.getRegClass(RCID);
@@ -1339,7 +1477,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
VRC = &AMDGPU::VReg_64RegClass;
else
- VRC = &AMDGPU::VReg_32RegClass;
+ VRC = &AMDGPU::VGPR_32RegClass;
unsigned Reg = MRI.createVirtualRegister(VRC);
DebugLoc DL = MBB->findDebugLoc(I);
@@ -1428,6 +1566,14 @@ unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
return Dst;
}
+// Change the order of operands from (0, 1, 2) to (0, 2, 1)
+void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
+ assert(Inst->getNumExplicitOperands() == 3);
+ MachineOperand Op1 = Inst->getOperand(1);
+ Inst->RemoveOperand(1);
+ Inst->addOperand(Op1);
+}
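
swapOperands produces the (0, 2, 1) order by removing operand 1 and re-appending it, a remove-and-push rotation rather than an element swap. The same trick on a plain container:

#include <cassert>
#include <vector>

int main() {
  // Operands (dst, src0, src1): drop index 1 and append it,
  // turning (0, 1, 2) into (0, 2, 1).
  std::vector<int> Ops = {0, 1, 2};
  int Op1 = Ops[1];
  Ops.erase(Ops.begin() + 1);
  Ops.push_back(Op1);
  assert((Ops == std::vector<int>{0, 2, 1}));
  return 0;
}
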
+
bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
const MachineOperand *MO) const {
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
@@ -1438,14 +1584,16 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
if (!MO)
MO = &MI->getOperand(OpIdx);
- if (usesConstantBus(MRI, *MO)) {
+ if (isVALU(InstDesc.Opcode) &&
+ usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
unsigned SGPRUsed =
MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (i == OpIdx)
continue;
- if (usesConstantBus(MRI, MI->getOperand(i)) &&
- MI->getOperand(i).isReg() && MI->getOperand(i).getReg() != SGPRUsed) {
+ const MachineOperand &Op = MI->getOperand(i);
+ if (Op.isReg() && Op.getReg() != SGPRUsed &&
+ usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
return false;
}
}
@@ -1463,12 +1611,13 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
//
// s_sendmsg 0, s0 ; Operand defined as m0reg
// ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
+
return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
}
// Handle non-register types that are treated like immediates.
- assert(MO->isImm() || MO->isFPImm() || MO->isTargetIndex() || MO->isFI());
+ assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
if (!DefinedRC) {
// This operand expects an immediate.
@@ -1537,7 +1686,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// We can use one SGPR in each VOP3 instruction.
continue;
}
- } else if (!isLiteralConstant(MO)) {
+ } else if (!isLiteralConstant(MO, getOpSize(MI->getOpcode(), Idx))) {
// If it is not a register and not a literal constant, then it must be
// an inline constant which is always legal.
continue;
@@ -1641,17 +1790,18 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// SRsrcPtrLo = srsrc:sub0
unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
- &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
+ &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VGPR_32RegClass);
// SRsrcPtrHi = srsrc:sub1
unsigned SRsrcPtrHi = buildExtractSubReg(MI, MRI, *SRsrc,
- &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
+ &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VGPR_32RegClass);
// Create an empty resource descriptor
unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
+ uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
// Zero64 = 0
BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
@@ -1661,12 +1811,12 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
SRsrcFormatLo)
- .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
+ .addImm(RsrcDataFormat & 0xFFFFFFFF);
// SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
SRsrcFormatHi)
- .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
+ .addImm(RsrcDataFormat >> 32);
// NewSRsrc = {Zero64, SRsrcFormat}
BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
@@ -1685,8 +1835,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
if (VAddr) {
// This is already an ADDR64 instruction so we need to add the pointer
// extracted from the resource descriptor to the current value of VAddr.
- NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
- NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
// NewVaddrLo = SRsrcPtrLo + VAddr:sub0
BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
@@ -1709,9 +1859,6 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
- assert(SOffset->isImm() && SOffset->getImm() == 0 && "Legalizing MUBUF "
- "with non-zero soffset is not implemented");
- (void)SOffset;
// Create the new instruction.
unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
@@ -1722,6 +1869,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
.addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
// This will be replaced later
// with the new value of vaddr.
+ .addOperand(*SOffset)
.addOperand(*Offset);
MI->removeFromParent();
@@ -1764,27 +1912,30 @@ void SIInstrInfo::splitSMRD(MachineInstr *MI,
getNamedOperand(*MI, AMDGPU::OpName::offset);
const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
+ // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
+ // on VI.
if (OffOp) {
+ bool isVI = RI.ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS;
+ unsigned OffScale = isVI ? 1 : 4;
// Handle the _IMM variant
- unsigned LoOffset = OffOp->getImm();
- unsigned HiOffset = LoOffset + (HalfSize / 4);
+ unsigned LoOffset = OffOp->getImm() * OffScale;
+ unsigned HiOffset = LoOffset + HalfSize;
Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
.addOperand(*SBase)
- .addImm(LoOffset);
+ .addImm(LoOffset / OffScale);
- if (!isUInt<8>(HiOffset)) {
+ if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
unsigned OffsetSGPR =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
- .addImm(HiOffset << 2); // The immediate offset is in dwords,
- // but offset in register is in bytes.
+ .addImm(HiOffset); // The offset in register is in bytes.
Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
.addOperand(*SBase)
.addReg(OffsetSGPR);
} else {
Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
.addOperand(*SBase)
- .addImm(HiOffset);
+ .addImm(HiOffset / OffScale);
}
} else {
// Handle the _SGPR variant
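
To see the scaling at work, suppose a load is split into two halves 16 bytes apart and the original encoded offset is 3. On SI the encoding counts dwords (OffScale = 4), so the byte offsets are 12 and 28 and the re-encoded immediates are 3 and 7; on VI the encoding already counts bytes, giving 3 and 19. A small arithmetic sketch with illustrative values:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      unsigned HalfSize = 16; // bytes between the two half-loads
      unsigned Imm = 3;       // original encoded offset
      for (bool IsVI : {false, true}) {
        unsigned OffScale = IsVI ? 1 : 4;        // SI: dwords, VI: bytes
        unsigned LoOffset = Imm * OffScale;      // low half, in bytes
        unsigned HiOffset = LoOffset + HalfSize; // high half, in bytes
        std::printf("%s: lo imm=%u, hi imm=%u\n", IsVI ? "VI" : "SI",
                    LoOffset / OffScale, HiOffset / OffScale);
      }
      return 0;
    }
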
@@ -1849,10 +2000,13 @@ void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) con
ImmOffset = 0;
} else {
assert(MI->getOperand(2).isImm());
- // SMRD instructions take a dword offsets and MUBUF instructions
- // take a byte offset.
- ImmOffset = MI->getOperand(2).getImm() << 2;
+ // SMRD instructions take a dword offset on SI and a byte offset on VI,
+ // and MUBUF instructions always take a byte offset.
+ ImmOffset = MI->getOperand(2).getImm();
+ if (RI.ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
+ ImmOffset <<= 2;
RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+
if (isUInt<12>(ImmOffset)) {
BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
RegOffset)
@@ -1870,13 +2024,14 @@ void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) con
unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
.addImm(0);
BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
- .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
+ .addImm(RsrcDataFormat & 0xFFFFFFFF);
BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
- .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
+ .addImm(RsrcDataFormat >> 32);
BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
.addReg(DWord0)
.addImm(AMDGPU::sub0)
@@ -1893,6 +2048,7 @@ void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) con
MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
}
MI->getOperand(1).setReg(SRsrc);
+ MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0));
MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
const TargetRegisterClass *NewDstRC =
@@ -2001,6 +2157,43 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
continue;
}
+ case AMDGPU::S_LSHL_B32:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
+ swapOperands(Inst);
+ }
+ break;
+ case AMDGPU::S_ASHR_I32:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
+ swapOperands(Inst);
+ }
+ break;
+ case AMDGPU::S_LSHR_B32:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
+ swapOperands(Inst);
+ }
+ break;
+ case AMDGPU::S_LSHL_B64:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_LSHLREV_B64;
+ swapOperands(Inst);
+ }
+ break;
+ case AMDGPU::S_ASHR_I64:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_ASHRREV_I64;
+ swapOperands(Inst);
+ }
+ break;
+ case AMDGPU::S_LSHR_B64:
+ if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ NewOpcode = AMDGPU::V_LSHRREV_B64;
+ swapOperands(Inst);
+ }
+ break;
+
case AMDGPU::S_BFE_U64:
case AMDGPU::S_BFM_B64:
llvm_unreachable("Moving this op to VALU not implemented");
@@ -2107,7 +2300,7 @@ unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
}
const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
- return &AMDGPU::VReg_32RegClass;
+ return &AMDGPU::VGPR_32RegClass;
}
void SIInstrInfo::splitScalar64BitUnaryOp(
@@ -2237,7 +2430,7 @@ void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist
MachineOperand &Dest = Inst->getOperand(0);
MachineOperand &Src = Inst->getOperand(1);
- const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
+ const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
const TargetRegisterClass *SrcRC = Src.isReg() ?
MRI.getRegClass(Src.getReg()) :
&AMDGPU::SGPR_32RegClass;
@@ -2419,7 +2612,7 @@ MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
const DebugLoc &DL = MBB->findDebugLoc(I);
- unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
getIndirectIndexBegin(*MBB->getParent()));
return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
@@ -2437,7 +2630,7 @@ MachineInstrBuilder SIInstrInfo::buildIndirectRead(
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
const DebugLoc &DL = MBB->findDebugLoc(I);
- unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
getIndirectIndexBegin(*MBB->getParent()));
return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
@@ -2459,7 +2652,7 @@ void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
for (int Index = Begin; Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));
+ Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));
for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
@@ -2485,3 +2678,11 @@ MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
return &MI.getOperand(Idx);
}
+
+uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
+ uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
+ if (ST.isAmdHsaOS())
+ RsrcDataFormat |= (1ULL << 56);
+
+ return RsrcDataFormat;
+}
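
The extra bit set for the HSA OS is, as far as later in-tree comments describe it, the ATC bit of the buffer resource descriptor (bit 56 of the RSRC_DATA_FORMAT qword); treat that reading as an assumption here. Whatever its meaning, the 64-bit value is consumed by splitting it into two 32-bit S_MOV_B32 immediates, as in the legalization code above:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Placeholder value; the real constant is AMDGPU::RSRC_DATA_FORMAT
      // from SIDefines.h.
      uint64_t RsrcDataFormat = 0xf00000000000ULL;
      bool IsAmdHsaOS = true; // assumption for the example
      if (IsAmdHsaOS)
        RsrcDataFormat |= 1ULL << 56;
      uint32_t Lo = RsrcDataFormat & 0xFFFFFFFF; // -> SRsrcFormatLo
      uint32_t Hi = RsrcDataFormat >> 32;        // -> SRsrcFormatHi
      std::printf("lo=0x%08x hi=0x%08x\n", Lo, Hi);
      return 0;
    }
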
diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
index 3bdbc9b..12dc3f3 100644
--- a/lib/Target/R600/SIInstrInfo.h
+++ b/lib/Target/R600/SIInstrInfo.h
@@ -17,6 +17,7 @@
#define LLVM_LIB_TARGET_R600_SIINSTRINFO_H
#include "AMDGPUInstrInfo.h"
+#include "SIDefines.h"
#include "SIRegisterInfo.h"
namespace llvm {
@@ -44,6 +45,8 @@ private:
const TargetRegisterClass *RC,
const MachineOperand &Op) const;
+ void swapOperands(MachineBasicBlock::iterator Inst) const;
+
void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst, unsigned Opcode) const;
@@ -107,6 +110,10 @@ public:
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ /// \brief Returns an opcode that can be used to move a value to a \p DstRC
+ /// register. If there is no hardware instruction that can store to \p
+ /// DstRC, then AMDGPU::COPY is returned.
+ unsigned getMovOpcode(const TargetRegisterClass *DstRC) const;
unsigned commuteOpcode(unsigned Opcode) const;
MachineInstr *commuteInstruction(MachineInstr *MI,
@@ -128,27 +135,92 @@ public:
bool isMov(unsigned Opcode) const override;
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
- bool isDS(uint16_t Opcode) const;
- bool isMIMG(uint16_t Opcode) const;
- bool isSMRD(uint16_t Opcode) const;
- bool isMUBUF(uint16_t Opcode) const;
- bool isMTBUF(uint16_t Opcode) const;
- bool isFLAT(uint16_t Opcode) const;
- bool isVOP1(uint16_t Opcode) const;
- bool isVOP2(uint16_t Opcode) const;
- bool isVOP3(uint16_t Opcode) const;
- bool isVOPC(uint16_t Opcode) const;
+
+ bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const final;
+
+ bool isSALU(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SALU;
+ }
+
+ bool isVALU(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VALU;
+ }
+
+ bool isSOP1(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SOP1;
+ }
+
+ bool isSOP2(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SOP2;
+ }
+
+ bool isSOPC(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SOPC;
+ }
+
+ bool isSOPK(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SOPK;
+ }
+
+ bool isSOPP(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SOPP;
+ }
+
+ bool isVOP1(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP1;
+ }
+
+ bool isVOP2(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP2;
+ }
+
+ bool isVOP3(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP3;
+ }
+
+ bool isVOPC(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOPC;
+ }
+
+ bool isMUBUF(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
+ }
+
+ bool isMTBUF(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
+ }
+
+ bool isSMRD(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SMRD;
+ }
+
+ bool isDS(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::DS;
+ }
+
+ bool isMIMG(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::MIMG;
+ }
+
+ bool isFLAT(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::FLAT;
+ }
+
+ bool isWQM(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::WQM;
+ }
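
All of the predicates above share one shape: tablegen sets a distinct bit in each opcode's TSFlags word per instruction format, and the query is a single bit test. A self-contained sketch with made-up bit positions (the real ones live in SIDefines.h):

    #include <cstdint>

    namespace FlagStub { // stand-in for SIInstrFlags
      const uint64_t SALU = 1ULL << 3;
      const uint64_t VALU = 1ULL << 4;
      const uint64_t SMRD = 1ULL << 5;
    }

    // One bit test per format; an opcode can carry several flags at once
    // (every VOP1/VOP2/VOP3/VOPC opcode, for instance, also sets VALU).
    static bool hasFlag(uint64_t TSFlags, uint64_t Flag) {
      return (TSFlags & Flag) != 0;
    }
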
bool isInlineConstant(const APInt &Imm) const;
- bool isInlineConstant(const MachineOperand &MO) const;
- bool isLiteralConstant(const MachineOperand &MO) const;
+ bool isInlineConstant(const MachineOperand &MO, unsigned OpSize) const;
+ bool isLiteralConstant(const MachineOperand &MO, unsigned OpSize) const;
bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO) const;
/// \brief Return true if the given offset size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
- static bool canFoldOffset(unsigned OffsetSize, unsigned AS) LLVM_READNONE;
+ bool canFoldOffset(unsigned OffsetSize, unsigned AS) const;
/// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
/// This function will return false if you pass it a 32-bit instruction.
@@ -156,7 +228,8 @@ public:
/// \brief Returns true if this operand uses the constant bus.
bool usesConstantBus(const MachineRegisterInfo &MRI,
- const MachineOperand &MO) const;
+ const MachineOperand &MO,
+ unsigned OpSize) const;
/// \brief Return true if this instruction has any modifiers.
/// e.g. src[012]_mod, omod, clamp.
@@ -168,7 +241,6 @@ public:
bool verifyInstruction(const MachineInstr *MI,
StringRef &ErrInfo) const override;
- bool isSALUInstr(const MachineInstr &MI) const;
static unsigned getVALUOp(const MachineInstr &MI);
bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
@@ -179,7 +251,27 @@ public:
/// the register class of its machine operand.
/// to infer the correct register class based on the other operands.
const TargetRegisterClass *getOpRegClass(const MachineInstr &MI,
- unsigned OpNo) const;\
+ unsigned OpNo) const;
+
+ /// \brief Return the size in bytes of operand \p OpNo on the given
+ /// instruction opcode.
+ unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
+ const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo];
+
+ if (OpInfo.RegClass == -1) {
+ // If this is an immediate operand, this must be a 32-bit literal.
+ assert(OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE);
+ return 4;
+ }
+
+ return RI.getRegClass(OpInfo.RegClass)->getSize();
+ }
+
+ /// \brief This form should usually be preferred since it handles operands
+ /// with unknown register classes.
+ unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const {
+ return getOpRegClass(MI, OpNo)->getSize();
+ }
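
The operand size these helpers return is what the widened isLiteralConstant/usesConstantBus signatures consume: the same immediate bit pattern can be inline-encodable at one width and a literal at another, so classification needs the width. A simplified classifier using only SI's integer inline range of -16..64 (float inline values omitted; a sketch, not the in-tree rule set):

    #include <cstdint>

    static bool isInlineImm(int64_t Imm) { return Imm >= -16 && Imm <= 64; }

    static bool isLiteral(int64_t Imm, unsigned OpSize) {
      if (OpSize == 4) // compare as a 32-bit value: 0xFFFFFFFF is -1, inline
        return !isInlineImm(static_cast<int32_t>(Imm));
      return !isInlineImm(Imm); // as 64 bits the same pattern is a literal
    }
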
/// \returns true if it is legal for the operand at index \p OpNo
/// to read a VGPR.
@@ -250,6 +342,9 @@ public:
unsigned OpName) const {
return getNamedOperand(const_cast<MachineInstr &>(MI), OpName);
}
+
+ uint64_t getDefaultRsrcDataFormat() const;
+
};
namespace AMDGPU {
@@ -258,7 +353,6 @@ namespace AMDGPU {
int getVOPe32(uint16_t Opcode);
int getCommuteRev(uint16_t Opcode);
int getCommuteOrig(uint16_t Opcode);
- int getMCOpcode(uint16_t Opcode, unsigned Gen);
int getAddr64Inst(uint16_t Opcode);
int getAtomicRetOp(uint16_t Opcode);
int getAtomicNoRetOp(uint16_t Opcode);
diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
index 713e84e..e2747dc 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/R600/SIInstrInfo.td
@@ -9,35 +9,65 @@
class vop {
field bits<9> SI3;
+ field bits<10> VI3;
}
-class vopc <bits<8> si> : vop {
+class vopc <bits<8> si, bits<8> vi = !add(0x40, si)> : vop {
field bits<8> SI = si;
+ field bits<8> VI = vi;
- field bits<9> SI3 = {0, si{7-0}};
+ field bits<9> SI3 = {0, si{7-0}};
+ field bits<10> VI3 = {0, 0, vi{7-0}};
}
-class vop1 <bits<8> si> : vop {
- field bits<8> SI = si;
+class vop1 <bits<8> si, bits<8> vi = si> : vop {
+ field bits<8> SI = si;
+ field bits<8> VI = vi;
- field bits<9> SI3 = {1, 1, si{6-0}};
+ field bits<9> SI3 = {1, 1, si{6-0}};
+ field bits<10> VI3 = !add(0x140, vi);
}
-class vop2 <bits<6> si> : vop {
+class vop2 <bits<6> si, bits<6> vi = si> : vop {
field bits<6> SI = si;
+ field bits<6> VI = vi;
+
+ field bits<9> SI3 = {1, 0, 0, si{5-0}};
+ field bits<10> VI3 = {0, 1, 0, 0, vi{5-0}};
+}
- field bits<9> SI3 = {1, 0, 0, si{5-0}};
+// Specify a VOP2 opcode for SI and a VOP3 opcode for VI, for an
+// instruction that has no VOP2 encoding on VI.
+class vop23 <bits<6> si, bits<10> vi> : vop2 <si> {
+ let VI3 = vi;
}
-class vop3 <bits<9> si> : vop {
- field bits<9> SI3 = si;
+class vop3 <bits<9> si, bits<10> vi = {0, si}> : vop {
+ let SI3 = si;
+ let VI3 = vi;
+}
+
+class sop1 <bits<8> si, bits<8> vi = si> {
+ field bits<8> SI = si;
+ field bits<8> VI = vi;
+}
+
+class sop2 <bits<7> si, bits<7> vi = si> {
+ field bits<7> SI = si;
+ field bits<7> VI = vi;
+}
+
+class sopk <bits<5> si, bits<5> vi = si> {
+ field bits<5> SI = si;
+ field bits<5> VI = vi;
}
// Except for the NONE field, this must be kept in sync with the SISubtarget enum
-// in AMDGPUMCInstLower.h
+// in AMDGPUInstrInfo.cpp
def SISubtarget {
int NONE = -1;
int SI = 0;
+ int VI = 1;
}
//===----------------------------------------------------------------------===//
@@ -131,6 +161,22 @@ def as_i32imm: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
}]>;
+def as_i64imm: SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i64);
+}]>;
+
+// Copied from the AArch64 backend:
+def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i32);
+}]>;
+
+// Copied from the AArch64 backend:
+def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i64);
+}]>;
+
def IMM8bit : PatLeaf <(imm),
[{return isUInt<8>(N->getZExtValue());}]
>;
@@ -143,6 +189,10 @@ def IMM16bit : PatLeaf <(imm),
[{return isUInt<16>(N->getZExtValue());}]
>;
+def IMM20bit : PatLeaf <(imm),
+ [{return isUInt<20>(N->getZExtValue());}]
+>;
+
def IMM32bit : PatLeaf <(imm),
[{return isUInt<32>(N->getZExtValue());}]
>;
@@ -156,13 +206,16 @@ class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
return isInlineImmediate(N);
}]>;
+class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
+ return isInlineImmediate(N);
+}]>;
+
class SGPRImm <dag frag> : PatLeaf<frag, [{
- if (TM.getSubtarget<AMDGPUSubtarget>().getGeneration() <
- AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
return false;
}
const SIRegisterInfo *SIRI =
- static_cast<const SIRegisterInfo*>(TM.getSubtargetImpl()->getRegisterInfo());
+ static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
U != E; ++U) {
if (SIRI->isSGPRClass(getOperandRegClass(*U, U.getOperandNo()))) {
@@ -186,6 +239,7 @@ def sopp_brtarget : Operand<OtherVT> {
}
include "SIInstrFormats.td"
+include "VIInstrFormats.td"
let OperandType = "OPERAND_IMMEDIATE" in {
@@ -238,14 +292,15 @@ def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
def MUBUFAddr32 : ComplexPattern<i64, 9, "SelectMUBUFAddr32">;
-def MUBUFAddr64 : ComplexPattern<i64, 3, "SelectMUBUFAddr64">;
-def MUBUFAddr64Atomic : ComplexPattern<i64, 4, "SelectMUBUFAddr64">;
+def MUBUFAddr64 : ComplexPattern<i64, 4, "SelectMUBUFAddr64">;
+def MUBUFAddr64Atomic : ComplexPattern<i64, 5, "SelectMUBUFAddr64">;
def MUBUFScratch : ComplexPattern<i64, 4, "SelectMUBUFScratch">;
def MUBUFOffset : ComplexPattern<i64, 6, "SelectMUBUFOffset">;
def MUBUFOffsetAtomic : ComplexPattern<i64, 4, "SelectMUBUFOffset">;
def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
+def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
//===----------------------------------------------------------------------===//
@@ -298,7 +353,7 @@ class SIMCInstr <string pseudo, int subtarget> {
class EXPCommon : InstSI<
(outs),
(ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
- VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
+ VGPR_32:$src0, VGPR_32:$src1, VGPR_32:$src2, VGPR_32:$src3),
"exp $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
[] > {
@@ -308,60 +363,157 @@ class EXPCommon : InstSI<
multiclass EXP_m {
- let isPseudo = 1 in {
+ let isPseudo = 1, isCodeGenOnly = 1 in {
def "" : EXPCommon, SIMCInstr <"exp", SISubtarget.NONE> ;
}
def _si : EXPCommon, SIMCInstr <"exp", SISubtarget.SI>, EXPe;
+
+ def _vi : EXPCommon, SIMCInstr <"exp", SISubtarget.VI>, EXPe_vi;
}
//===----------------------------------------------------------------------===//
// Scalar classes
//===----------------------------------------------------------------------===//
-class SOP1_32 <bits<8> op, string opName, list<dag> pattern> : SOP1 <
- op, (outs SReg_32:$dst), (ins SSrc_32:$src0),
- opName#" $dst, $src0", pattern
+class SOP1_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
+ SOP1 <outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+class SOP1_Real_si <sop1 op, string opName, dag outs, dag ins, string asm> :
+ SOP1 <outs, ins, asm, []>,
+ SOP1e <op.SI>,
+ SIMCInstr<opName, SISubtarget.SI>;
+
+class SOP1_Real_vi <sop1 op, string opName, dag outs, dag ins, string asm> :
+ SOP1 <outs, ins, asm, []>,
+ SOP1e <op.VI>,
+ SIMCInstr<opName, SISubtarget.VI>;
+
+multiclass SOP1_m <sop1 op, string opName, dag outs, dag ins, string asm,
+ list<dag> pattern> {
+
+ def "" : SOP1_Pseudo <opName, outs, ins, pattern>;
+
+ def _si : SOP1_Real_si <op, opName, outs, ins, asm>;
+
+ def _vi : SOP1_Real_vi <op, opName, outs, ins, asm>;
+
+}
+
+multiclass SOP1_32 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
+ op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0),
+ opName#" $dst, $src0", pattern
>;
-class SOP1_64 <bits<8> op, string opName, list<dag> pattern> : SOP1 <
- op, (outs SReg_64:$dst), (ins SSrc_64:$src0),
- opName#" $dst, $src0", pattern
+multiclass SOP1_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
+ op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0),
+ opName#" $dst, $src0", pattern
>;
+// no input, 64-bit output.
+multiclass SOP1_64_0 <sop1 op, string opName, list<dag> pattern> {
+ def "" : SOP1_Pseudo <opName, (outs SReg_64:$dst), (ins), pattern>;
+
+ def _si : SOP1_Real_si <op, opName, (outs SReg_64:$dst), (ins),
+ opName#" $dst"> {
+ let ssrc0 = 0;
+ }
+
+ def _vi : SOP1_Real_vi <op, opName, (outs SReg_64:$dst), (ins),
+ opName#" $dst"> {
+ let ssrc0 = 0;
+ }
+}
+
+// 64-bit input, no output
+multiclass SOP1_1 <sop1 op, string opName, list<dag> pattern> {
+ def "" : SOP1_Pseudo <opName, (outs), (ins SReg_64:$src0), pattern>;
+
+ def _si : SOP1_Real_si <op, opName, (outs), (ins SReg_64:$src0),
+ opName#" $src0"> {
+ let sdst = 0;
+ }
+
+ def _vi : SOP1_Real_vi <op, opName, (outs), (ins SReg_64:$src0),
+ opName#" $src0"> {
+ let sdst = 0;
+ }
+}
+
// 64-bit input, 32-bit output.
-class SOP1_32_64 <bits<8> op, string opName, list<dag> pattern> : SOP1 <
- op, (outs SReg_32:$dst), (ins SSrc_64:$src0),
- opName#" $dst, $src0", pattern
+multiclass SOP1_32_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
+ op, opName, (outs SReg_32:$dst), (ins SSrc_64:$src0),
+ opName#" $dst, $src0", pattern
>;
-class SOP2_32 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
- op, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
->;
+class SOP2_Pseudo<string opName, dag outs, dag ins, list<dag> pattern> :
+ SOP2<outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+ let Size = 4;
-class SOP2_SELECT_32 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
- op, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc),
- opName#" $dst, $src0, $src1 [$scc]", pattern
->;
+ // Pseudo instructions have no encodings, but adding this field here allows
+ // us to write:
+ //   let sdst = xxx in { ... }
+ // around multiclasses that include both real and pseudo instructions.
+ field bits<7> sdst = 0;
+}
-class SOP2_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
- op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_64:$src1),
- opName#" $dst, $src0, $src1", pattern
->;
+class SOP2_Real_si<sop2 op, string opName, dag outs, dag ins, string asm> :
+ SOP2<outs, ins, asm, []>,
+ SOP2e<op.SI>,
+ SIMCInstr<opName, SISubtarget.SI>;
+
+class SOP2_Real_vi<sop2 op, string opName, dag outs, dag ins, string asm> :
+ SOP2<outs, ins, asm, []>,
+ SOP2e<op.VI>,
+ SIMCInstr<opName, SISubtarget.VI>;
+
+multiclass SOP2_SELECT_32 <sop2 op, string opName, list<dag> pattern> {
+ def "" : SOP2_Pseudo <opName, (outs SReg_32:$dst),
+ (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc), pattern>;
+
+ def _si : SOP2_Real_si <op, opName, (outs SReg_32:$dst),
+ (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc),
+ opName#" $dst, $src0, $src1 [$scc]">;
+
+ def _vi : SOP2_Real_vi <op, opName, (outs SReg_32:$dst),
+ (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc),
+ opName#" $dst, $src0, $src1 [$scc]">;
+}
+
+multiclass SOP2_m <sop2 op, string opName, dag outs, dag ins, string asm,
+ list<dag> pattern> {
+
+ def "" : SOP2_Pseudo <opName, outs, ins, pattern>;
+
+ def _si : SOP2_Real_si <op, opName, outs, ins, asm>;
-class SOP2_64_32 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
- op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
+ def _vi : SOP2_Real_vi <op, opName, outs, ins, asm>;
+
+}
+
+multiclass SOP2_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
+ op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
>;
-class SOP2_SHIFT_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
- op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
+multiclass SOP2_64 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
+ op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_64:$src1),
+ opName#" $dst, $src0, $src1", pattern
>;
+multiclass SOP2_64_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
+ op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
+>;
-class SOPC_Helper <bits<7> op, RegisterClass rc, ValueType vt,
+class SOPC_Helper <bits<7> op, RegisterOperand rc, ValueType vt,
string opName, PatLeaf cond> : SOPC <
op, (outs SCCReg:$dst), (ins rc:$src0, rc:$src1),
opName#" $dst, $src0, $src1", []>;
@@ -372,15 +524,44 @@ class SOPC_32<bits<7> op, string opName, PatLeaf cond = COND_NULL>
class SOPC_64<bits<7> op, string opName, PatLeaf cond = COND_NULL>
: SOPC_Helper<op, SSrc_64, i64, opName, cond>;
-class SOPK_32 <bits<5> op, string opName, list<dag> pattern> : SOPK <
- op, (outs SReg_32:$dst), (ins u16imm:$src0),
- opName#" $dst, $src0", pattern
->;
+class SOPK_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
+ SOPK <outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
-class SOPK_64 <bits<5> op, string opName, list<dag> pattern> : SOPK <
- op, (outs SReg_64:$dst), (ins u16imm:$src0),
- opName#" $dst, $src0", pattern
->;
+class SOPK_Real_si <sopk op, string opName, dag outs, dag ins, string asm> :
+ SOPK <outs, ins, asm, []>,
+ SOPKe <op.SI>,
+ SIMCInstr<opName, SISubtarget.SI>;
+
+class SOPK_Real_vi <sopk op, string opName, dag outs, dag ins, string asm> :
+ SOPK <outs, ins, asm, []>,
+ SOPKe <op.VI>,
+ SIMCInstr<opName, SISubtarget.VI>;
+
+multiclass SOPK_32 <sopk op, string opName, list<dag> pattern> {
+ def "" : SOPK_Pseudo <opName, (outs SReg_32:$dst), (ins u16imm:$src0),
+ pattern>;
+
+ def _si : SOPK_Real_si <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
+ opName#" $dst, $src0">;
+
+ def _vi : SOPK_Real_vi <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
+ opName#" $dst, $src0">;
+}
+
+multiclass SOPK_SCC <sopk op, string opName, list<dag> pattern> {
+ def "" : SOPK_Pseudo <opName, (outs SCCReg:$dst),
+ (ins SReg_32:$src0, u16imm:$src1), pattern>;
+
+ def _si : SOPK_Real_si <op, opName, (outs SCCReg:$dst),
+ (ins SReg_32:$src0, u16imm:$src1), opName#" $dst, $src0">;
+
+ def _vi : SOPK_Real_vi <op, opName, (outs SCCReg:$dst),
+ (ins SReg_32:$src0, u16imm:$src1), opName#" $dst, $src0">;
+}
//===----------------------------------------------------------------------===//
// SMRD classes
@@ -390,6 +571,7 @@ class SMRD_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
SMRD <outs, ins, "", pattern>,
SIMCInstr<opName, SISubtarget.NONE> {
let isPseudo = 1;
+ let isCodeGenOnly = 1;
}
class SMRD_Real_si <bits<5> op, string opName, bit imm, dag outs, dag ins,
@@ -398,6 +580,12 @@ class SMRD_Real_si <bits<5> op, string opName, bit imm, dag outs, dag ins,
SMRDe <op, imm>,
SIMCInstr<opName, SISubtarget.SI>;
+class SMRD_Real_vi <bits<8> op, string opName, bit imm, dag outs, dag ins,
+ string asm> :
+ SMRD <outs, ins, asm, []>,
+ SMEMe_vi <op, imm>,
+ SIMCInstr<opName, SISubtarget.VI>;
+
multiclass SMRD_m <bits<5> op, string opName, bit imm, dag outs, dag ins,
string asm, list<dag> pattern> {
@@ -405,6 +593,11 @@ multiclass SMRD_m <bits<5> op, string opName, bit imm, dag outs, dag ins,
def _si : SMRD_Real_si <op, opName, imm, outs, ins, asm>;
+ // glc is only applicable to scalar stores, which are not yet
+ // implemented.
+ let glc = 0 in {
+ def _vi : SMRD_Real_vi <{0, 0, 0, op}, opName, imm, outs, ins, asm>;
+ }
}
multiclass SMRD_Helper <bits<5> op, string opName, RegisterClass baseClass,
@@ -444,44 +637,27 @@ class getNumSrcArgs<ValueType Src1, ValueType Src2> {
// Returns the register class to use for the destination of VOP[123C]
// instructions for the given VT.
class getVALUDstForVT<ValueType VT> {
- RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64);
+ RegisterClass ret = !if(!eq(VT.Size, 32), VGPR_32,
+ !if(!eq(VT.Size, 64), VReg_64,
+ SReg_64)); // else VT == i1
}
// Returns the register class to use for source 0 of VOP[12C]
// instructions for the given VT.
class getVOPSrc0ForVT<ValueType VT> {
- RegisterClass ret = !if(!eq(VT.Size, 32), VSrc_32, VSrc_64);
+ RegisterOperand ret = !if(!eq(VT.Size, 32), VSrc_32, VSrc_64);
}
// Returns the register class to use for source 1 of VOP[12C] for the
// given VT.
class getVOPSrc1ForVT<ValueType VT> {
- RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64);
-}
-
-// Returns the register classes for the source arguments of a VOP[12C]
-// instruction for the given SrcVTs.
-class getInRC32 <list<ValueType> SrcVT> {
- list<RegisterClass> ret = [
- getVOPSrc0ForVT<SrcVT[0]>.ret,
- getVOPSrc1ForVT<SrcVT[1]>.ret
- ];
+ RegisterClass ret = !if(!eq(VT.Size, 32), VGPR_32, VReg_64);
}
// Returns the register class to use for sources of VOP3 instructions for the
// given VT.
class getVOP3SrcForVT<ValueType VT> {
- RegisterClass ret = !if(!eq(VT.Size, 32), VCSrc_32, VCSrc_64);
-}
-
-// Returns the register classes for the source arguments of a VOP3
-// instruction for the given SrcVTs.
-class getInRC64 <list<ValueType> SrcVT> {
- list<RegisterClass> ret = [
- getVOP3SrcForVT<SrcVT[0]>.ret,
- getVOP3SrcForVT<SrcVT[1]>.ret,
- getVOP3SrcForVT<SrcVT[2]>.ret
- ];
+ RegisterOperand ret = !if(!eq(VT.Size, 32), VCSrc_32, VCSrc_64);
}
// Returns 1 if the source arguments have modifiers, 0 if they do not.
@@ -491,15 +667,15 @@ class hasModifiers<ValueType SrcVT> {
}
// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
-class getIns32 <RegisterClass Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
+class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1
!if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
(ins)));
}
// Returns the input arguments for VOP3 instructions for the given SrcVT.
-class getIns64 <RegisterClass Src0RC, RegisterClass Src1RC,
- RegisterClass Src2RC, int NumSrcArgs,
+class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
+ RegisterOperand Src2RC, int NumSrcArgs,
bit HasModifiers> {
dag ret =
@@ -549,7 +725,7 @@ class getAsm32 <int NumSrcArgs> {
// Returns the assembly string for the inputs and outputs of a VOP3
// instruction.
class getAsm64 <int NumSrcArgs, bit HasModifiers> {
- string src0 = "$src0_modifiers,";
+ string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
string src1 = !if(!eq(NumSrcArgs, 1), "",
!if(!eq(NumSrcArgs, 2), " $src1_modifiers",
" $src1_modifiers,"));
@@ -570,11 +746,11 @@ class VOPProfile <list<ValueType> _ArgVT> {
field ValueType Src1VT = ArgVT[2];
field ValueType Src2VT = ArgVT[3];
field RegisterClass DstRC = getVALUDstForVT<DstVT>.ret;
- field RegisterClass Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
+ field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
field RegisterClass Src1RC32 = getVOPSrc1ForVT<Src1VT>.ret;
- field RegisterClass Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
- field RegisterClass Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
- field RegisterClass Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
+ field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
+ field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
+ field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
field int NumSrcArgs = getNumSrcArgs<Src1VT, Src2VT>.ret;
field bit HasModifiers = hasModifiers<Src0VT>.ret;
@@ -604,14 +780,31 @@ def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
+def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
def VOP_I32_I32_I32_VCC : VOPProfile <[i32, i32, i32, untyped]> {
let Src0RC32 = VCSrc_32;
}
+
+def VOP_I1_F32_I32 : VOPProfile <[i1, f32, i32, untyped]> {
+ let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
+ let Asm64 = " $dst, $src0_modifiers, $src1";
+}
+
+def VOP_I1_F64_I32 : VOPProfile <[i1, f64, i32, untyped]> {
+ let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
+ let Asm64 = " $dst, $src0_modifiers, $src1";
+}
+
def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
+def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
+def VOP_MADK : VOPProfile <[f32, f32, f32, f32]> {
+ field dag Ins = (ins VCSrc_32:$src0, VGPR_32:$vsrc1, u32imm:$src2);
+ field string Asm = " $dst, $src0, $vsrc1, $src2";
+}
def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
@@ -633,8 +826,13 @@ class AtomicNoRet <string noRetOp, bit isRet> {
class VOP1_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
VOP1Common <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ VOP <opName>,
+ SIMCInstr <opName#"_e32", SISubtarget.NONE> {
let isPseudo = 1;
+ let isCodeGenOnly = 1;
+
+ field bits<8> vdst;
+ field bits<9> src0;
}
multiclass VOP1_m <vop1 op, dag outs, dag ins, string asm, list<dag> pattern,
@@ -642,32 +840,99 @@ multiclass VOP1_m <vop1 op, dag outs, dag ins, string asm, list<dag> pattern,
def "" : VOP1_Pseudo <outs, ins, pattern, opName>;
def _si : VOP1<op.SI, outs, ins, asm, []>,
- SIMCInstr <opName, SISubtarget.SI>;
+ SIMCInstr <opName#"_e32", SISubtarget.SI>;
+ def _vi : VOP1<op.VI, outs, ins, asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.VI>;
+}
+
+multiclass VOP1SI_m <vop1 op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName> {
+ def "" : VOP1_Pseudo <outs, ins, pattern, opName>;
+
+ def _si : VOP1<op.SI, outs, ins, asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.SI>;
+ // No VI instruction. This class is for SI only.
+}
+
+class VOP2_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
+ VOP2Common <outs, ins, "", pattern>,
+ VOP <opName>,
+ SIMCInstr<opName#"_e32", SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+multiclass VOP2SI_m <vop2 op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName, string revOp> {
+ def "" : VOP2_Pseudo <outs, ins, pattern, opName>,
+ VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
+
+ def _si : VOP2 <op.SI, outs, ins, opName#asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.SI>;
+}
+
+multiclass VOP2_m <vop2 op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName, string revOp> {
+ def "" : VOP2_Pseudo <outs, ins, pattern, opName>,
+ VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
+
+ def _si : VOP2 <op.SI, outs, ins, opName#asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.SI>;
+ def _vi : VOP2 <op.VI, outs, ins, opName#asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.VI>;
}
class VOP3DisableFields <bit HasSrc1, bit HasSrc2, bit HasModifiers> {
bits<2> src0_modifiers = !if(HasModifiers, ?, 0);
bits<2> src1_modifiers = !if(HasModifiers, !if(HasSrc1, ?, 0), 0);
- bits<2> src2_modifiers = !if(HasModifiers, !if(HasSrc2, ? ,0) ,0);
+ bits<2> src2_modifiers = !if(HasModifiers, !if(HasSrc2, ?, 0), 0);
bits<2> omod = !if(HasModifiers, ?, 0);
bits<1> clamp = !if(HasModifiers, ?, 0);
bits<9> src1 = !if(HasSrc1, ?, 0);
bits<9> src2 = !if(HasSrc2, ?, 0);
}
+class VOP3DisableModFields <bit HasSrc0Mods,
+ bit HasSrc1Mods = 0,
+ bit HasSrc2Mods = 0,
+ bit HasOutputMods = 0> {
+ bits<2> src0_modifiers = !if(HasSrc0Mods, ?, 0);
+ bits<2> src1_modifiers = !if(HasSrc1Mods, ?, 0);
+ bits<2> src2_modifiers = !if(HasSrc2Mods, ?, 0);
+ bits<2> omod = !if(HasOutputMods, ?, 0);
+ bits<1> clamp = !if(HasOutputMods, ?, 0);
+}
+
class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
VOP3Common <outs, ins, "", pattern>,
VOP <opName>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName#"_e64", SISubtarget.NONE> {
let isPseudo = 1;
+ let isCodeGenOnly = 1;
}
class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
- VOP3 <op, outs, ins, asm, []>,
- SIMCInstr<opName, SISubtarget.SI>;
-
-multiclass VOP3_m <vop3 op, dag outs, dag ins, string asm, list<dag> pattern,
+ VOP3Common <outs, ins, asm, []>,
+ VOP3e <op>,
+ SIMCInstr<opName#"_e64", SISubtarget.SI>;
+
+class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
+ VOP3Common <outs, ins, asm, []>,
+ VOP3e_vi <op>,
+ SIMCInstr <opName#"_e64", SISubtarget.VI>;
+
+class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
+ VOP3Common <outs, ins, asm, []>,
+ VOP3be <op>,
+ SIMCInstr<opName#"_e64", SISubtarget.SI>;
+
+class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
+ VOP3Common <outs, ins, asm, []>,
+ VOP3be_vi <op>,
+ SIMCInstr <opName#"_e64", SISubtarget.VI>;
+
+multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
string opName, int NumSrcArgs, bit HasMods = 1> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
@@ -676,7 +941,26 @@ multiclass VOP3_m <vop3 op, dag outs, dag ins, string asm, list<dag> pattern,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
+ !if(!eq(NumSrcArgs, 2), 0, 1),
+ HasMods>;
+}
+
+// VOP3_m without source modifiers
+multiclass VOP3_m_nomods <vop op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName, int NumSrcArgs, bit HasMods = 1> {
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+ let src0_modifiers = 0,
+ src1_modifiers = 0,
+ src2_modifiers = 0,
+ clamp = 0,
+ omod = 0 in {
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>;
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>;
+ }
}
multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
@@ -686,6 +970,19 @@ multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
VOP3DisableFields<0, 0, HasMods>;
+
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<0, 0, HasMods>;
+}
+
+multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName, bit HasMods = 1> {
+
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ VOP3DisableFields<0, 0, HasMods>;
+ // No VI instruction. This class is for SI only.
}
multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
@@ -695,12 +992,28 @@ multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
- def _si : VOP3_Real_si <op.SI3,
- outs, ins, asm, opName>,
- VOP2_REV<revOp#"_e64_si", !eq(revOp, opName)>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 0, HasMods>;
+
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 0, HasMods>;
+}
+
+multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName, string revOp,
+ bit HasMods = 1, bit UseFullOp = 0> {
+
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
+
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
VOP3DisableFields<1, 0, HasMods>;
+
+ // No VI instruction. This class is for SI only.
}
+// XXX - Is v_div_scale_{f32|f64} only available in vop3b, without the
+// option of implicit vcc use?
multiclass VOP3b_2_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
bit HasMods = 1, bit UseFullOp = 0> {
@@ -711,13 +1024,27 @@ multiclass VOP3b_2_m <vop op, dag outs, dag ins, string asm,
// can write it into any SGPR. We currently don't use the carry out,
// so for now hardcode it to VCC as well.
let sdst = SIOperand.VCC, Defs = [VCC] in {
- def _si : VOP3b <op.SI3, outs, ins, asm, pattern>,
- VOP3DisableFields<1, 0, HasMods>,
- SIMCInstr<opName, SISubtarget.SI>,
- VOP2_REV<revOp#"_e64_si", !eq(revOp, opName)>;
+ def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 0, HasMods>;
+
+ def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 0, HasMods>;
} // End sdst = SIOperand.VCC, Defs = [VCC]
}
+multiclass VOP3b_3_m <vop op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName, string revOp,
+ bit HasMods = 1, bit UseFullOp = 0> {
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+
+ def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 1, HasMods>;
+
+ def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 1, HasMods>;
+}
+
multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName,
bit HasMods, bit defExec> {
@@ -725,17 +1052,39 @@ multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
- VOP3DisableFields<1, 0, HasMods> {
+ VOP3DisableFields<1, 0, HasMods> {
+ let Defs = !if(defExec, [EXEC], []);
+ }
+
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
}
}
+// An instruction that is VOP2 on SI and VOP3 on VI, no modifiers.
+multiclass VOP2SI_3VI_m <vop3 op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern = []> {
+ let isPseudo = 1, isCodeGenOnly = 1 in {
+ def "" : VOPAnyCommon <outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE>;
+ }
+
+ def _si : VOP2 <op.SI3{5-0}, outs, ins, asm, []>,
+ SIMCInstr <opName, SISubtarget.SI>;
+
+ def _vi : VOP3Common <outs, ins, asm, []>,
+ VOP3e_vi <op.VI3>,
+ VOP3DisableFields <1, 0, 0>,
+ SIMCInstr <opName, SISubtarget.VI>;
+}
+
multiclass VOP1_Helper <vop1 op, string opName, dag outs,
dag ins32, string asm32, list<dag> pat32,
dag ins64, string asm64, list<dag> pat64,
bit HasMods> {
- def _e32 : VOP1 <op.SI, outs, ins32, opName#asm32, pat32>, VOP<opName>;
+ defm _e32 : VOP1_m <op, outs, ins32, opName#asm32, pat32, opName>;
defm _e64 : VOP3_1_m <op, outs, ins64, opName#"_e64"#asm64, pat64, opName, HasMods>;
}
@@ -752,17 +1101,24 @@ multiclass VOP1Inst <vop1 op, string opName, VOPProfile P,
P.HasModifiers
>;
-class VOP2_e32 <bits<6> op, string opName, dag outs, dag ins, string asm,
- list<dag> pattern, string revOp> :
- VOP2 <op, outs, ins, opName#asm, pattern>,
- VOP <opName>,
- VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
+multiclass VOP1InstSI <vop1 op, string opName, VOPProfile P,
+ SDPatternOperator node = null_frag> {
+
+ defm _e32 : VOP1SI_m <op, P.Outs, P.Ins32, opName#P.Asm32, [], opName>;
+
+ defm _e64 : VOP3SI_1_m <op, P.Outs, P.Ins64, opName#P.Asm64,
+ !if(P.HasModifiers,
+ [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
+ i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
+ [(set P.DstVT:$dst, (node P.Src0VT:$src0))]),
+ opName, P.HasModifiers>;
+}
multiclass VOP2_Helper <vop2 op, string opName, dag outs,
dag ins32, string asm32, list<dag> pat32,
dag ins64, string asm64, list<dag> pat64,
string revOp, bit HasMods> {
- def _e32 : VOP2_e32 <op.SI, opName, outs, ins32, asm32, pat32, revOp>;
+ defm _e32 : VOP2_m <op, outs, ins32, asm32, pat32, opName, revOp>;
defm _e64 : VOP3_2_m <op,
outs, ins64, opName#"_e64"#asm64, pat64, opName, revOp, HasMods
@@ -784,12 +1140,27 @@ multiclass VOP2Inst <vop2 op, string opName, VOPProfile P,
revOp, P.HasModifiers
>;
+multiclass VOP2InstSI <vop2 op, string opName, VOPProfile P,
+ SDPatternOperator node = null_frag,
+ string revOp = opName> {
+ defm _e32 : VOP2SI_m <op, P.Outs, P.Ins32, P.Asm32, [], opName, revOp>;
+
+ defm _e64 : VOP3SI_2_m <op, P.Outs, P.Ins64, opName#"_e64"#P.Asm64,
+ !if(P.HasModifiers,
+ [(set P.DstVT:$dst,
+ (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
+ i1:$clamp, i32:$omod)),
+ (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
+ [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ opName, revOp, P.HasModifiers>;
+}
+
multiclass VOP2b_Helper <vop2 op, string opName, dag outs,
dag ins32, string asm32, list<dag> pat32,
dag ins64, string asm64, list<dag> pat64,
string revOp, bit HasMods> {
- def _e32 : VOP2_e32 <op.SI, opName, outs, ins32, asm32, pat32, revOp>;
+ defm _e32 : VOP2_m <op, outs, ins32, asm32, pat32, opName, revOp>;
defm _e64 : VOP3b_2_m <op,
outs, ins64, opName#"_e64"#asm64, pat64, opName, revOp, HasMods
@@ -811,16 +1182,94 @@ multiclass VOP2bInst <vop2 op, string opName, VOPProfile P,
revOp, P.HasModifiers
>;
+// A VOP2 instruction that is VOP3-only on VI.
+multiclass VOP2_VI3_Helper <vop23 op, string opName, dag outs,
+ dag ins32, string asm32, list<dag> pat32,
+ dag ins64, string asm64, list<dag> pat64,
+ string revOp, bit HasMods> {
+ defm _e32 : VOP2SI_m <op, outs, ins32, asm32, pat32, opName, revOp>;
+
+ defm _e64 : VOP3_2_m <op, outs, ins64, opName#"_e64"#asm64, pat64, opName,
+ revOp, HasMods>;
+}
+
+multiclass VOP2_VI3_Inst <vop23 op, string opName, VOPProfile P,
+ SDPatternOperator node = null_frag,
+ string revOp = opName>
+ : VOP2_VI3_Helper <
+ op, opName, P.Outs,
+ P.Ins32, P.Asm32, [],
+ P.Ins64, P.Asm64,
+ !if(P.HasModifiers,
+ [(set P.DstVT:$dst,
+ (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
+ i1:$clamp, i32:$omod)),
+ (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
+ [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ revOp, P.HasModifiers
+>;
+
+multiclass VOP2MADK <vop2 op, string opName, list<dag> pattern = []> {
+
+ def "" : VOP2_Pseudo <VOP_MADK.Outs, VOP_MADK.Ins, pattern, opName>;
+
+let isCodeGenOnly = 0 in {
+ def _si : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
+ !strconcat(opName, VOP_MADK.Asm), []>,
+ SIMCInstr <opName#"_e32", SISubtarget.SI>,
+ VOP2_MADKe <op.SI>;
+
+ def _vi : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
+ !strconcat(opName, VOP_MADK.Asm), []>,
+ SIMCInstr <opName#"_e32", SISubtarget.VI>,
+ VOP2_MADKe <op.VI>;
+} // End isCodeGenOnly = 0
+}
+
+class VOPC_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
+ VOPCCommon <ins, "", pattern>,
+ VOP <opName>,
+ SIMCInstr<opName#"_e32", SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+multiclass VOPC_m <vopc op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName, bit DefExec> {
+ def "" : VOPC_Pseudo <outs, ins, pattern, opName>;
+
+ def _si : VOPC<op.SI, ins, asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.SI> {
+ let Defs = !if(DefExec, [EXEC], []);
+ }
+
+ def _vi : VOPC<op.VI, ins, asm, []>,
+ SIMCInstr <opName#"_e32", SISubtarget.VI> {
+ let Defs = !if(DefExec, [EXEC], []);
+ }
+}
+
multiclass VOPC_Helper <vopc op, string opName,
dag ins32, string asm32, list<dag> pat32,
dag out64, dag ins64, string asm64, list<dag> pat64,
bit HasMods, bit DefExec> {
- def _e32 : VOPC <op.SI, ins32, opName#asm32, pat32>, VOP <opName> {
- let Defs = !if(DefExec, [EXEC], []);
- }
+ defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
+
+ defm _e64 : VOP3_C_m <op, out64, ins64, opName#"_e64"#asm64, pat64,
+ opName, HasMods, DefExec>;
+}
+
+// Special case for class instructions which only have modifiers on
+// the 1st source operand.
+multiclass VOPC_Class_Helper <vopc op, string opName,
+ dag ins32, string asm32, list<dag> pat32,
+ dag out64, dag ins64, string asm64, list<dag> pat64,
+ bit HasMods, bit DefExec> {
+ defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
- defm _e64 : VOP3_C_m <op, out64, ins64, opName#"_e64"#asm64, pat64, opName,
- HasMods, DefExec>;
+ defm _e64 : VOP3_C_m <op, out64, ins64, opName#"_e64"#asm64, pat64,
+ opName, HasMods, DefExec>,
+ VOP3DisableModFields<1, 0, 0>;
}
multiclass VOPCInst <vopc op, string opName,
@@ -839,6 +1288,19 @@ multiclass VOPCInst <vopc op, string opName,
P.HasModifiers, DefExec
>;
+multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
+ bit DefExec = 0> : VOPC_Class_Helper <
+ op, opName,
+ P.Ins32, P.Asm32, [],
+ (outs SReg_64:$dst), P.Ins64, P.Asm64,
+ !if(P.HasModifiers,
+ [(set i1:$dst,
+ (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
+ [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
+ P.HasModifiers, DefExec
+>;
+
+
multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL> :
VOPCInst <op, opName, VOP_F32_F32_F32, cond>;
@@ -873,6 +1335,18 @@ multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
op, outs, ins, opName#asm, pat, opName, NumSrcArgs, HasMods
>;
+multiclass VOPC_CLASS_F32 <vopc op, string opName> :
+ VOPCClassInst <op, opName, VOP_I1_F32_I32, 0>;
+
+multiclass VOPCX_CLASS_F32 <vopc op, string opName> :
+ VOPCClassInst <op, opName, VOP_I1_F32_I32, 1>;
+
+multiclass VOPC_CLASS_F64 <vopc op, string opName> :
+ VOPCClassInst <op, opName, VOP_I1_F64_I32, 0>;
+
+multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
+ VOPCClassInst <op, opName, VOP_I1_F64_I32, 1>;
+
multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
SDPatternOperator node = null_frag> : VOP3_Helper <
op, opName, P.Outs, P.Ins64, P.Asm64,
@@ -901,9 +1375,31 @@ multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
P.NumSrcArgs, P.HasModifiers
>;
-multiclass VOP3b_Helper <vop op, RegisterClass vrc, RegisterClass arc,
+// Special case for v_div_fmas_{f32|f64}, since it seems to be the
+// only VOP instruction that implicitly reads VCC.
+multiclass VOP3_VCC_Inst <vop3 op, string opName,
+ VOPProfile P,
+ SDPatternOperator node = null_frag> : VOP3_Helper <
+ op, opName,
+ P.Outs,
+ (ins InputModsNoDefault:$src0_modifiers, P.Src0RC64:$src0,
+ InputModsNoDefault:$src1_modifiers, P.Src1RC64:$src1,
+ InputModsNoDefault:$src2_modifiers, P.Src2RC64:$src2,
+ ClampMod:$clamp,
+ omod:$omod),
+ " $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
+ [(set P.DstVT:$dst,
+ (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
+ i1:$clamp, i32:$omod)),
+ (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
+ (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers)),
+ (i1 VCC)))],
+ 3, 1
+>;
+
+multiclass VOP3b_Helper <vop op, RegisterClass vrc, RegisterOperand arc,
string opName, list<dag> pattern> :
- VOP3b_2_m <
+ VOP3b_3_m <
op, (outs vrc:$vdst, SReg_64:$sdst),
(ins InputModsNoDefault:$src0_modifiers, arc:$src0,
InputModsNoDefault:$src1_modifiers, arc:$src1,
@@ -917,7 +1413,7 @@ multiclass VOP3b_64 <vop3 op, string opName, list<dag> pattern> :
VOP3b_Helper <op, VReg_64, VSrc_64, opName, pattern>;
multiclass VOP3b_32 <vop3 op, string opName, list<dag> pattern> :
- VOP3b_Helper <op, VReg_32, VSrc_32, opName, pattern>;
+ VOP3b_Helper <op, VGPR_32, VSrc_32, opName, pattern>;
class Vop3ModPat<Instruction Inst, VOPProfile P, SDPatternOperator node> : Pat<
@@ -931,124 +1427,259 @@ class Vop3ModPat<Instruction Inst, VOPProfile P, SDPatternOperator node> : Pat<
i32:$omod)>;
//===----------------------------------------------------------------------===//
+// Interpolation opcodes
+//===----------------------------------------------------------------------===//
+
+class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
+ VINTRPCommon <outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
+ string asm> :
+ VINTRPCommon <outs, ins, asm, []>,
+ VINTRPe <op>,
+ SIMCInstr<opName, SISubtarget.SI>;
+
+class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
+ string asm> :
+ VINTRPCommon <outs, ins, asm, []>,
+ VINTRPe_vi <op>,
+ SIMCInstr<opName, SISubtarget.VI>;
+
+multiclass VINTRP_m <bits <2> op, string opName, dag outs, dag ins, string asm,
+ string disableEncoding = "", string constraints = "",
+ list<dag> pattern = []> {
+ let DisableEncoding = disableEncoding,
+ Constraints = constraints in {
+ def "" : VINTRP_Pseudo <opName, outs, ins, pattern>;
+
+ def _si : VINTRP_Real_si <op, opName, outs, ins, asm>;
+
+ def _vi : VINTRP_Real_vi <op, opName, outs, ins, asm>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
// Vector I/O classes
//===----------------------------------------------------------------------===//
-class DS_1A <bits<8> op, dag outs, dag ins, string asm, list<dag> pat> :
- DS <op, outs, ins, asm, pat> {
+class DS_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
+ DS <outs, ins, "", pattern>,
+ SIMCInstr <opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+class DS_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
+ DS <outs, ins, asm, []>,
+ DSe <op>,
+ SIMCInstr <opName, SISubtarget.SI>;
+
+class DS_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
+ DS <outs, ins, asm, []>,
+ DSe_vi <op>,
+ SIMCInstr <opName, SISubtarget.VI>;
+
+class DS_1A_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
+ DS <outs, ins, asm, []>,
+ DSe <op>,
+ SIMCInstr <opName, SISubtarget.SI> {
+
+ // Single loads interpret the 2 i8imm operands as a single i16 offset.
bits<16> offset;
+ let offset0 = offset{7-0};
+ let offset1 = offset{15-8};
+}
+
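
The offset0/offset1 lets above are just a byte split of one 16-bit DS offset across the two 8-bit encoding fields; the same split in C++ terms:

    #include <cstdint>

    static void splitDSOffset(uint16_t Offset, uint8_t &Offset0,
                              uint8_t &Offset1) {
      Offset0 = Offset & 0xFF; // offset{7-0}
      Offset1 = Offset >> 8;   // offset{15-8}
    }
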
+class DS_1A_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
+ DS <outs, ins, asm, []>,
+ DSe_vi <op>,
+ SIMCInstr <opName, SISubtarget.VI> {
// Single loads interpret the 2 i8imm operands as a single i16 offset.
+ bits<16> offset;
let offset0 = offset{7-0};
let offset1 = offset{15-8};
+}
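
The offset0/offset1 fields above are simply the two bytes of the 16-bit offset. A minimal standalone TableGen sketch of the slicing (hypothetical names, not part of this commit):

// Hypothetical sketch: split a 16-bit offset into the two 8-bit encoding
// fields, exactly as DS_1A_Real_si/_vi do.
class OffsetSplit<bits<16> off> {
  bits<8> offset0 = off{7-0};    // low byte
  bits<8> offset1 = off{15-8};   // high byte
}
def ex : OffsetSplit<0x1234>;    // offset0 = 0x34, offset1 = 0x12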
+
+multiclass DS_1A_Load_m <bits<8> op, string opName, dag outs, dag ins, string asm,
+ list<dag> pat> {
+ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>;
- let hasSideEffects = 0;
+ let data0 = 0, data1 = 0 in {
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
-class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS_1A <
+multiclass DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass>
+ : DS_1A_Load_m <
op,
+ asm,
(outs regClass:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, ds_offset:$offset),
- asm#" $vdst, $addr"#"$offset"#" [M0]",
- []> {
- let data0 = 0;
- let data1 = 0;
- let mayLoad = 1;
- let mayStore = 0;
+ (ins i1imm:$gds, VGPR_32:$addr, ds_offset:$offset, M0Reg:$m0),
+ asm#" $vdst, $addr"#"$offset",
+ []>;
+
+multiclass DS_Load2_m <bits<8> op, string opName, dag outs, dag ins, string asm,
+ list<dag> pat> {
+ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>;
+
+ let data0 = 0, data1 = 0 in {
+ def _si : DS_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
-class DS_Load2_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+multiclass DS_Load2_Helper <bits<8> op, string asm, RegisterClass regClass>
+ : DS_Load2_m <
op,
+ asm,
(outs regClass:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, ds_offset0:$offset0, ds_offset1:$offset1),
- asm#" $vdst, $addr"#"$offset0"#"$offset1 [M0]",
- []> {
- let data0 = 0;
- let data1 = 0;
- let mayLoad = 1;
- let mayStore = 0;
- let hasSideEffects = 0;
+ (ins i1imm:$gds, VGPR_32:$addr, ds_offset0:$offset0, ds_offset1:$offset1,
+ M0Reg:$m0),
+ asm#" $vdst, $addr"#"$offset0"#"$offset1",
+ []>;
+
+multiclass DS_1A_Store_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat> {
+ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>;
+
+ let data1 = 0, vdst = 0 in {
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
-class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS_1A <
+multiclass DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass>
+ : DS_1A_Store_m <
op,
+ asm,
(outs),
- (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, ds_offset:$offset),
- asm#" $addr, $data0"#"$offset"#" [M0]",
- []> {
- let data1 = 0;
- let mayStore = 1;
- let mayLoad = 0;
- let vdst = 0;
+ (ins i1imm:$gds, VGPR_32:$addr, regClass:$data0, ds_offset:$offset, M0Reg:$m0),
+ asm#" $addr, $data0"#"$offset",
+ []>;
+
+multiclass DS_Store_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat> {
+ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>;
+
+ let vdst = 0 in {
+ def _si : DS_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
-class DS_Store2_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+multiclass DS_Store2_Helper <bits<8> op, string asm, RegisterClass regClass>
+ : DS_Store_m <
op,
+ asm,
(outs),
- (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, regClass:$data1,
- ds_offset0:$offset0, ds_offset1:$offset1),
- asm#" $addr, $data0, $data1"#"$offset0"#"$offset1 [M0]",
- []> {
- let mayStore = 1;
- let mayLoad = 0;
- let hasSideEffects = 0;
- let vdst = 0;
-}
+ (ins i1imm:$gds, VGPR_32:$addr, regClass:$data0, regClass:$data1,
+ ds_offset0:$offset0, ds_offset1:$offset1, M0Reg:$m0),
+ asm#" $addr, $data0, $data1"#"$offset0"#"$offset1",
+ []>;
// 1 address, 1 data.
-class DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc, string noRetOp = ""> : DS_1A <
- op,
- (outs rc:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, rc:$data0, ds_offset:$offset),
- asm#" $vdst, $addr, $data0"#"$offset"#" [M0]", []>,
- AtomicNoRet<noRetOp, 1> {
+multiclass DS_1A1D_RET_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat, string noRetOp> {
+ let mayLoad = 1, mayStore = 1,
+ hasPostISelHook = 1 // Adjusted to no return version.
+ in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>,
+ AtomicNoRet<noRetOp, 1>;
+
+ let data1 = 0 in {
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
+}
- let data1 = 0;
- let mayStore = 1;
- let mayLoad = 1;
+multiclass DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc,
+ string noRetOp = ""> : DS_1A1D_RET_m <
+ op, asm,
+ (outs rc:$vdst),
+ (ins i1imm:$gds, VGPR_32:$addr, rc:$data0, ds_offset:$offset, M0Reg:$m0),
+ asm#" $vdst, $addr, $data0"#"$offset", [], noRetOp>;
- let hasPostISelHook = 1; // Adjusted to no return version.
+// 1 address, 2 data.
+multiclass DS_1A2D_RET_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat, string noRetOp> {
+ let mayLoad = 1, mayStore = 1,
+ hasPostISelHook = 1 // Adjusted to no return version.
+ in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>,
+ AtomicNoRet<noRetOp, 1>;
+
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
}
-// 1 address, 2 data.
-class DS_1A2D_RET <bits<8> op, string asm, RegisterClass rc, string noRetOp = ""> : DS_1A <
- op,
+multiclass DS_1A2D_RET <bits<8> op, string asm, RegisterClass rc,
+ string noRetOp = ""> : DS_1A2D_RET_m <
+ op, asm,
(outs rc:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, ds_offset:$offset),
- asm#" $vdst, $addr, $data0, $data1"#"$offset"#" [M0]",
- []>,
- AtomicNoRet<noRetOp, 1> {
- let mayStore = 1;
- let mayLoad = 1;
- let hasPostISelHook = 1; // Adjusted to no return version.
-}
+ (ins i1imm:$gds, VGPR_32:$addr, rc:$data0, rc:$data1, ds_offset:$offset, M0Reg:$m0),
+ asm#" $vdst, $addr, $data0, $data1"#"$offset",
+ [], noRetOp>;
// 1 address, 2 data.
-class DS_1A2D_NORET <bits<8> op, string asm, RegisterClass rc, string noRetOp = asm> : DS_1A <
- op,
- (outs),
- (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, ds_offset:$offset),
- asm#" $addr, $data0, $data1"#"$offset"#" [M0]",
- []>,
- AtomicNoRet<noRetOp, 0> {
- let mayStore = 1;
- let mayLoad = 1;
+multiclass DS_1A2D_NORET_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat, string noRetOp> {
+ let mayLoad = 1, mayStore = 1 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>,
+ AtomicNoRet<noRetOp, 0>;
+
+ let vdst = 0 in {
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
-// 1 address, 1 data.
-class DS_1A1D_NORET <bits<8> op, string asm, RegisterClass rc, string noRetOp = asm> : DS_1A <
- op,
+multiclass DS_1A2D_NORET <bits<8> op, string asm, RegisterClass rc,
+ string noRetOp = asm> : DS_1A2D_NORET_m <
+ op, asm,
(outs),
- (ins i1imm:$gds, VReg_32:$addr, rc:$data0, ds_offset:$offset),
- asm#" $addr, $data0"#"$offset"#" [M0]",
- []>,
- AtomicNoRet<noRetOp, 0> {
+ (ins i1imm:$gds, VGPR_32:$addr, rc:$data0, rc:$data1, ds_offset:$offset, M0Reg:$m0),
+ asm#" $addr, $data0, $data1"#"$offset",
+ [], noRetOp>;
- let data1 = 0;
- let mayStore = 1;
- let mayLoad = 1;
+// 1 address, 1 data.
+multiclass DS_1A1D_NORET_m <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pat, string noRetOp> {
+ let mayLoad = 1, mayStore = 1 in {
+ def "" : DS_Pseudo <opName, outs, ins, pat>,
+ AtomicNoRet<noRetOp, 0>;
+
+ let data1 = 0, vdst = 0 in {
+ def _si : DS_1A_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_1A_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
}
+multiclass DS_1A1D_NORET <bits<8> op, string asm, RegisterClass rc,
+ string noRetOp = asm> : DS_1A1D_NORET_m <
+ op, asm,
+ (outs),
+ (ins i1imm:$gds, VGPR_32:$addr, rc:$data0, ds_offset:$offset, M0Reg:$m0),
+ asm#" $addr, $data0"#"$offset",
+ [], noRetOp>;
+
//===----------------------------------------------------------------------===//
// MTBUF classes
//===----------------------------------------------------------------------===//
@@ -1057,6 +1688,7 @@ class MTBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
MTBUF <outs, ins, "", pattern>,
SIMCInstr<opName, SISubtarget.NONE> {
let isPseudo = 1;
+ let isCodeGenOnly = 1;
}
class MTBUF_Real_si <bits<3> op, string opName, dag outs, dag ins,
@@ -1065,6 +1697,11 @@ class MTBUF_Real_si <bits<3> op, string opName, dag outs, dag ins,
MTBUFe <op>,
SIMCInstr<opName, SISubtarget.SI>;
+class MTBUF_Real_vi <bits<4> op, string opName, dag outs, dag ins, string asm> :
+ MTBUF <outs, ins, asm, []>,
+ MTBUFe_vi <op>,
+ SIMCInstr <opName, SISubtarget.VI>;
+
multiclass MTBUF_m <bits<3> op, string opName, dag outs, dag ins, string asm,
list<dag> pattern> {
@@ -1072,6 +1709,8 @@ multiclass MTBUF_m <bits<3> op, string opName, dag outs, dag ins, string asm,
def _si : MTBUF_Real_si <op, opName, outs, ins, asm>;
+ def _vi : MTBUF_Real_vi <{0, op{2}, op{1}, op{0}}, opName, outs, ins, asm>;
+
}
let mayStore = 1, mayLoad = 0 in {
@@ -1080,8 +1719,8 @@ multiclass MTBUF_Store_Helper <bits<3> op, string opName,
RegisterClass regClass> : MTBUF_m <
op, opName, (outs),
(ins regClass:$vdata, u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
- i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr,
- SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset),
+ i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VGPR_32:$vaddr,
+ SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SCSrc_32:$soffset),
opName#" $vdata, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
#" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", []
>;
@@ -1094,43 +1733,124 @@ multiclass MTBUF_Load_Helper <bits<3> op, string opName,
RegisterClass regClass> : MTBUF_m <
op, opName, (outs regClass:$dst),
(ins u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
- i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, SReg_128:$srsrc,
- i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset),
+ i8imm:$dfmt, i8imm:$nfmt, VGPR_32:$vaddr, SReg_128:$srsrc,
+ i1imm:$slc, i1imm:$tfe, SCSrc_32:$soffset),
opName#" $dst, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
#" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", []
>;
} // mayLoad = 1, mayStore = 0
-class MUBUFAddr64Table <bit is_addr64, string suffix = ""> {
+//===----------------------------------------------------------------------===//
+// MUBUF classes
+//===----------------------------------------------------------------------===//
+class mubuf <bits<7> si, bits<7> vi = si> {
+ field bits<7> SI = si;
+ field bits<7> VI = vi;
+}
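
The mubuf carrier lets one defm record both encodings; the sop1/sop2/sopk/vopc wrappers used later in this diff follow the same scheme. A hypothetical usage sketch (example defs, not part of this commit):

// Hypothetical defs: when the opcode is unchanged on VI, the second
// parameter defaults to the SI value.
def enc_same : mubuf<0x30>;        // SI = VI = 0x30
def enc_diff : mubuf<0x38, 0x36>;  // remapped on VI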
+
+class MUBUFAddr64Table <bit is_addr64, string suffix = ""> {
bit IsAddr64 = is_addr64;
string OpName = NAME # suffix;
}
-class MUBUFAtomicAddr64 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern>
- : MUBUF <op, outs, ins, asm, pattern> {
+class MUBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
+ MUBUF <outs, ins, "", pattern>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+
+  // Dummy fields, so that we can use let statements around multiclasses.
+ bits<1> offen;
+ bits<1> idxen;
+ bits<8> vaddr;
+ bits<1> glc;
+ bits<1> slc;
+ bits<1> tfe;
+ bits<8> soffset;
+}
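
The dummy-fields comment reflects a TableGen rule: a `let` wrapped around a multiclass overrides the named field in every def it produces, so the encoding-less pseudo must declare the fields even though they encode nothing. A minimal standalone sketch (hypothetical names, not part of this commit):

// Hypothetical sketch: the outer `let` must resolve `glc` in every def it
// covers, so the encoding-less class declares it as a dummy.
class WithEnc { bits<1> glc; }
class NoEnc   { bits<1> glc; }   // dummy field; only here so `let glc` is legal
multiclass Pair {
  def _e  : WithEnc;
  def _ne : NoEnc;
}
let glc = 0 in defm EX : Pair;   // would error if NoEnc lacked a glc field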
+
+class MUBUF_Real_si <mubuf op, string opName, dag outs, dag ins,
+ string asm> :
+ MUBUF <outs, ins, asm, []>,
+ MUBUFe <op.SI>,
+ SIMCInstr<opName, SISubtarget.SI> {
+ let lds = 0;
+}
- let offen = 0;
- let idxen = 0;
- let addr64 = 1;
- let tfe = 0;
+class MUBUF_Real_vi <mubuf op, string opName, dag outs, dag ins,
+ string asm> :
+ MUBUF <outs, ins, asm, []>,
+ MUBUFe_vi <op.VI>,
+ SIMCInstr<opName, SISubtarget.VI> {
let lds = 0;
- let soffset = 128;
}
-class MUBUFAtomicOffset <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern>
- : MUBUF <op, outs, ins, asm, pattern> {
+multiclass MUBUF_m <mubuf op, string opName, dag outs, dag ins, string asm,
+ list<dag> pattern> {
+
+ def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
+ MUBUFAddr64Table <0>;
- let offen = 0;
- let idxen = 0;
- let addr64 = 0;
- let tfe = 0;
+ let addr64 = 0 in {
+ def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
+ }
+
+ def _vi : MUBUF_Real_vi <op, opName, outs, ins, asm>;
+}
+
+multiclass MUBUFAddr64_m <mubuf op, string opName, dag outs,
+ dag ins, string asm, list<dag> pattern> {
+
+ def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
+ MUBUFAddr64Table <1>;
+
+ let addr64 = 1 in {
+ def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
+ }
+
+ // There is no VI version. If the pseudo is selected, it should be lowered
+ // for VI appropriately.
+}
+
+class MUBUF_si <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ MUBUF <outs, ins, asm, pattern>, MUBUFe <op> {
let lds = 0;
- let vaddr = 0;
}
-multiclass MUBUF_Atomic <bits<7> op, string name, RegisterClass rc,
+multiclass MUBUFAtomicOffset_m <mubuf op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern, bit is_return> {
+
+ def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
+ MUBUFAddr64Table <0, !if(is_return, "_RTN", "")>,
+ AtomicNoRet<NAME#"_OFFSET", is_return>;
+
+ let offen = 0, idxen = 0, tfe = 0, vaddr = 0 in {
+ let addr64 = 0 in {
+ def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
+ }
+
+ def _vi : MUBUF_Real_vi <op, opName, outs, ins, asm>;
+ }
+}
+
+multiclass MUBUFAtomicAddr64_m <mubuf op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern, bit is_return> {
+
+ def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
+ MUBUFAddr64Table <1, !if(is_return, "_RTN", "")>,
+ AtomicNoRet<NAME#"_ADDR64", is_return>;
+
+ let offen = 0, idxen = 0, addr64 = 1, tfe = 0 in {
+ def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
+ }
+
+ // There is no VI version. If the pseudo is selected, it should be lowered
+ // for VI appropriately.
+}
+
+multiclass MUBUF_Atomic <mubuf op, string name, RegisterClass rc,
ValueType vt, SDPatternOperator atomic> {
let mayStore = 1, mayLoad = 1, hasPostISelHook = 1 in {
@@ -1138,174 +1858,149 @@ multiclass MUBUF_Atomic <bits<7> op, string name, RegisterClass rc,
// No return variants
let glc = 0 in {
- def _ADDR64 : MUBUFAtomicAddr64 <
- op, (outs),
+ defm _ADDR64 : MUBUFAtomicAddr64_m <
+ op, name#"_addr64", (outs),
(ins rc:$vdata, SReg_128:$srsrc, VReg_64:$vaddr,
- mbuf_offset:$offset, slc:$slc),
- name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset"#"$slc", []
- >, MUBUFAddr64Table<1>, AtomicNoRet<NAME#"_ADDR64", 0>;
+ mbuf_offset:$offset, SCSrc_32:$soffset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#"$slc", [], 0
+ >;
- def _OFFSET : MUBUFAtomicOffset <
- op, (outs),
+ defm _OFFSET : MUBUFAtomicOffset_m <
+ op, name#"_offset", (outs),
(ins rc:$vdata, SReg_128:$srsrc, mbuf_offset:$offset,
- SSrc_32:$soffset, slc:$slc),
- name#" $vdata, $srsrc, $soffset"#"$offset"#"$slc", []
- >, MUBUFAddr64Table<0>, AtomicNoRet<NAME#"_OFFSET", 0>;
+ SCSrc_32:$soffset, slc:$slc),
+ name#" $vdata, $srsrc, $soffset"#"$offset"#"$slc", [], 0
+ >;
} // glc = 0
  // Variants that return values
let glc = 1, Constraints = "$vdata = $vdata_in",
DisableEncoding = "$vdata_in" in {
- def _RTN_ADDR64 : MUBUFAtomicAddr64 <
- op, (outs rc:$vdata),
+ defm _RTN_ADDR64 : MUBUFAtomicAddr64_m <
+ op, name#"_rtn_addr64", (outs rc:$vdata),
(ins rc:$vdata_in, SReg_128:$srsrc, VReg_64:$vaddr,
- mbuf_offset:$offset, slc:$slc),
- name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset"#" glc"#"$slc",
+ mbuf_offset:$offset, SSrc_32:$soffset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#" glc"#"$slc",
[(set vt:$vdata,
- (atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i16:$offset,
- i1:$slc), vt:$vdata_in))]
- >, MUBUFAddr64Table<1, "_RTN">, AtomicNoRet<NAME#"_ADDR64", 1>;
+ (atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i32:$soffset,
+ i16:$offset, i1:$slc), vt:$vdata_in))], 1
+ >;
- def _RTN_OFFSET : MUBUFAtomicOffset <
- op, (outs rc:$vdata),
+ defm _RTN_OFFSET : MUBUFAtomicOffset_m <
+ op, name#"_rtn_offset", (outs rc:$vdata),
(ins rc:$vdata_in, SReg_128:$srsrc, mbuf_offset:$offset,
- SSrc_32:$soffset, slc:$slc),
+ SCSrc_32:$soffset, slc:$slc),
name#" $vdata, $srsrc, $soffset"#"$offset"#" glc $slc",
[(set vt:$vdata,
(atomic (MUBUFOffsetAtomic v4i32:$srsrc, i32:$soffset, i16:$offset,
- i1:$slc), vt:$vdata_in))]
- >, MUBUFAddr64Table<0, "_RTN">, AtomicNoRet<NAME#"_OFFSET", 1>;
+ i1:$slc), vt:$vdata_in))], 1
+ >;
} // glc = 1
} // mayStore = 1, mayLoad = 1, hasPostISelHook = 1
}
-multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass,
+multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
ValueType load_vt = i32,
SDPatternOperator ld = null_frag> {
- let lds = 0, mayLoad = 1 in {
+ let mayLoad = 1, mayStore = 0 in {
+ let offen = 0, idxen = 0, vaddr = 0 in {
+ defm _OFFSET : MUBUF_m <op, name#"_offset", (outs regClass:$vdata),
+ (ins SReg_128:$srsrc,
+ mbuf_offset:$offset, SCSrc_32:$soffset, glc:$glc,
+ slc:$slc, tfe:$tfe),
+ name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
+ [(set load_vt:$vdata, (ld (MUBUFOffset v4i32:$srsrc,
+ i32:$soffset, i16:$offset,
+ i1:$glc, i1:$slc, i1:$tfe)))]>;
+ }
- let addr64 = 0 in {
+ let offen = 1, idxen = 0 in {
+ defm _OFFEN : MUBUF_m <op, name#"_offen", (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VGPR_32:$vaddr,
+ SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc, slc:$slc,
+ tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ }
+
+ let offen = 0, idxen = 1 in {
+ defm _IDXEN : MUBUF_m <op, name#"_idxen", (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VGPR_32:$vaddr,
+ mbuf_offset:$offset, SCSrc_32:$soffset, glc:$glc,
+ slc:$slc, tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ }
- let offen = 0, idxen = 0, vaddr = 0 in {
- def _OFFSET : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc,
- mbuf_offset:$offset, SSrc_32:$soffset, glc:$glc,
- slc:$slc, tfe:$tfe),
- asm#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
- [(set load_vt:$vdata, (ld (MUBUFOffset v4i32:$srsrc,
- i32:$soffset, i16:$offset,
- i1:$glc, i1:$slc, i1:$tfe)))]>,
- MUBUFAddr64Table<0>;
- }
-
- let offen = 1, idxen = 0 in {
- def _OFFEN : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_32:$vaddr,
- SSrc_32:$soffset, mbuf_offset:$offset, glc:$glc, slc:$slc,
- tfe:$tfe),
- asm#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
- }
-
- let offen = 0, idxen = 1 in {
- def _IDXEN : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_32:$vaddr,
- mbuf_offset:$offset, SSrc_32:$soffset, glc:$glc,
- slc:$slc, tfe:$tfe),
- asm#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
- }
-
- let offen = 1, idxen = 1 in {
- def _BOTHEN : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_64:$vaddr,
- SSrc_32:$soffset, glc:$glc, slc:$slc, tfe:$tfe),
- asm#" $vdata, $vaddr, $srsrc, $soffset, idxen offen"#"$glc"#"$slc"#"$tfe", []>;
- }
+ let offen = 1, idxen = 1 in {
+ defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_64:$vaddr,
+ SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
}
- let offen = 0, idxen = 0, addr64 = 1, glc = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */ in {
- def _ADDR64 : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_64:$vaddr, mbuf_offset:$offset),
- asm#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset",
+ let offen = 0, idxen = 0, glc = 0, slc = 0, tfe = 0 in {
+ defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_64:$vaddr,
+ SCSrc_32:$soffset, mbuf_offset:$offset),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset",
[(set load_vt:$vdata, (ld (MUBUFAddr64 v4i32:$srsrc,
- i64:$vaddr, i16:$offset)))]>, MUBUFAddr64Table<1>;
+ i64:$vaddr, i32:$soffset,
+ i16:$offset)))]>;
}
}
}
-multiclass MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass,
+multiclass MUBUF_Store_Helper <mubuf op, string name, RegisterClass vdataClass,
ValueType store_vt, SDPatternOperator st> {
-
- let addr64 = 0, lds = 0 in {
-
- def "" : MUBUF <
- op, (outs),
- (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_32:$vaddr, SSrc_32:$soffset,
- mbuf_offset:$offset, offen:$offen, idxen:$idxen, glc:$glc, slc:$slc,
- tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset"#"$offen"#"$idxen"#"$offset"#
- "$glc"#"$slc"#"$tfe",
- []
- >;
+ let mayLoad = 0, mayStore = 1 in {
+ defm : MUBUF_m <op, name, (outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc, VGPR_32:$vaddr, SCSrc_32:$soffset,
+ mbuf_offset:$offset, offen:$offen, idxen:$idxen, glc:$glc, slc:$slc,
+ tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset"#"$offen"#"$idxen"#"$offset"#
+ "$glc"#"$slc"#"$tfe", []>;
let offen = 0, idxen = 0, vaddr = 0 in {
- def _OFFSET : MUBUF <
- op, (outs),
- (ins vdataClass:$vdata, SReg_128:$srsrc, mbuf_offset:$offset,
- SSrc_32:$soffset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
- [(st store_vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset,
- i16:$offset, i1:$glc, i1:$slc,
- i1:$tfe))]
- >, MUBUFAddr64Table<0>;
+ defm _OFFSET : MUBUF_m <op, name#"_offset",(outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc, mbuf_offset:$offset,
+ SCSrc_32:$soffset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
+ [(st store_vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset,
+ i16:$offset, i1:$glc, i1:$slc, i1:$tfe))]>;
} // offen = 0, idxen = 0, vaddr = 0
let offen = 1, idxen = 0 in {
- def _OFFEN : MUBUF <
- op, (outs),
- (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_32:$vaddr, SSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#
- "$glc"#"$slc"#"$tfe",
- []
- >;
+ defm _OFFEN : MUBUF_m <op, name#"_offen", (outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc, VGPR_32:$vaddr, SCSrc_32:$soffset,
+ mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#
+ "$glc"#"$slc"#"$tfe", []>;
  } // End offen = 1, idxen = 0
- } // End addr64 = 0, lds = 0
-
- def _ADDR64 : MUBUF <
- op, (outs),
- (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr, mbuf_offset:$offset),
- name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset",
- [(st store_vt:$vdata,
- (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i16:$offset))]>, MUBUFAddr64Table<1>
- {
-
- let mayLoad = 0;
- let mayStore = 1;
-
- // Encoding
- let offen = 0;
- let idxen = 0;
- let glc = 0;
- let addr64 = 1;
- let lds = 0;
- let slc = 0;
- let tfe = 0;
- let soffset = 128; // ZERO
- }
+ let offen = 0, idxen = 0, glc = 0, slc = 0, tfe = 0 in {
+ defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc,
+ VReg_64:$vaddr, SCSrc_32:$soffset,
+ mbuf_offset:$offset),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset",
+ [(st store_vt:$vdata,
+ (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr,
+ i32:$soffset, i16:$offset))]>;
+ }
+ } // End mayLoad = 0, mayStore = 1
}
class FLAT_Load_Helper <bits<7> op, string asm, RegisterClass regClass> :
- FLAT <op, (outs regClass:$data),
+ FLAT <op, (outs regClass:$vdst),
(ins VReg_64:$addr),
- asm#" $data, $addr, [M0, FLAT_SCRATCH]", []> {
+ asm#" $vdst, $addr, [M0, FLAT_SCRATCH]", []> {
let glc = 0;
let slc = 0;
let tfe = 0;
+ let data = 0;
let mayLoad = 1;
}
@@ -1321,6 +2016,7 @@ class FLAT_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
let glc = 0;
let slc = 0;
let tfe = 0;
+ let vdst = 0;
}
class MIMG_Mask <string op, int channels> {
@@ -1339,7 +2035,7 @@ class MIMG_NoSampler_Helper <bits<7> op, string asm,
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc",
[]> {
- let SSAMP = 0;
+ let ssamp = 0;
let mayLoad = 1;
let mayStore = 0;
let hasPostISelHook = 1;
@@ -1348,7 +2044,7 @@ class MIMG_NoSampler_Helper <bits<7> op, string asm,
multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
int channels> {
- def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_32>,
+ def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VGPR_32>,
MIMG_Mask<asm#"_V1", channels>;
def _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>,
MIMG_Mask<asm#"_V2", channels>;
@@ -1357,7 +2053,7 @@ multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
}
multiclass MIMG_NoSampler <bits<7> op, string asm> {
- defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VReg_32, 1>;
+ defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VGPR_32, 1>;
defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 2>;
defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 3>;
defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
@@ -1365,7 +2061,7 @@ multiclass MIMG_NoSampler <bits<7> op, string asm> {
class MIMG_Sampler_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- RegisterClass src_rc> : MIMG <
+ RegisterClass src_rc, int wqm> : MIMG <
op,
(outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
@@ -1377,33 +2073,41 @@ class MIMG_Sampler_Helper <bits<7> op, string asm,
let mayLoad = 1;
let mayStore = 0;
let hasPostISelHook = 1;
+ let WQM = wqm;
}
multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- int channels> {
- def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_32>,
+ int channels, int wqm> {
+ def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VGPR_32, wqm>,
MIMG_Mask<asm#"_V1", channels>;
- def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64>,
+ def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64, wqm>,
MIMG_Mask<asm#"_V2", channels>;
- def _V4 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_128>,
+ def _V4 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_128, wqm>,
MIMG_Mask<asm#"_V4", channels>;
- def _V8 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_256>,
+ def _V8 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_256, wqm>,
MIMG_Mask<asm#"_V8", channels>;
- def _V16 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_512>,
+ def _V16 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_512, wqm>,
MIMG_Mask<asm#"_V16", channels>;
}
multiclass MIMG_Sampler <bits<7> op, string asm> {
- defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VReg_32, 1>;
- defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2>;
- defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3>;
- defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4>;
+ defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 0>;
+ defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 0>;
+ defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 0>;
+ defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 0>;
+}
+
+multiclass MIMG_Sampler_WQM <bits<7> op, string asm> {
+ defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 1>;
+ defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 1>;
+ defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 1>;
+ defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 1>;
}
class MIMG_Gather_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- RegisterClass src_rc> : MIMG <
+ RegisterClass src_rc, int wqm> : MIMG <
op,
(outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
@@ -1424,28 +2128,36 @@ class MIMG_Gather_Helper <bits<7> op, string asm,
// Therefore, disable all code which updates DMASK by setting these two:
let MIMG = 0;
let hasPostISelHook = 0;
+ let WQM = wqm;
}
multiclass MIMG_Gather_Src_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- int channels> {
- def _V1 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_32>,
+ int channels, int wqm> {
+ def _V1 : MIMG_Gather_Helper <op, asm, dst_rc, VGPR_32, wqm>,
MIMG_Mask<asm#"_V1", channels>;
- def _V2 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_64>,
+ def _V2 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_64, wqm>,
MIMG_Mask<asm#"_V2", channels>;
- def _V4 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_128>,
+ def _V4 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_128, wqm>,
MIMG_Mask<asm#"_V4", channels>;
- def _V8 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_256>,
+ def _V8 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_256, wqm>,
MIMG_Mask<asm#"_V8", channels>;
- def _V16 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_512>,
+ def _V16 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_512, wqm>,
MIMG_Mask<asm#"_V16", channels>;
}
multiclass MIMG_Gather <bits<7> op, string asm> {
- defm _V1 : MIMG_Gather_Src_Helper<op, asm, VReg_32, 1>;
- defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2>;
- defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3>;
- defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4>;
+ defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 0>;
+ defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 0>;
+ defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 0>;
+ defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 0>;
+}
+
+multiclass MIMG_Gather_WQM <bits<7> op, string asm> {
+ defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 1>;
+ defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 1>;
+ defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 1>;
+ defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 1>;
}
//===----------------------------------------------------------------------===//
@@ -1496,20 +2208,12 @@ def getCommuteOrig : InstrMapping {
let ValueCols = [["1"]];
}
-def isDS : InstrMapping {
- let FilterClass = "DS";
- let RowFields = ["Inst"];
- let ColFields = ["Size"];
- let KeyCol = ["8"];
- let ValueCols = [["8"]];
-}
-
-def getMCOpcode : InstrMapping {
+def getMCOpcodeGen : InstrMapping {
let FilterClass = "SIMCInstr";
let RowFields = ["PseudoInstr"];
let ColFields = ["Subtarget"];
let KeyCol = [!cast<string>(SISubtarget.NONE)];
- let ValueCols = [[!cast<string>(SISubtarget.SI)]];
+ let ValueCols = [[!cast<string>(SISubtarget.SI)],[!cast<string>(SISubtarget.VI)]];
}
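
With two ValueCols, the generated lookup now returns one MC opcode per subtarget column instead of only the SI encoding. A toy sketch of the same InstrMapping shape (hypothetical mapping name and field values; assumes the InstrMapping class from llvm/Target/Target.td is in scope):

// Hypothetical mapping: rows are keyed by PseudoInstr, and each row carries
// one real opcode per subtarget column.
def getRealOpcode : InstrMapping {
  let FilterClass = "MyMCInstr";     // assumed analogue of SIMCInstr
  let RowFields = ["PseudoInstr"];
  let ColFields = ["Subtarget"];
  let KeyCol = ["NONE"];             // the pseudo selects the row
  let ValueCols = [["SI"], ["VI"]];  // one column per generation
}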
def getAddr64Inst : InstrMapping {
@@ -1539,3 +2243,5 @@ def getAtomicNoRetOp : InstrMapping {
}
include "SIInstructions.td"
+include "CIInstructions.td"
+include "VIInstructions.td"
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 90da7a9..4f72e99 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -26,11 +26,18 @@ def SendMsgImm : Operand<i32> {
let PrintMethod = "printSendMsg";
}
-def isSI : Predicate<"Subtarget.getGeneration() "
+def isGCN : Predicate<"Subtarget->getGeneration() "
">= AMDGPUSubtarget::SOUTHERN_ISLANDS">;
-
-def isCI : Predicate<"Subtarget.getGeneration() "
+def isSICI : Predicate<
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||"
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS"
+>;
+def isCI : Predicate<"Subtarget->getGeneration() "
">= AMDGPUSubtarget::SEA_ISLANDS">;
+def isVI : Predicate <
+ "Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS"
+>;
+
 def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
def SWaitMatchClass : AsmOperandClass {
@@ -43,7 +50,7 @@ def WAIT_FLAG : InstFlag<"printWaitFlag"> {
let ParserMatchClass = SWaitMatchClass;
}
-let SubtargetPredicate = isSI in {
+let SubtargetPredicate = isGCN in {
//===----------------------------------------------------------------------===//
// EXP Instructions
@@ -96,90 +103,99 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
//===----------------------------------------------------------------------===//
let isMoveImm = 1 in {
-def S_MOV_B32 : SOP1_32 <0x00000003, "s_mov_b32", []>;
-def S_MOV_B64 : SOP1_64 <0x00000004, "s_mov_b64", []>;
-def S_CMOV_B32 : SOP1_32 <0x00000005, "s_cmov_b32", []>;
-def S_CMOV_B64 : SOP1_64 <0x00000006, "s_cmov_b64", []>;
+ let isReMaterializable = 1 in {
+ defm S_MOV_B32 : SOP1_32 <sop1<0x03, 0x00>, "s_mov_b32", []>;
+ defm S_MOV_B64 : SOP1_64 <sop1<0x04, 0x01>, "s_mov_b64", []>;
+  } // End isReMaterializable = 1
+
+ let Uses = [SCC] in {
+ defm S_CMOV_B32 : SOP1_32 <sop1<0x05, 0x02>, "s_cmov_b32", []>;
+ defm S_CMOV_B64 : SOP1_64 <sop1<0x06, 0x03>, "s_cmov_b64", []>;
+ } // End Uses = [SCC]
} // End isMoveImm = 1
-def S_NOT_B32 : SOP1_32 <0x00000007, "s_not_b32",
- [(set i32:$dst, (not i32:$src0))]
->;
+let Defs = [SCC] in {
+ defm S_NOT_B32 : SOP1_32 <sop1<0x07, 0x04>, "s_not_b32",
+ [(set i32:$dst, (not i32:$src0))]
+ >;
-def S_NOT_B64 : SOP1_64 <0x00000008, "s_not_b64",
- [(set i64:$dst, (not i64:$src0))]
->;
-def S_WQM_B32 : SOP1_32 <0x00000009, "s_wqm_b32", []>;
-def S_WQM_B64 : SOP1_64 <0x0000000a, "s_wqm_b64", []>;
-def S_BREV_B32 : SOP1_32 <0x0000000b, "s_brev_b32",
+ defm S_NOT_B64 : SOP1_64 <sop1<0x08, 0x05>, "s_not_b64",
+ [(set i64:$dst, (not i64:$src0))]
+ >;
+ defm S_WQM_B32 : SOP1_32 <sop1<0x09, 0x06>, "s_wqm_b32", []>;
+ defm S_WQM_B64 : SOP1_64 <sop1<0x0a, 0x07>, "s_wqm_b64", []>;
+} // End Defs = [SCC]
+
+
+defm S_BREV_B32 : SOP1_32 <sop1<0x0b, 0x08>, "s_brev_b32",
[(set i32:$dst, (AMDGPUbrev i32:$src0))]
>;
-def S_BREV_B64 : SOP1_64 <0x0000000c, "s_brev_b64", []>;
+defm S_BREV_B64 : SOP1_64 <sop1<0x0c, 0x09>, "s_brev_b64", []>;
-////def S_BCNT0_I32_B32 : SOP1_BCNT0 <0x0000000d, "s_bcnt0_i32_b32", []>;
-////def S_BCNT0_I32_B64 : SOP1_BCNT0 <0x0000000e, "s_bcnt0_i32_b64", []>;
-def S_BCNT1_I32_B32 : SOP1_32 <0x0000000f, "s_bcnt1_i32_b32",
- [(set i32:$dst, (ctpop i32:$src0))]
->;
-def S_BCNT1_I32_B64 : SOP1_32_64 <0x00000010, "s_bcnt1_i32_b64", []>;
+let Defs = [SCC] in {
+ defm S_BCNT0_I32_B32 : SOP1_32 <sop1<0x0d, 0x0a>, "s_bcnt0_i32_b32", []>;
+ defm S_BCNT0_I32_B64 : SOP1_32_64 <sop1<0x0e, 0x0b>, "s_bcnt0_i32_b64", []>;
+ defm S_BCNT1_I32_B32 : SOP1_32 <sop1<0x0f, 0x0c>, "s_bcnt1_i32_b32",
+ [(set i32:$dst, (ctpop i32:$src0))]
+ >;
+ defm S_BCNT1_I32_B64 : SOP1_32_64 <sop1<0x10, 0x0d>, "s_bcnt1_i32_b64", []>;
+} // End Defs = [SCC]
-////def S_FF0_I32_B32 : SOP1_32 <0x00000011, "s_ff0_i32_b32", []>;
-////def S_FF0_I32_B64 : SOP1_FF0 <0x00000012, "s_ff0_i32_b64", []>;
-def S_FF1_I32_B32 : SOP1_32 <0x00000013, "s_ff1_i32_b32",
+defm S_FF0_I32_B32 : SOP1_32 <sop1<0x11, 0x0e>, "s_ff0_i32_b32", []>;
+defm S_FF0_I32_B64 : SOP1_32_64 <sop1<0x12, 0x0f>, "s_ff0_i32_b64", []>;
+defm S_FF1_I32_B32 : SOP1_32 <sop1<0x13, 0x10>, "s_ff1_i32_b32",
[(set i32:$dst, (cttz_zero_undef i32:$src0))]
>;
-////def S_FF1_I32_B64 : SOP1_FF1 <0x00000014, "s_ff1_i32_b64", []>;
+defm S_FF1_I32_B64 : SOP1_32_64 <sop1<0x14, 0x11>, "s_ff1_i32_b64", []>;
-def S_FLBIT_I32_B32 : SOP1_32 <0x00000015, "s_flbit_i32_b32",
+defm S_FLBIT_I32_B32 : SOP1_32 <sop1<0x15, 0x12>, "s_flbit_i32_b32",
[(set i32:$dst, (ctlz_zero_undef i32:$src0))]
>;
-//def S_FLBIT_I32_B64 : SOP1_32 <0x00000016, "s_flbit_i32_b64", []>;
-def S_FLBIT_I32 : SOP1_32 <0x00000017, "s_flbit_i32", []>;
-//def S_FLBIT_I32_I64 : SOP1_32 <0x00000018, "s_flbit_i32_i64", []>;
-def S_SEXT_I32_I8 : SOP1_32 <0x00000019, "s_sext_i32_i8",
+defm S_FLBIT_I32_B64 : SOP1_32_64 <sop1<0x16, 0x13>, "s_flbit_i32_b64", []>;
+defm S_FLBIT_I32 : SOP1_32 <sop1<0x17, 0x14>, "s_flbit_i32", []>;
+defm S_FLBIT_I32_I64 : SOP1_32_64 <sop1<0x18, 0x15>, "s_flbit_i32_i64", []>;
+defm S_SEXT_I32_I8 : SOP1_32 <sop1<0x19, 0x16>, "s_sext_i32_i8",
[(set i32:$dst, (sext_inreg i32:$src0, i8))]
>;
-def S_SEXT_I32_I16 : SOP1_32 <0x0000001a, "s_sext_i32_i16",
+defm S_SEXT_I32_I16 : SOP1_32 <sop1<0x1a, 0x17>, "s_sext_i32_i16",
[(set i32:$dst, (sext_inreg i32:$src0, i16))]
>;
-////def S_BITSET0_B32 : SOP1_BITSET0 <0x0000001b, "s_bitset0_b32", []>;
-////def S_BITSET0_B64 : SOP1_BITSET0 <0x0000001c, "s_bitset0_b64", []>;
-////def S_BITSET1_B32 : SOP1_BITSET1 <0x0000001d, "s_bitset1_b32", []>;
-////def S_BITSET1_B64 : SOP1_BITSET1 <0x0000001e, "s_bitset1_b64", []>;
-def S_GETPC_B64 : SOP1 <
- 0x0000001f, (outs SReg_64:$dst), (ins), "s_getpc_b64 $dst", []
-> {
- let SSRC0 = 0;
-}
-def S_SETPC_B64 : SOP1_64 <0x00000020, "s_setpc_b64", []>;
-def S_SWAPPC_B64 : SOP1_64 <0x00000021, "s_swappc_b64", []>;
-def S_RFE_B64 : SOP1_64 <0x00000022, "s_rfe_b64", []>;
-
-let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC] in {
-
-def S_AND_SAVEEXEC_B64 : SOP1_64 <0x00000024, "s_and_saveexec_b64", []>;
-def S_OR_SAVEEXEC_B64 : SOP1_64 <0x00000025, "s_or_saveexec_b64", []>;
-def S_XOR_SAVEEXEC_B64 : SOP1_64 <0x00000026, "s_xor_saveexec_b64", []>;
-def S_ANDN2_SAVEEXEC_B64 : SOP1_64 <0x00000027, "s_andn2_saveexec_b64", []>;
-def S_ORN2_SAVEEXEC_B64 : SOP1_64 <0x00000028, "s_orn2_saveexec_b64", []>;
-def S_NAND_SAVEEXEC_B64 : SOP1_64 <0x00000029, "s_nand_saveexec_b64", []>;
-def S_NOR_SAVEEXEC_B64 : SOP1_64 <0x0000002a, "s_nor_saveexec_b64", []>;
-def S_XNOR_SAVEEXEC_B64 : SOP1_64 <0x0000002b, "s_xnor_saveexec_b64", []>;
-
-} // End hasSideEffects = 1
-
-def S_QUADMASK_B32 : SOP1_32 <0x0000002c, "s_quadmask_b32", []>;
-def S_QUADMASK_B64 : SOP1_64 <0x0000002d, "s_quadmask_b64", []>;
-def S_MOVRELS_B32 : SOP1_32 <0x0000002e, "s_movrels_b32", []>;
-def S_MOVRELS_B64 : SOP1_64 <0x0000002f, "s_movrels_b64", []>;
-def S_MOVRELD_B32 : SOP1_32 <0x00000030, "s_movreld_b32", []>;
-def S_MOVRELD_B64 : SOP1_64 <0x00000031, "s_movreld_b64", []>;
-//def S_CBRANCH_JOIN : SOP1_ <0x00000032, "s_cbranch_join", []>;
-def S_MOV_REGRD_B32 : SOP1_32 <0x00000033, "s_mov_regrd_b32", []>;
-def S_ABS_I32 : SOP1_32 <0x00000034, "s_abs_i32", []>;
-def S_MOV_FED_B32 : SOP1_32 <0x00000035, "s_mov_fed_b32", []>;
+defm S_BITSET0_B32 : SOP1_32 <sop1<0x1b, 0x18>, "s_bitset0_b32", []>;
+defm S_BITSET0_B64 : SOP1_64 <sop1<0x1c, 0x19>, "s_bitset0_b64", []>;
+defm S_BITSET1_B32 : SOP1_32 <sop1<0x1d, 0x1a>, "s_bitset1_b32", []>;
+defm S_BITSET1_B64 : SOP1_64 <sop1<0x1e, 0x1b>, "s_bitset1_b64", []>;
+defm S_GETPC_B64 : SOP1_64_0 <sop1<0x1f, 0x1c>, "s_getpc_b64", []>;
+defm S_SETPC_B64 : SOP1_64 <sop1<0x20, 0x1d>, "s_setpc_b64", []>;
+defm S_SWAPPC_B64 : SOP1_64 <sop1<0x21, 0x1e>, "s_swappc_b64", []>;
+defm S_RFE_B64 : SOP1_64 <sop1<0x22, 0x1f>, "s_rfe_b64", []>;
+
+let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] in {
+
+defm S_AND_SAVEEXEC_B64 : SOP1_64 <sop1<0x24, 0x20>, "s_and_saveexec_b64", []>;
+defm S_OR_SAVEEXEC_B64 : SOP1_64 <sop1<0x25, 0x21>, "s_or_saveexec_b64", []>;
+defm S_XOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x26, 0x22>, "s_xor_saveexec_b64", []>;
+defm S_ANDN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x27, 0x23>, "s_andn2_saveexec_b64", []>;
+defm S_ORN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x28, 0x24>, "s_orn2_saveexec_b64", []>;
+defm S_NAND_SAVEEXEC_B64 : SOP1_64 <sop1<0x29, 0x25>, "s_nand_saveexec_b64", []>;
+defm S_NOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2a, 0x26>, "s_nor_saveexec_b64", []>;
+defm S_XNOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2b, 0x27>, "s_xnor_saveexec_b64", []>;
+
+} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
+
+defm S_QUADMASK_B32 : SOP1_32 <sop1<0x2c, 0x28>, "s_quadmask_b32", []>;
+defm S_QUADMASK_B64 : SOP1_64 <sop1<0x2d, 0x29>, "s_quadmask_b64", []>;
+defm S_MOVRELS_B32 : SOP1_32 <sop1<0x2e, 0x2a>, "s_movrels_b32", []>;
+defm S_MOVRELS_B64 : SOP1_64 <sop1<0x2f, 0x2b>, "s_movrels_b64", []>;
+defm S_MOVRELD_B32 : SOP1_32 <sop1<0x30, 0x2c>, "s_movreld_b32", []>;
+defm S_MOVRELD_B64 : SOP1_64 <sop1<0x31, 0x2d>, "s_movreld_b64", []>;
+defm S_CBRANCH_JOIN : SOP1_1 <sop1<0x32, 0x2e>, "s_cbranch_join", []>;
+defm S_MOV_REGRD_B32 : SOP1_32 <sop1<0x33, 0x2f>, "s_mov_regrd_b32", []>;
+let Defs = [SCC] in {
+ defm S_ABS_I32 : SOP1_32 <sop1<0x34, 0x30>, "s_abs_i32", []>;
+} // End Defs = [SCC]
+defm S_MOV_FED_B32 : SOP1_32 <sop1<0x35, 0x31>, "s_mov_fed_b32", []>;
//===----------------------------------------------------------------------===//
// SOP2 Instructions
@@ -187,119 +203,132 @@ def S_MOV_FED_B32 : SOP1_32 <0x00000035, "s_mov_fed_b32", []>;
let Defs = [SCC] in { // Carry out goes to SCC
let isCommutable = 1 in {
-def S_ADD_U32 : SOP2_32 <0x00000000, "s_add_u32", []>;
-def S_ADD_I32 : SOP2_32 <0x00000002, "s_add_i32",
+defm S_ADD_U32 : SOP2_32 <sop2<0x00>, "s_add_u32", []>;
+defm S_ADD_I32 : SOP2_32 <sop2<0x02>, "s_add_i32",
[(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
>;
} // End isCommutable = 1
-def S_SUB_U32 : SOP2_32 <0x00000001, "s_sub_u32", []>;
-def S_SUB_I32 : SOP2_32 <0x00000003, "s_sub_i32",
+defm S_SUB_U32 : SOP2_32 <sop2<0x01>, "s_sub_u32", []>;
+defm S_SUB_I32 : SOP2_32 <sop2<0x03>, "s_sub_i32",
[(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
>;
let Uses = [SCC] in { // Carry in comes from SCC
let isCommutable = 1 in {
-def S_ADDC_U32 : SOP2_32 <0x00000004, "s_addc_u32",
+defm S_ADDC_U32 : SOP2_32 <sop2<0x04>, "s_addc_u32",
[(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
} // End isCommutable = 1
-def S_SUBB_U32 : SOP2_32 <0x00000005, "s_subb_u32",
+defm S_SUBB_U32 : SOP2_32 <sop2<0x05>, "s_subb_u32",
[(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
} // End Uses = [SCC]
-} // End Defs = [SCC]
-def S_MIN_I32 : SOP2_32 <0x00000006, "s_min_i32",
+defm S_MIN_I32 : SOP2_32 <sop2<0x06>, "s_min_i32",
[(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]
>;
-def S_MIN_U32 : SOP2_32 <0x00000007, "s_min_u32",
+defm S_MIN_U32 : SOP2_32 <sop2<0x07>, "s_min_u32",
[(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]
>;
-def S_MAX_I32 : SOP2_32 <0x00000008, "s_max_i32",
+defm S_MAX_I32 : SOP2_32 <sop2<0x08>, "s_max_i32",
[(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]
>;
-def S_MAX_U32 : SOP2_32 <0x00000009, "s_max_u32",
+defm S_MAX_U32 : SOP2_32 <sop2<0x09>, "s_max_u32",
[(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]
>;
+} // End Defs = [SCC]
-def S_CSELECT_B32 : SOP2_SELECT_32 <
- 0x0000000a, "s_cselect_b32",
- []
->;
+defm S_CSELECT_B32 : SOP2_SELECT_32 <sop2<0x0a>, "s_cselect_b32", []>;
-def S_CSELECT_B64 : SOP2_64 <0x0000000b, "s_cselect_b64", []>;
+let Uses = [SCC] in {
+ defm S_CSELECT_B64 : SOP2_64 <sop2<0x0b>, "s_cselect_b64", []>;
+} // End Uses = [SCC]
-def S_AND_B32 : SOP2_32 <0x0000000e, "s_and_b32",
+let Defs = [SCC] in {
+defm S_AND_B32 : SOP2_32 <sop2<0x0e, 0x0c>, "s_and_b32",
[(set i32:$dst, (and i32:$src0, i32:$src1))]
>;
-def S_AND_B64 : SOP2_64 <0x0000000f, "s_and_b64",
+defm S_AND_B64 : SOP2_64 <sop2<0x0f, 0x0d>, "s_and_b64",
[(set i64:$dst, (and i64:$src0, i64:$src1))]
>;
-def S_OR_B32 : SOP2_32 <0x00000010, "s_or_b32",
+defm S_OR_B32 : SOP2_32 <sop2<0x10, 0x0e>, "s_or_b32",
[(set i32:$dst, (or i32:$src0, i32:$src1))]
>;
-def S_OR_B64 : SOP2_64 <0x00000011, "s_or_b64",
+defm S_OR_B64 : SOP2_64 <sop2<0x11, 0x0f>, "s_or_b64",
[(set i64:$dst, (or i64:$src0, i64:$src1))]
>;
-def S_XOR_B32 : SOP2_32 <0x00000012, "s_xor_b32",
+defm S_XOR_B32 : SOP2_32 <sop2<0x12, 0x10>, "s_xor_b32",
[(set i32:$dst, (xor i32:$src0, i32:$src1))]
>;
-def S_XOR_B64 : SOP2_64 <0x00000013, "s_xor_b64",
+defm S_XOR_B64 : SOP2_64 <sop2<0x13, 0x11>, "s_xor_b64",
[(set i64:$dst, (xor i64:$src0, i64:$src1))]
>;
-def S_ANDN2_B32 : SOP2_32 <0x00000014, "s_andn2_b32", []>;
-def S_ANDN2_B64 : SOP2_64 <0x00000015, "s_andn2_b64", []>;
-def S_ORN2_B32 : SOP2_32 <0x00000016, "s_orn2_b32", []>;
-def S_ORN2_B64 : SOP2_64 <0x00000017, "s_orn2_b64", []>;
-def S_NAND_B32 : SOP2_32 <0x00000018, "s_nand_b32", []>;
-def S_NAND_B64 : SOP2_64 <0x00000019, "s_nand_b64", []>;
-def S_NOR_B32 : SOP2_32 <0x0000001a, "s_nor_b32", []>;
-def S_NOR_B64 : SOP2_64 <0x0000001b, "s_nor_b64", []>;
-def S_XNOR_B32 : SOP2_32 <0x0000001c, "s_xnor_b32", []>;
-def S_XNOR_B64 : SOP2_64 <0x0000001d, "s_xnor_b64", []>;
+defm S_ANDN2_B32 : SOP2_32 <sop2<0x14, 0x12>, "s_andn2_b32", []>;
+defm S_ANDN2_B64 : SOP2_64 <sop2<0x15, 0x13>, "s_andn2_b64", []>;
+defm S_ORN2_B32 : SOP2_32 <sop2<0x16, 0x14>, "s_orn2_b32", []>;
+defm S_ORN2_B64 : SOP2_64 <sop2<0x17, 0x15>, "s_orn2_b64", []>;
+defm S_NAND_B32 : SOP2_32 <sop2<0x18, 0x16>, "s_nand_b32", []>;
+defm S_NAND_B64 : SOP2_64 <sop2<0x19, 0x17>, "s_nand_b64", []>;
+defm S_NOR_B32 : SOP2_32 <sop2<0x1a, 0x18>, "s_nor_b32", []>;
+defm S_NOR_B64 : SOP2_64 <sop2<0x1b, 0x19>, "s_nor_b64", []>;
+defm S_XNOR_B32 : SOP2_32 <sop2<0x1c, 0x1a>, "s_xnor_b32", []>;
+defm S_XNOR_B64 : SOP2_64 <sop2<0x1d, 0x1b>, "s_xnor_b64", []>;
+} // End Defs = [SCC]
// Use added complexity so these patterns are preferred to the VALU patterns.
let AddedComplexity = 1 in {
+let Defs = [SCC] in {
-def S_LSHL_B32 : SOP2_32 <0x0000001e, "s_lshl_b32",
+defm S_LSHL_B32 : SOP2_32 <sop2<0x1e, 0x1c>, "s_lshl_b32",
[(set i32:$dst, (shl i32:$src0, i32:$src1))]
>;
-def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "s_lshl_b64",
+defm S_LSHL_B64 : SOP2_64_32 <sop2<0x1f, 0x1d>, "s_lshl_b64",
[(set i64:$dst, (shl i64:$src0, i32:$src1))]
>;
-def S_LSHR_B32 : SOP2_32 <0x00000020, "s_lshr_b32",
+defm S_LSHR_B32 : SOP2_32 <sop2<0x20, 0x1e>, "s_lshr_b32",
[(set i32:$dst, (srl i32:$src0, i32:$src1))]
>;
-def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "s_lshr_b64",
+defm S_LSHR_B64 : SOP2_64_32 <sop2<0x21, 0x1f>, "s_lshr_b64",
[(set i64:$dst, (srl i64:$src0, i32:$src1))]
>;
-def S_ASHR_I32 : SOP2_32 <0x00000022, "s_ashr_i32",
+defm S_ASHR_I32 : SOP2_32 <sop2<0x22, 0x20>, "s_ashr_i32",
[(set i32:$dst, (sra i32:$src0, i32:$src1))]
>;
-def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "s_ashr_i64",
+defm S_ASHR_I64 : SOP2_64_32 <sop2<0x23, 0x21>, "s_ashr_i64",
[(set i64:$dst, (sra i64:$src0, i32:$src1))]
>;
+} // End Defs = [SCC]
-
-def S_BFM_B32 : SOP2_32 <0x00000024, "s_bfm_b32", []>;
-def S_BFM_B64 : SOP2_64 <0x00000025, "s_bfm_b64", []>;
-def S_MUL_I32 : SOP2_32 <0x00000026, "s_mul_i32",
+defm S_BFM_B32 : SOP2_32 <sop2<0x24, 0x22>, "s_bfm_b32", []>;
+defm S_BFM_B64 : SOP2_64 <sop2<0x25, 0x23>, "s_bfm_b64", []>;
+defm S_MUL_I32 : SOP2_32 <sop2<0x26, 0x24>, "s_mul_i32",
[(set i32:$dst, (mul i32:$src0, i32:$src1))]
>;
} // End AddedComplexity = 1
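
AddedComplexity is what makes the selector prefer these scalar patterns: among patterns matching the same node, the one with higher AddedComplexity is tried first. A hypothetical fragment showing the idea (assumes the usual include "llvm/Target/Target.td" context and the S_LSHL_B32 def above; not part of this commit):

// Hypothetical pattern: with AddedComplexity = 1 this SALU form wins over a
// VALU pattern of equal match size for the same shl node.
let AddedComplexity = 1 in
def : Pat <
  (i32 (shl i32:$src0, i32:$src1)),
  (S_LSHL_B32 $src0, $src1)
>;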
-def S_BFE_U32 : SOP2_32 <0x00000027, "s_bfe_u32", []>;
-def S_BFE_I32 : SOP2_32 <0x00000028, "s_bfe_i32", []>;
-def S_BFE_U64 : SOP2_64 <0x00000029, "s_bfe_u64", []>;
-def S_BFE_I64 : SOP2_64_32 <0x0000002a, "s_bfe_i64", []>;
-//def S_CBRANCH_G_FORK : SOP2_ <0x0000002b, "s_cbranch_g_fork", []>;
-def S_ABSDIFF_I32 : SOP2_32 <0x0000002c, "s_absdiff_i32", []>;
+let Defs = [SCC] in {
+defm S_BFE_U32 : SOP2_32 <sop2<0x27, 0x25>, "s_bfe_u32", []>;
+defm S_BFE_I32 : SOP2_32 <sop2<0x28, 0x26>, "s_bfe_i32", []>;
+defm S_BFE_U64 : SOP2_64 <sop2<0x29, 0x27>, "s_bfe_u64", []>;
+defm S_BFE_I64 : SOP2_64_32 <sop2<0x2a, 0x28>, "s_bfe_i64", []>;
+} // End Defs = [SCC]
+
+let sdst = 0 in {
+defm S_CBRANCH_G_FORK : SOP2_m <
+ sop2<0x2b, 0x29>, "s_cbranch_g_fork", (outs),
+ (ins SReg_64:$src0, SReg_64:$src1), "s_cbranch_g_fork $src0, $src1", []
+>;
+}
+
+let Defs = [SCC] in {
+defm S_ABSDIFF_I32 : SOP2_32 <sop2<0x2c, 0x2a>, "s_absdiff_i32", []>;
+} // End Defs = [SCC]
//===----------------------------------------------------------------------===//
// SOPC Instructions
@@ -328,9 +357,13 @@ def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "s_cmp_le_u32">;
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
-def S_MOVK_I32 : SOPK_32 <0x00000000, "s_movk_i32", []>;
+defm S_MOVK_I32 : SOPK_32 <sopk<0x00>, "s_movk_i32", []>;
} // End isReMaterializable = 1
-def S_CMOVK_I32 : SOPK_32 <0x00000002, "s_cmovk_i32", []>;
+let Uses = [SCC] in {
+ defm S_CMOVK_I32 : SOPK_32 <sopk<0x02, 0x01>, "s_cmovk_i32", []>;
+}
+
+let isCompare = 1 in {
/*
This instruction is disabled for now until we can figure out how to teach
@@ -344,38 +377,36 @@ SCC = S_CMPK_EQ_I32 SGPR0, imm
VCC = COPY SCC
VGPR0 = V_CNDMASK VCC, VGPR0, VGPR1
-def S_CMPK_EQ_I32 : SOPK <
- 0x00000003, (outs SCCReg:$dst), (ins SReg_32:$src0, i32imm:$src1),
- "s_cmpk_eq_i32",
+defm S_CMPK_EQ_I32 : SOPK_SCC <sopk<0x03, 0x02>, "s_cmpk_eq_i32",
[(set i1:$dst, (setcc i32:$src0, imm:$src1, SETEQ))]
>;
*/
-let isCompare = 1, Defs = [SCC] in {
-def S_CMPK_LG_I32 : SOPK_32 <0x00000004, "s_cmpk_lg_i32", []>;
-def S_CMPK_GT_I32 : SOPK_32 <0x00000005, "s_cmpk_gt_i32", []>;
-def S_CMPK_GE_I32 : SOPK_32 <0x00000006, "s_cmpk_ge_i32", []>;
-def S_CMPK_LT_I32 : SOPK_32 <0x00000007, "s_cmpk_lt_i32", []>;
-def S_CMPK_LE_I32 : SOPK_32 <0x00000008, "s_cmpk_le_i32", []>;
-def S_CMPK_EQ_U32 : SOPK_32 <0x00000009, "s_cmpk_eq_u32", []>;
-def S_CMPK_LG_U32 : SOPK_32 <0x0000000a, "s_cmpk_lg_u32", []>;
-def S_CMPK_GT_U32 : SOPK_32 <0x0000000b, "s_cmpk_gt_u32", []>;
-def S_CMPK_GE_U32 : SOPK_32 <0x0000000c, "s_cmpk_ge_u32", []>;
-def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "s_cmpk_lt_u32", []>;
-def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "s_cmpk_le_u32", []>;
-} // End isCompare = 1, Defs = [SCC]
-
-let Defs = [SCC], isCommutable = 1 in {
- def S_ADDK_I32 : SOPK_32 <0x0000000f, "s_addk_i32", []>;
- def S_MULK_I32 : SOPK_32 <0x00000010, "s_mulk_i32", []>;
+defm S_CMPK_LG_I32 : SOPK_SCC <sopk<0x04, 0x03>, "s_cmpk_lg_i32", []>;
+defm S_CMPK_GT_I32 : SOPK_SCC <sopk<0x05, 0x04>, "s_cmpk_gt_i32", []>;
+defm S_CMPK_GE_I32 : SOPK_SCC <sopk<0x06, 0x05>, "s_cmpk_ge_i32", []>;
+defm S_CMPK_LT_I32 : SOPK_SCC <sopk<0x07, 0x06>, "s_cmpk_lt_i32", []>;
+defm S_CMPK_LE_I32 : SOPK_SCC <sopk<0x08, 0x07>, "s_cmpk_le_i32", []>;
+defm S_CMPK_EQ_U32 : SOPK_SCC <sopk<0x09, 0x08>, "s_cmpk_eq_u32", []>;
+defm S_CMPK_LG_U32 : SOPK_SCC <sopk<0x0a, 0x09>, "s_cmpk_lg_u32", []>;
+defm S_CMPK_GT_U32 : SOPK_SCC <sopk<0x0b, 0x0a>, "s_cmpk_gt_u32", []>;
+defm S_CMPK_GE_U32 : SOPK_SCC <sopk<0x0c, 0x0b>, "s_cmpk_ge_u32", []>;
+defm S_CMPK_LT_U32 : SOPK_SCC <sopk<0x0d, 0x0c>, "s_cmpk_lt_u32", []>;
+defm S_CMPK_LE_U32 : SOPK_SCC <sopk<0x0e, 0x0d>, "s_cmpk_le_u32", []>;
+} // End isCompare = 1
+
+let isCommutable = 1 in {
+  let Defs = [SCC] in {
+ defm S_ADDK_I32 : SOPK_32 <sopk<0x0f, 0x0e>, "s_addk_i32", []>;
+ }
+ defm S_MULK_I32 : SOPK_32 <sopk<0x10, 0x0f>, "s_mulk_i32", []>;
}
-//def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "s_cbranch_i_fork", []>;
-def S_GETREG_B32 : SOPK_32 <0x00000012, "s_getreg_b32", []>;
-def S_SETREG_B32 : SOPK_32 <0x00000013, "s_setreg_b32", []>;
-def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "s_getreg_regrd_b32", []>;
-//def S_SETREG_IMM32_B32 : SOPK_32 <0x00000015, "s_setreg_imm32_b32", []>;
-//def EXP : EXP_ <0x00000000, "exp", []>;
+//defm S_CBRANCH_I_FORK : SOPK_ <sopk<0x11, 0x10>, "s_cbranch_i_fork", []>;
+defm S_GETREG_B32 : SOPK_32 <sopk<0x12, 0x11>, "s_getreg_b32", []>;
+defm S_SETREG_B32 : SOPK_32 <sopk<0x13, 0x12>, "s_setreg_b32", []>;
+defm S_GETREG_REGRD_B32 : SOPK_32 <sopk<0x14, 0x13>, "s_getreg_regrd_b32", []>;
+//defm S_SETREG_IMM32_B32 : SOPK_32 <sopk<0x15, 0x14>, "s_setreg_imm32_b32", []>;
//===----------------------------------------------------------------------===//
// SOPP Instructions
@@ -476,82 +507,84 @@ def S_TTRACEDATA : SOPP <0x00000016, (ins), "s_ttracedata"> {
let isCompare = 1 in {
-defm V_CMP_F_F32 : VOPC_F32 <vopc<0x0>, "v_cmp_f_f32">;
-defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1>, "v_cmp_lt_f32", COND_OLT>;
-defm V_CMP_EQ_F32 : VOPC_F32 <vopc<0x2>, "v_cmp_eq_f32", COND_OEQ>;
-defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3>, "v_cmp_le_f32", COND_OLE>;
-defm V_CMP_GT_F32 : VOPC_F32 <vopc<0x4>, "v_cmp_gt_f32", COND_OGT>;
-defm V_CMP_LG_F32 : VOPC_F32 <vopc<0x5>, "v_cmp_lg_f32">;
-defm V_CMP_GE_F32 : VOPC_F32 <vopc<0x6>, "v_cmp_ge_f32", COND_OGE>;
-defm V_CMP_O_F32 : VOPC_F32 <vopc<0x7>, "v_cmp_o_f32", COND_O>;
-defm V_CMP_U_F32 : VOPC_F32 <vopc<0x8>, "v_cmp_u_f32", COND_UO>;
-defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9>, "v_cmp_nge_f32">;
-defm V_CMP_NLG_F32 : VOPC_F32 <vopc<0xa>, "v_cmp_nlg_f32">;
-defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb>, "v_cmp_ngt_f32">;
-defm V_CMP_NLE_F32 : VOPC_F32 <vopc<0xc>, "v_cmp_nle_f32">;
-defm V_CMP_NEQ_F32 : VOPC_F32 <vopc<0xd>, "v_cmp_neq_f32", COND_UNE>;
-defm V_CMP_NLT_F32 : VOPC_F32 <vopc<0xe>, "v_cmp_nlt_f32">;
-defm V_CMP_TRU_F32 : VOPC_F32 <vopc<0xf>, "v_cmp_tru_f32">;
+defm V_CMP_F_F32 : VOPC_F32 <vopc<0x0, 0x40>, "v_cmp_f_f32">;
+defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1, 0x41>, "v_cmp_lt_f32", COND_OLT>;
+defm V_CMP_EQ_F32 : VOPC_F32 <vopc<0x2, 0x42>, "v_cmp_eq_f32", COND_OEQ>;
+defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3, 0x43>, "v_cmp_le_f32", COND_OLE>;
+defm V_CMP_GT_F32 : VOPC_F32 <vopc<0x4, 0x44>, "v_cmp_gt_f32", COND_OGT>;
+defm V_CMP_LG_F32 : VOPC_F32 <vopc<0x5, 0x45>, "v_cmp_lg_f32", COND_ONE>;
+defm V_CMP_GE_F32 : VOPC_F32 <vopc<0x6, 0x46>, "v_cmp_ge_f32", COND_OGE>;
+defm V_CMP_O_F32 : VOPC_F32 <vopc<0x7, 0x47>, "v_cmp_o_f32", COND_O>;
+defm V_CMP_U_F32 : VOPC_F32 <vopc<0x8, 0x48>, "v_cmp_u_f32", COND_UO>;
+defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9, 0x49>, "v_cmp_nge_f32", COND_ULT>;
+defm V_CMP_NLG_F32 : VOPC_F32 <vopc<0xa, 0x4a>, "v_cmp_nlg_f32", COND_UEQ>;
+defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb, 0x4b>, "v_cmp_ngt_f32", COND_ULE>;
+defm V_CMP_NLE_F32 : VOPC_F32 <vopc<0xc, 0x4c>, "v_cmp_nle_f32", COND_UGT>;
+defm V_CMP_NEQ_F32 : VOPC_F32 <vopc<0xd, 0x4d>, "v_cmp_neq_f32", COND_UNE>;
+defm V_CMP_NLT_F32 : VOPC_F32 <vopc<0xe, 0x4e>, "v_cmp_nlt_f32", COND_UGE>;
+defm V_CMP_TRU_F32 : VOPC_F32 <vopc<0xf, 0x4f>, "v_cmp_tru_f32">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_F32 : VOPCX_F32 <vopc<0x10>, "v_cmpx_f_f32">;
-defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11>, "v_cmpx_lt_f32">;
-defm V_CMPX_EQ_F32 : VOPCX_F32 <vopc<0x12>, "v_cmpx_eq_f32">;
-defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13>, "v_cmpx_le_f32">;
-defm V_CMPX_GT_F32 : VOPCX_F32 <vopc<0x14>, "v_cmpx_gt_f32">;
-defm V_CMPX_LG_F32 : VOPCX_F32 <vopc<0x15>, "v_cmpx_lg_f32">;
-defm V_CMPX_GE_F32 : VOPCX_F32 <vopc<0x16>, "v_cmpx_ge_f32">;
-defm V_CMPX_O_F32 : VOPCX_F32 <vopc<0x17>, "v_cmpx_o_f32">;
-defm V_CMPX_U_F32 : VOPCX_F32 <vopc<0x18>, "v_cmpx_u_f32">;
-defm V_CMPX_NGE_F32 : VOPCX_F32 <vopc<0x19>, "v_cmpx_nge_f32">;
-defm V_CMPX_NLG_F32 : VOPCX_F32 <vopc<0x1a>, "v_cmpx_nlg_f32">;
-defm V_CMPX_NGT_F32 : VOPCX_F32 <vopc<0x1b>, "v_cmpx_ngt_f32">;
-defm V_CMPX_NLE_F32 : VOPCX_F32 <vopc<0x1c>, "v_cmpx_nle_f32">;
-defm V_CMPX_NEQ_F32 : VOPCX_F32 <vopc<0x1d>, "v_cmpx_neq_f32">;
-defm V_CMPX_NLT_F32 : VOPCX_F32 <vopc<0x1e>, "v_cmpx_nlt_f32">;
-defm V_CMPX_TRU_F32 : VOPCX_F32 <vopc<0x1f>, "v_cmpx_tru_f32">;
+defm V_CMPX_F_F32 : VOPCX_F32 <vopc<0x10, 0x50>, "v_cmpx_f_f32">;
+defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11, 0x51>, "v_cmpx_lt_f32">;
+defm V_CMPX_EQ_F32 : VOPCX_F32 <vopc<0x12, 0x52>, "v_cmpx_eq_f32">;
+defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13, 0x53>, "v_cmpx_le_f32">;
+defm V_CMPX_GT_F32 : VOPCX_F32 <vopc<0x14, 0x54>, "v_cmpx_gt_f32">;
+defm V_CMPX_LG_F32 : VOPCX_F32 <vopc<0x15, 0x55>, "v_cmpx_lg_f32">;
+defm V_CMPX_GE_F32 : VOPCX_F32 <vopc<0x16, 0x56>, "v_cmpx_ge_f32">;
+defm V_CMPX_O_F32 : VOPCX_F32 <vopc<0x17, 0x57>, "v_cmpx_o_f32">;
+defm V_CMPX_U_F32 : VOPCX_F32 <vopc<0x18, 0x58>, "v_cmpx_u_f32">;
+defm V_CMPX_NGE_F32 : VOPCX_F32 <vopc<0x19, 0x59>, "v_cmpx_nge_f32">;
+defm V_CMPX_NLG_F32 : VOPCX_F32 <vopc<0x1a, 0x5a>, "v_cmpx_nlg_f32">;
+defm V_CMPX_NGT_F32 : VOPCX_F32 <vopc<0x1b, 0x5b>, "v_cmpx_ngt_f32">;
+defm V_CMPX_NLE_F32 : VOPCX_F32 <vopc<0x1c, 0x5c>, "v_cmpx_nle_f32">;
+defm V_CMPX_NEQ_F32 : VOPCX_F32 <vopc<0x1d, 0x5d>, "v_cmpx_neq_f32">;
+defm V_CMPX_NLT_F32 : VOPCX_F32 <vopc<0x1e, 0x5e>, "v_cmpx_nlt_f32">;
+defm V_CMPX_TRU_F32 : VOPCX_F32 <vopc<0x1f, 0x5f>, "v_cmpx_tru_f32">;
} // End hasSideEffects = 1
-defm V_CMP_F_F64 : VOPC_F64 <vopc<0x20>, "v_cmp_f_f64">;
-defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21>, "v_cmp_lt_f64", COND_OLT>;
-defm V_CMP_EQ_F64 : VOPC_F64 <vopc<0x22>, "v_cmp_eq_f64", COND_OEQ>;
-defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23>, "v_cmp_le_f64", COND_OLE>;
-defm V_CMP_GT_F64 : VOPC_F64 <vopc<0x24>, "v_cmp_gt_f64", COND_OGT>;
-defm V_CMP_LG_F64 : VOPC_F64 <vopc<0x25>, "v_cmp_lg_f64">;
-defm V_CMP_GE_F64 : VOPC_F64 <vopc<0x26>, "v_cmp_ge_f64", COND_OGE>;
-defm V_CMP_O_F64 : VOPC_F64 <vopc<0x27>, "v_cmp_o_f64", COND_O>;
-defm V_CMP_U_F64 : VOPC_F64 <vopc<0x28>, "v_cmp_u_f64", COND_UO>;
-defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29>, "v_cmp_nge_f64">;
-defm V_CMP_NLG_F64 : VOPC_F64 <vopc<0x2a>, "v_cmp_nlg_f64">;
-defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b>, "v_cmp_ngt_f64">;
-defm V_CMP_NLE_F64 : VOPC_F64 <vopc<0x2c>, "v_cmp_nle_f64">;
-defm V_CMP_NEQ_F64 : VOPC_F64 <vopc<0x2d>, "v_cmp_neq_f64", COND_UNE>;
-defm V_CMP_NLT_F64 : VOPC_F64 <vopc<0x2e>, "v_cmp_nlt_f64">;
-defm V_CMP_TRU_F64 : VOPC_F64 <vopc<0x2f>, "v_cmp_tru_f64">;
+defm V_CMP_F_F64 : VOPC_F64 <vopc<0x20, 0x60>, "v_cmp_f_f64">;
+defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21, 0x61>, "v_cmp_lt_f64", COND_OLT>;
+defm V_CMP_EQ_F64 : VOPC_F64 <vopc<0x22, 0x62>, "v_cmp_eq_f64", COND_OEQ>;
+defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23, 0x63>, "v_cmp_le_f64", COND_OLE>;
+defm V_CMP_GT_F64 : VOPC_F64 <vopc<0x24, 0x64>, "v_cmp_gt_f64", COND_OGT>;
+defm V_CMP_LG_F64 : VOPC_F64 <vopc<0x25, 0x65>, "v_cmp_lg_f64", COND_ONE>;
+defm V_CMP_GE_F64 : VOPC_F64 <vopc<0x26, 0x66>, "v_cmp_ge_f64", COND_OGE>;
+defm V_CMP_O_F64 : VOPC_F64 <vopc<0x27, 0x67>, "v_cmp_o_f64", COND_O>;
+defm V_CMP_U_F64 : VOPC_F64 <vopc<0x28, 0x68>, "v_cmp_u_f64", COND_UO>;
+defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29, 0x69>, "v_cmp_nge_f64", COND_ULT>;
+defm V_CMP_NLG_F64 : VOPC_F64 <vopc<0x2a, 0x6a>, "v_cmp_nlg_f64", COND_UEQ>;
+defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b, 0x6b>, "v_cmp_ngt_f64", COND_ULE>;
+defm V_CMP_NLE_F64 : VOPC_F64 <vopc<0x2c, 0x6c>, "v_cmp_nle_f64", COND_UGT>;
+defm V_CMP_NEQ_F64 : VOPC_F64 <vopc<0x2d, 0x6d>, "v_cmp_neq_f64", COND_UNE>;
+defm V_CMP_NLT_F64 : VOPC_F64 <vopc<0x2e, 0x6e>, "v_cmp_nlt_f64", COND_UGE>;
+defm V_CMP_TRU_F64 : VOPC_F64 <vopc<0x2f, 0x6f>, "v_cmp_tru_f64">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_F64 : VOPCX_F64 <vopc<0x30>, "v_cmpx_f_f64">;
-defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31>, "v_cmpx_lt_f64">;
-defm V_CMPX_EQ_F64 : VOPCX_F64 <vopc<0x32>, "v_cmpx_eq_f64">;
-defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33>, "v_cmpx_le_f64">;
-defm V_CMPX_GT_F64 : VOPCX_F64 <vopc<0x34>, "v_cmpx_gt_f64">;
-defm V_CMPX_LG_F64 : VOPCX_F64 <vopc<0x35>, "v_cmpx_lg_f64">;
-defm V_CMPX_GE_F64 : VOPCX_F64 <vopc<0x36>, "v_cmpx_ge_f64">;
-defm V_CMPX_O_F64 : VOPCX_F64 <vopc<0x37>, "v_cmpx_o_f64">;
-defm V_CMPX_U_F64 : VOPCX_F64 <vopc<0x38>, "v_cmpx_u_f64">;
-defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39>, "v_cmpx_nge_f64">;
-defm V_CMPX_NLG_F64 : VOPCX_F64 <vopc<0x3a>, "v_cmpx_nlg_f64">;
-defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b>, "v_cmpx_ngt_f64">;
-defm V_CMPX_NLE_F64 : VOPCX_F64 <vopc<0x3c>, "v_cmpx_nle_f64">;
-defm V_CMPX_NEQ_F64 : VOPCX_F64 <vopc<0x3d>, "v_cmpx_neq_f64">;
-defm V_CMPX_NLT_F64 : VOPCX_F64 <vopc<0x3e>, "v_cmpx_nlt_f64">;
-defm V_CMPX_TRU_F64 : VOPCX_F64 <vopc<0x3f>, "v_cmpx_tru_f64">;
+defm V_CMPX_F_F64 : VOPCX_F64 <vopc<0x30, 0x70>, "v_cmpx_f_f64">;
+defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31, 0x71>, "v_cmpx_lt_f64">;
+defm V_CMPX_EQ_F64 : VOPCX_F64 <vopc<0x32, 0x72>, "v_cmpx_eq_f64">;
+defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33, 0x73>, "v_cmpx_le_f64">;
+defm V_CMPX_GT_F64 : VOPCX_F64 <vopc<0x34, 0x74>, "v_cmpx_gt_f64">;
+defm V_CMPX_LG_F64 : VOPCX_F64 <vopc<0x35, 0x75>, "v_cmpx_lg_f64">;
+defm V_CMPX_GE_F64 : VOPCX_F64 <vopc<0x36, 0x76>, "v_cmpx_ge_f64">;
+defm V_CMPX_O_F64 : VOPCX_F64 <vopc<0x37, 0x77>, "v_cmpx_o_f64">;
+defm V_CMPX_U_F64 : VOPCX_F64 <vopc<0x38, 0x78>, "v_cmpx_u_f64">;
+defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39, 0x79>, "v_cmpx_nge_f64">;
+defm V_CMPX_NLG_F64 : VOPCX_F64 <vopc<0x3a, 0x7a>, "v_cmpx_nlg_f64">;
+defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b, 0x7b>, "v_cmpx_ngt_f64">;
+defm V_CMPX_NLE_F64 : VOPCX_F64 <vopc<0x3c, 0x7c>, "v_cmpx_nle_f64">;
+defm V_CMPX_NEQ_F64 : VOPCX_F64 <vopc<0x3d, 0x7d>, "v_cmpx_neq_f64">;
+defm V_CMPX_NLT_F64 : VOPCX_F64 <vopc<0x3e, 0x7e>, "v_cmpx_nlt_f64">;
+defm V_CMPX_TRU_F64 : VOPCX_F64 <vopc<0x3f, 0x7f>, "v_cmpx_tru_f64">;
} // End hasSideEffects = 1
+let SubtargetPredicate = isSICI in {
+
defm V_CMPS_F_F32 : VOPC_F32 <vopc<0x40>, "v_cmps_f_f32">;
defm V_CMPS_LT_F32 : VOPC_F32 <vopc<0x41>, "v_cmps_lt_f32">;
defm V_CMPS_EQ_F32 : VOPC_F32 <vopc<0x42>, "v_cmps_eq_f32">;
@@ -628,104 +661,106 @@ defm V_CMPSX_TRU_F64 : VOPC_F64 <vopc<0x7f>, "v_cmpsx_tru_f64">;
} // End hasSideEffects = 1, Defs = [EXEC]
-defm V_CMP_F_I32 : VOPC_I32 <vopc<0x80>, "v_cmp_f_i32">;
-defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81>, "v_cmp_lt_i32", COND_SLT>;
-defm V_CMP_EQ_I32 : VOPC_I32 <vopc<0x82>, "v_cmp_eq_i32", COND_EQ>;
-defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83>, "v_cmp_le_i32", COND_SLE>;
-defm V_CMP_GT_I32 : VOPC_I32 <vopc<0x84>, "v_cmp_gt_i32", COND_SGT>;
-defm V_CMP_NE_I32 : VOPC_I32 <vopc<0x85>, "v_cmp_ne_i32", COND_NE>;
-defm V_CMP_GE_I32 : VOPC_I32 <vopc<0x86>, "v_cmp_ge_i32", COND_SGE>;
-defm V_CMP_T_I32 : VOPC_I32 <vopc<0x87>, "v_cmp_t_i32">;
+} // End SubtargetPredicate = isSICI
+
+defm V_CMP_F_I32 : VOPC_I32 <vopc<0x80, 0xc0>, "v_cmp_f_i32">;
+defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81, 0xc1>, "v_cmp_lt_i32", COND_SLT>;
+defm V_CMP_EQ_I32 : VOPC_I32 <vopc<0x82, 0xc2>, "v_cmp_eq_i32", COND_EQ>;
+defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83, 0xc3>, "v_cmp_le_i32", COND_SLE>;
+defm V_CMP_GT_I32 : VOPC_I32 <vopc<0x84, 0xc4>, "v_cmp_gt_i32", COND_SGT>;
+defm V_CMP_NE_I32 : VOPC_I32 <vopc<0x85, 0xc5>, "v_cmp_ne_i32", COND_NE>;
+defm V_CMP_GE_I32 : VOPC_I32 <vopc<0x86, 0xc6>, "v_cmp_ge_i32", COND_SGE>;
+defm V_CMP_T_I32 : VOPC_I32 <vopc<0x87, 0xc7>, "v_cmp_t_i32">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_I32 : VOPCX_I32 <vopc<0x90>, "v_cmpx_f_i32">;
-defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91>, "v_cmpx_lt_i32">;
-defm V_CMPX_EQ_I32 : VOPCX_I32 <vopc<0x92>, "v_cmpx_eq_i32">;
-defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93>, "v_cmpx_le_i32">;
-defm V_CMPX_GT_I32 : VOPCX_I32 <vopc<0x94>, "v_cmpx_gt_i32">;
-defm V_CMPX_NE_I32 : VOPCX_I32 <vopc<0x95>, "v_cmpx_ne_i32">;
-defm V_CMPX_GE_I32 : VOPCX_I32 <vopc<0x96>, "v_cmpx_ge_i32">;
-defm V_CMPX_T_I32 : VOPCX_I32 <vopc<0x97>, "v_cmpx_t_i32">;
+defm V_CMPX_F_I32 : VOPCX_I32 <vopc<0x90, 0xd0>, "v_cmpx_f_i32">;
+defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91, 0xd1>, "v_cmpx_lt_i32">;
+defm V_CMPX_EQ_I32 : VOPCX_I32 <vopc<0x92, 0xd2>, "v_cmpx_eq_i32">;
+defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93, 0xd3>, "v_cmpx_le_i32">;
+defm V_CMPX_GT_I32 : VOPCX_I32 <vopc<0x94, 0xd4>, "v_cmpx_gt_i32">;
+defm V_CMPX_NE_I32 : VOPCX_I32 <vopc<0x95, 0xd5>, "v_cmpx_ne_i32">;
+defm V_CMPX_GE_I32 : VOPCX_I32 <vopc<0x96, 0xd6>, "v_cmpx_ge_i32">;
+defm V_CMPX_T_I32 : VOPCX_I32 <vopc<0x97, 0xd7>, "v_cmpx_t_i32">;
} // End hasSideEffects = 1
-defm V_CMP_F_I64 : VOPC_I64 <vopc<0xa0>, "v_cmp_f_i64">;
-defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1>, "v_cmp_lt_i64", COND_SLT>;
-defm V_CMP_EQ_I64 : VOPC_I64 <vopc<0xa2>, "v_cmp_eq_i64", COND_EQ>;
-defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3>, "v_cmp_le_i64", COND_SLE>;
-defm V_CMP_GT_I64 : VOPC_I64 <vopc<0xa4>, "v_cmp_gt_i64", COND_SGT>;
-defm V_CMP_NE_I64 : VOPC_I64 <vopc<0xa5>, "v_cmp_ne_i64", COND_NE>;
-defm V_CMP_GE_I64 : VOPC_I64 <vopc<0xa6>, "v_cmp_ge_i64", COND_SGE>;
-defm V_CMP_T_I64 : VOPC_I64 <vopc<0xa7>, "v_cmp_t_i64">;
+defm V_CMP_F_I64 : VOPC_I64 <vopc<0xa0, 0xe0>, "v_cmp_f_i64">;
+defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1, 0xe1>, "v_cmp_lt_i64", COND_SLT>;
+defm V_CMP_EQ_I64 : VOPC_I64 <vopc<0xa2, 0xe2>, "v_cmp_eq_i64", COND_EQ>;
+defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3, 0xe3>, "v_cmp_le_i64", COND_SLE>;
+defm V_CMP_GT_I64 : VOPC_I64 <vopc<0xa4, 0xe4>, "v_cmp_gt_i64", COND_SGT>;
+defm V_CMP_NE_I64 : VOPC_I64 <vopc<0xa5, 0xe5>, "v_cmp_ne_i64", COND_NE>;
+defm V_CMP_GE_I64 : VOPC_I64 <vopc<0xa6, 0xe6>, "v_cmp_ge_i64", COND_SGE>;
+defm V_CMP_T_I64 : VOPC_I64 <vopc<0xa7, 0xe7>, "v_cmp_t_i64">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_I64 : VOPCX_I64 <vopc<0xb0>, "v_cmpx_f_i64">;
-defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1>, "v_cmpx_lt_i64">;
-defm V_CMPX_EQ_I64 : VOPCX_I64 <vopc<0xb2>, "v_cmpx_eq_i64">;
-defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3>, "v_cmpx_le_i64">;
-defm V_CMPX_GT_I64 : VOPCX_I64 <vopc<0xb4>, "v_cmpx_gt_i64">;
-defm V_CMPX_NE_I64 : VOPCX_I64 <vopc<0xb5>, "v_cmpx_ne_i64">;
-defm V_CMPX_GE_I64 : VOPCX_I64 <vopc<0xb6>, "v_cmpx_ge_i64">;
-defm V_CMPX_T_I64 : VOPCX_I64 <vopc<0xb7>, "v_cmpx_t_i64">;
+defm V_CMPX_F_I64 : VOPCX_I64 <vopc<0xb0, 0xf0>, "v_cmpx_f_i64">;
+defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1, 0xf1>, "v_cmpx_lt_i64">;
+defm V_CMPX_EQ_I64 : VOPCX_I64 <vopc<0xb2, 0xf2>, "v_cmpx_eq_i64">;
+defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3, 0xf3>, "v_cmpx_le_i64">;
+defm V_CMPX_GT_I64 : VOPCX_I64 <vopc<0xb4, 0xf4>, "v_cmpx_gt_i64">;
+defm V_CMPX_NE_I64 : VOPCX_I64 <vopc<0xb5, 0xf5>, "v_cmpx_ne_i64">;
+defm V_CMPX_GE_I64 : VOPCX_I64 <vopc<0xb6, 0xf6>, "v_cmpx_ge_i64">;
+defm V_CMPX_T_I64 : VOPCX_I64 <vopc<0xb7, 0xf7>, "v_cmpx_t_i64">;
} // End hasSideEffects = 1
-defm V_CMP_F_U32 : VOPC_I32 <vopc<0xc0>, "v_cmp_f_u32">;
-defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1>, "v_cmp_lt_u32", COND_ULT>;
-defm V_CMP_EQ_U32 : VOPC_I32 <vopc<0xc2>, "v_cmp_eq_u32", COND_EQ>;
-defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3>, "v_cmp_le_u32", COND_ULE>;
-defm V_CMP_GT_U32 : VOPC_I32 <vopc<0xc4>, "v_cmp_gt_u32", COND_UGT>;
-defm V_CMP_NE_U32 : VOPC_I32 <vopc<0xc5>, "v_cmp_ne_u32", COND_NE>;
-defm V_CMP_GE_U32 : VOPC_I32 <vopc<0xc6>, "v_cmp_ge_u32", COND_UGE>;
-defm V_CMP_T_U32 : VOPC_I32 <vopc<0xc7>, "v_cmp_t_u32">;
+defm V_CMP_F_U32 : VOPC_I32 <vopc<0xc0, 0xc8>, "v_cmp_f_u32">;
+defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1, 0xc9>, "v_cmp_lt_u32", COND_ULT>;
+defm V_CMP_EQ_U32 : VOPC_I32 <vopc<0xc2, 0xca>, "v_cmp_eq_u32", COND_EQ>;
+defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3, 0xcb>, "v_cmp_le_u32", COND_ULE>;
+defm V_CMP_GT_U32 : VOPC_I32 <vopc<0xc4, 0xcc>, "v_cmp_gt_u32", COND_UGT>;
+defm V_CMP_NE_U32 : VOPC_I32 <vopc<0xc5, 0xcd>, "v_cmp_ne_u32", COND_NE>;
+defm V_CMP_GE_U32 : VOPC_I32 <vopc<0xc6, 0xce>, "v_cmp_ge_u32", COND_UGE>;
+defm V_CMP_T_U32 : VOPC_I32 <vopc<0xc7, 0xcf>, "v_cmp_t_u32">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_U32 : VOPCX_I32 <vopc<0xd0>, "v_cmpx_f_u32">;
-defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1>, "v_cmpx_lt_u32">;
-defm V_CMPX_EQ_U32 : VOPCX_I32 <vopc<0xd2>, "v_cmpx_eq_u32">;
-defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3>, "v_cmpx_le_u32">;
-defm V_CMPX_GT_U32 : VOPCX_I32 <vopc<0xd4>, "v_cmpx_gt_u32">;
-defm V_CMPX_NE_U32 : VOPCX_I32 <vopc<0xd5>, "v_cmpx_ne_u32">;
-defm V_CMPX_GE_U32 : VOPCX_I32 <vopc<0xd6>, "v_cmpx_ge_u32">;
-defm V_CMPX_T_U32 : VOPCX_I32 <vopc<0xd7>, "v_cmpx_t_u32">;
+defm V_CMPX_F_U32 : VOPCX_I32 <vopc<0xd0, 0xd8>, "v_cmpx_f_u32">;
+defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1, 0xd9>, "v_cmpx_lt_u32">;
+defm V_CMPX_EQ_U32 : VOPCX_I32 <vopc<0xd2, 0xda>, "v_cmpx_eq_u32">;
+defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3, 0xdb>, "v_cmpx_le_u32">;
+defm V_CMPX_GT_U32 : VOPCX_I32 <vopc<0xd4, 0xdc>, "v_cmpx_gt_u32">;
+defm V_CMPX_NE_U32 : VOPCX_I32 <vopc<0xd5, 0xdd>, "v_cmpx_ne_u32">;
+defm V_CMPX_GE_U32 : VOPCX_I32 <vopc<0xd6, 0xde>, "v_cmpx_ge_u32">;
+defm V_CMPX_T_U32 : VOPCX_I32 <vopc<0xd7, 0xdf>, "v_cmpx_t_u32">;
} // End hasSideEffects = 1
-defm V_CMP_F_U64 : VOPC_I64 <vopc<0xe0>, "v_cmp_f_u64">;
-defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1>, "v_cmp_lt_u64", COND_ULT>;
-defm V_CMP_EQ_U64 : VOPC_I64 <vopc<0xe2>, "v_cmp_eq_u64", COND_EQ>;
-defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3>, "v_cmp_le_u64", COND_ULE>;
-defm V_CMP_GT_U64 : VOPC_I64 <vopc<0xe4>, "v_cmp_gt_u64", COND_UGT>;
-defm V_CMP_NE_U64 : VOPC_I64 <vopc<0xe5>, "v_cmp_ne_u64", COND_NE>;
-defm V_CMP_GE_U64 : VOPC_I64 <vopc<0xe6>, "v_cmp_ge_u64", COND_UGE>;
-defm V_CMP_T_U64 : VOPC_I64 <vopc<0xe7>, "v_cmp_t_u64">;
+defm V_CMP_F_U64 : VOPC_I64 <vopc<0xe0, 0xe8>, "v_cmp_f_u64">;
+defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1, 0xe9>, "v_cmp_lt_u64", COND_ULT>;
+defm V_CMP_EQ_U64 : VOPC_I64 <vopc<0xe2, 0xea>, "v_cmp_eq_u64", COND_EQ>;
+defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3, 0xeb>, "v_cmp_le_u64", COND_ULE>;
+defm V_CMP_GT_U64 : VOPC_I64 <vopc<0xe4, 0xec>, "v_cmp_gt_u64", COND_UGT>;
+defm V_CMP_NE_U64 : VOPC_I64 <vopc<0xe5, 0xed>, "v_cmp_ne_u64", COND_NE>;
+defm V_CMP_GE_U64 : VOPC_I64 <vopc<0xe6, 0xee>, "v_cmp_ge_u64", COND_UGE>;
+defm V_CMP_T_U64 : VOPC_I64 <vopc<0xe7, 0xef>, "v_cmp_t_u64">;
let hasSideEffects = 1 in {
-defm V_CMPX_F_U64 : VOPCX_I64 <vopc<0xf0>, "v_cmpx_f_u64">;
-defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1>, "v_cmpx_lt_u64">;
-defm V_CMPX_EQ_U64 : VOPCX_I64 <vopc<0xf2>, "v_cmpx_eq_u64">;
-defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3>, "v_cmpx_le_u64">;
-defm V_CMPX_GT_U64 : VOPCX_I64 <vopc<0xf4>, "v_cmpx_gt_u64">;
-defm V_CMPX_NE_U64 : VOPCX_I64 <vopc<0xf5>, "v_cmpx_ne_u64">;
-defm V_CMPX_GE_U64 : VOPCX_I64 <vopc<0xf6>, "v_cmpx_ge_u64">;
-defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7>, "v_cmpx_t_u64">;
+defm V_CMPX_F_U64 : VOPCX_I64 <vopc<0xf0, 0xf8>, "v_cmpx_f_u64">;
+defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1, 0xf9>, "v_cmpx_lt_u64">;
+defm V_CMPX_EQ_U64 : VOPCX_I64 <vopc<0xf2, 0xfa>, "v_cmpx_eq_u64">;
+defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3, 0xfb>, "v_cmpx_le_u64">;
+defm V_CMPX_GT_U64 : VOPCX_I64 <vopc<0xf4, 0xfc>, "v_cmpx_gt_u64">;
+defm V_CMPX_NE_U64 : VOPCX_I64 <vopc<0xf5, 0xfd>, "v_cmpx_ne_u64">;
+defm V_CMPX_GE_U64 : VOPCX_I64 <vopc<0xf6, 0xfe>, "v_cmpx_ge_u64">;
+defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7, 0xff>, "v_cmpx_t_u64">;
} // End hasSideEffects = 1
-defm V_CMP_CLASS_F32 : VOPC_F32 <vopc<0x88>, "v_cmp_class_f32">;
+defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
let hasSideEffects = 1 in {
-defm V_CMPX_CLASS_F32 : VOPCX_F32 <vopc<0x98>, "v_cmpx_class_f32">;
+defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <vopc<0x98, 0x11>, "v_cmpx_class_f32">;
} // End hasSideEffects = 1
-defm V_CMP_CLASS_F64 : VOPC_F64 <vopc<0xa8>, "v_cmp_class_f64">;
+defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <vopc<0xa8, 0x12>, "v_cmp_class_f64">;
let hasSideEffects = 1 in {
-defm V_CMPX_CLASS_F64 : VOPCX_F64 <vopc<0xb8>, "v_cmpx_class_f64">;
+defm V_CMPX_CLASS_F64 : VOPCX_CLASS_F64 <vopc<0xb8, 0x13>, "v_cmpx_class_f64">;
} // End hasSideEffects = 1
} // End isCompare = 1
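The recurring change in the compare hunks above widens every one-argument vopc<n> into a vopc<si, vi> pair; judging from the values, the first operand is the opcode kept for the SI/CI encoding and the second the new VI opcode (the mubuf<si, vi>, vop1<si, vi>, and vop2<si, vi> arguments further down appear to follow the same scheme). A minimal TableGen sketch of such an encoding carrier, using hypothetical names rather than the tree's actual classes:

class vopc_sketch <bits<8> si, bits<8> vi> {
  bits<8> SI = si;  // opcode byte used by the SI/CI encoding
  bits<8> VI = vi;  // opcode byte used by the VI encoding
}

// e.g. v_cmp_lt_f32 keeps 0x11 on SI/CI and becomes 0x51 on VI:
def CMP_LT_F32_ENC : vopc_sketch<0x11, 0x51>;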
@@ -735,88 +770,88 @@ defm V_CMPX_CLASS_F64 : VOPCX_F64 <vopc<0xb8>, "v_cmpx_class_f64">;
//===----------------------------------------------------------------------===//
-def DS_ADD_U32 : DS_1A1D_NORET <0x0, "ds_add_u32", VReg_32>;
-def DS_SUB_U32 : DS_1A1D_NORET <0x1, "ds_sub_u32", VReg_32>;
-def DS_RSUB_U32 : DS_1A1D_NORET <0x2, "ds_rsub_u32", VReg_32>;
-def DS_INC_U32 : DS_1A1D_NORET <0x3, "ds_inc_u32", VReg_32>;
-def DS_DEC_U32 : DS_1A1D_NORET <0x4, "ds_dec_u32", VReg_32>;
-def DS_MIN_I32 : DS_1A1D_NORET <0x5, "ds_min_i32", VReg_32>;
-def DS_MAX_I32 : DS_1A1D_NORET <0x6, "ds_max_i32", VReg_32>;
-def DS_MIN_U32 : DS_1A1D_NORET <0x7, "ds_min_u32", VReg_32>;
-def DS_MAX_U32 : DS_1A1D_NORET <0x8, "ds_max_u32", VReg_32>;
-def DS_AND_B32 : DS_1A1D_NORET <0x9, "ds_and_b32", VReg_32>;
-def DS_OR_B32 : DS_1A1D_NORET <0xa, "ds_or_b32", VReg_32>;
-def DS_XOR_B32 : DS_1A1D_NORET <0xb, "ds_xor_b32", VReg_32>;
-def DS_MSKOR_B32 : DS_1A1D_NORET <0xc, "ds_mskor_b32", VReg_32>;
-def DS_CMPST_B32 : DS_1A2D_NORET <0x10, "ds_cmpst_b32", VReg_32>;
-def DS_CMPST_F32 : DS_1A2D_NORET <0x11, "ds_cmpst_f32", VReg_32>;
-def DS_MIN_F32 : DS_1A1D_NORET <0x12, "ds_min_f32", VReg_32>;
-def DS_MAX_F32 : DS_1A1D_NORET <0x13, "ds_max_f32", VReg_32>;
-
-def DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "ds_add_rtn_u32", VReg_32, "ds_add_u32">;
-def DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "ds_sub_rtn_u32", VReg_32, "ds_sub_u32">;
-def DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "ds_rsub_rtn_u32", VReg_32, "ds_rsub_u32">;
-def DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "ds_inc_rtn_u32", VReg_32, "ds_inc_u32">;
-def DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "ds_dec_rtn_u32", VReg_32, "ds_dec_u32">;
-def DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "ds_min_rtn_i32", VReg_32, "ds_min_i32">;
-def DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "ds_max_rtn_i32", VReg_32, "ds_max_i32">;
-def DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "ds_min_rtn_u32", VReg_32, "ds_min_u32">;
-def DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "ds_max_rtn_u32", VReg_32, "ds_max_u32">;
-def DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "ds_and_rtn_b32", VReg_32, "ds_and_b32">;
-def DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "ds_or_rtn_b32", VReg_32, "ds_or_b32">;
-def DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "ds_xor_rtn_b32", VReg_32, "ds_xor_b32">;
-def DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "ds_mskor_rtn_b32", VReg_32, "ds_mskor_b32">;
-def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "ds_wrxchg_rtn_b32", VReg_32>;
-//def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "ds_wrxchg2_rtn_b32", VReg_32, "ds_wrxchg2_b32">;
-//def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "ds_wrxchg2_rtn_b32", VReg_32, "ds_wrxchg2st64_b32">;
-def DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "ds_cmpst_rtn_b32", VReg_32, "ds_cmpst_b32">;
-def DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "ds_cmpst_rtn_f32", VReg_32, "ds_cmpst_f32">;
-def DS_MIN_RTN_F32 : DS_1A1D_RET <0x32, "ds_min_rtn_f32", VReg_32, "ds_min_f32">;
-def DS_MAX_RTN_F32 : DS_1A1D_RET <0x33, "ds_max_rtn_f32", VReg_32, "ds_max_f32">;
+defm DS_ADD_U32 : DS_1A1D_NORET <0x0, "ds_add_u32", VGPR_32>;
+defm DS_SUB_U32 : DS_1A1D_NORET <0x1, "ds_sub_u32", VGPR_32>;
+defm DS_RSUB_U32 : DS_1A1D_NORET <0x2, "ds_rsub_u32", VGPR_32>;
+defm DS_INC_U32 : DS_1A1D_NORET <0x3, "ds_inc_u32", VGPR_32>;
+defm DS_DEC_U32 : DS_1A1D_NORET <0x4, "ds_dec_u32", VGPR_32>;
+defm DS_MIN_I32 : DS_1A1D_NORET <0x5, "ds_min_i32", VGPR_32>;
+defm DS_MAX_I32 : DS_1A1D_NORET <0x6, "ds_max_i32", VGPR_32>;
+defm DS_MIN_U32 : DS_1A1D_NORET <0x7, "ds_min_u32", VGPR_32>;
+defm DS_MAX_U32 : DS_1A1D_NORET <0x8, "ds_max_u32", VGPR_32>;
+defm DS_AND_B32 : DS_1A1D_NORET <0x9, "ds_and_b32", VGPR_32>;
+defm DS_OR_B32 : DS_1A1D_NORET <0xa, "ds_or_b32", VGPR_32>;
+defm DS_XOR_B32 : DS_1A1D_NORET <0xb, "ds_xor_b32", VGPR_32>;
+defm DS_MSKOR_B32 : DS_1A1D_NORET <0xc, "ds_mskor_b32", VGPR_32>;
+defm DS_CMPST_B32 : DS_1A2D_NORET <0x10, "ds_cmpst_b32", VGPR_32>;
+defm DS_CMPST_F32 : DS_1A2D_NORET <0x11, "ds_cmpst_f32", VGPR_32>;
+defm DS_MIN_F32 : DS_1A1D_NORET <0x12, "ds_min_f32", VGPR_32>;
+defm DS_MAX_F32 : DS_1A1D_NORET <0x13, "ds_max_f32", VGPR_32>;
+
+defm DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "ds_add_rtn_u32", VGPR_32, "ds_add_u32">;
+defm DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "ds_sub_rtn_u32", VGPR_32, "ds_sub_u32">;
+defm DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "ds_rsub_rtn_u32", VGPR_32, "ds_rsub_u32">;
+defm DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "ds_inc_rtn_u32", VGPR_32, "ds_inc_u32">;
+defm DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "ds_dec_rtn_u32", VGPR_32, "ds_dec_u32">;
+defm DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "ds_min_rtn_i32", VGPR_32, "ds_min_i32">;
+defm DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "ds_max_rtn_i32", VGPR_32, "ds_max_i32">;
+defm DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "ds_min_rtn_u32", VGPR_32, "ds_min_u32">;
+defm DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "ds_max_rtn_u32", VGPR_32, "ds_max_u32">;
+defm DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "ds_and_rtn_b32", VGPR_32, "ds_and_b32">;
+defm DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "ds_or_rtn_b32", VGPR_32, "ds_or_b32">;
+defm DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "ds_xor_rtn_b32", VGPR_32, "ds_xor_b32">;
+defm DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "ds_mskor_rtn_b32", VGPR_32, "ds_mskor_b32">;
+defm DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "ds_wrxchg_rtn_b32", VGPR_32>;
+//def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "ds_wrxchg2_rtn_b32", VGPR_32, "ds_wrxchg2_b32">;
+//def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "ds_wrxchg2_rtn_b32", VGPR_32, "ds_wrxchg2st64_b32">;
+defm DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "ds_cmpst_rtn_b32", VGPR_32, "ds_cmpst_b32">;
+defm DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "ds_cmpst_rtn_f32", VGPR_32, "ds_cmpst_f32">;
+defm DS_MIN_RTN_F32 : DS_1A1D_RET <0x32, "ds_min_rtn_f32", VGPR_32, "ds_min_f32">;
+defm DS_MAX_RTN_F32 : DS_1A1D_RET <0x33, "ds_max_rtn_f32", VGPR_32, "ds_max_f32">;
let SubtargetPredicate = isCI in {
-def DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "ds_wrap_rtn_f32", VReg_32, "ds_wrap_f32">;
+defm DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "ds_wrap_rtn_f32", VGPR_32, "ds_wrap_f32">;
} // End isCI
-def DS_ADD_U64 : DS_1A1D_NORET <0x40, "ds_add_u64", VReg_64>;
-def DS_SUB_U64 : DS_1A1D_NORET <0x41, "ds_sub_u64", VReg_64>;
-def DS_RSUB_U64 : DS_1A1D_NORET <0x42, "ds_rsub_u64", VReg_64>;
-def DS_INC_U64 : DS_1A1D_NORET <0x43, "ds_inc_u64", VReg_64>;
-def DS_DEC_U64 : DS_1A1D_NORET <0x44, "ds_dec_u64", VReg_64>;
-def DS_MIN_I64 : DS_1A1D_NORET <0x45, "ds_min_i64", VReg_64>;
-def DS_MAX_I64 : DS_1A1D_NORET <0x46, "ds_max_i64", VReg_64>;
-def DS_MIN_U64 : DS_1A1D_NORET <0x47, "ds_min_u64", VReg_64>;
-def DS_MAX_U64 : DS_1A1D_NORET <0x48, "ds_max_u64", VReg_64>;
-def DS_AND_B64 : DS_1A1D_NORET <0x49, "ds_and_b64", VReg_64>;
-def DS_OR_B64 : DS_1A1D_NORET <0x4a, "ds_or_b64", VReg_64>;
-def DS_XOR_B64 : DS_1A1D_NORET <0x4b, "ds_xor_b64", VReg_64>;
-def DS_MSKOR_B64 : DS_1A1D_NORET <0x4c, "ds_mskor_b64", VReg_64>;
-def DS_CMPST_B64 : DS_1A2D_NORET <0x50, "ds_cmpst_b64", VReg_64>;
-def DS_CMPST_F64 : DS_1A2D_NORET <0x51, "ds_cmpst_f64", VReg_64>;
-def DS_MIN_F64 : DS_1A1D_NORET <0x52, "ds_min_f64", VReg_64>;
-def DS_MAX_F64 : DS_1A1D_NORET <0x53, "ds_max_f64", VReg_64>;
-
-def DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "ds_add_rtn_u64", VReg_64, "ds_add_u64">;
-def DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "ds_sub_rtn_u64", VReg_64, "ds_sub_u64">;
-def DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "ds_rsub_rtn_u64", VReg_64, "ds_rsub_u64">;
-def DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "ds_inc_rtn_u64", VReg_64, "ds_inc_u64">;
-def DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "ds_dec_rtn_u64", VReg_64, "ds_dec_u64">;
-def DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "ds_min_rtn_i64", VReg_64, "ds_min_i64">;
-def DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "ds_max_rtn_i64", VReg_64, "ds_max_i64">;
-def DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "ds_min_rtn_u64", VReg_64, "ds_min_u64">;
-def DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "ds_max_rtn_u64", VReg_64, "ds_max_u64">;
-def DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "ds_and_rtn_b64", VReg_64, "ds_and_b64">;
-def DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "ds_or_rtn_b64", VReg_64, "ds_or_b64">;
-def DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "ds_xor_rtn_b64", VReg_64, "ds_xor_b64">;
-def DS_MSKOR_RTN_B64 : DS_1A1D_RET <0x6c, "ds_mskor_rtn_b64", VReg_64, "ds_mskor_b64">;
-def DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "ds_wrxchg_rtn_b64", VReg_64, "ds_wrxchg_b64">;
+defm DS_ADD_U64 : DS_1A1D_NORET <0x40, "ds_add_u64", VReg_64>;
+defm DS_SUB_U64 : DS_1A1D_NORET <0x41, "ds_sub_u64", VReg_64>;
+defm DS_RSUB_U64 : DS_1A1D_NORET <0x42, "ds_rsub_u64", VReg_64>;
+defm DS_INC_U64 : DS_1A1D_NORET <0x43, "ds_inc_u64", VReg_64>;
+defm DS_DEC_U64 : DS_1A1D_NORET <0x44, "ds_dec_u64", VReg_64>;
+defm DS_MIN_I64 : DS_1A1D_NORET <0x45, "ds_min_i64", VReg_64>;
+defm DS_MAX_I64 : DS_1A1D_NORET <0x46, "ds_max_i64", VReg_64>;
+defm DS_MIN_U64 : DS_1A1D_NORET <0x47, "ds_min_u64", VReg_64>;
+defm DS_MAX_U64 : DS_1A1D_NORET <0x48, "ds_max_u64", VReg_64>;
+defm DS_AND_B64 : DS_1A1D_NORET <0x49, "ds_and_b64", VReg_64>;
+defm DS_OR_B64 : DS_1A1D_NORET <0x4a, "ds_or_b64", VReg_64>;
+defm DS_XOR_B64 : DS_1A1D_NORET <0x4b, "ds_xor_b64", VReg_64>;
+defm DS_MSKOR_B64 : DS_1A1D_NORET <0x4c, "ds_mskor_b64", VReg_64>;
+defm DS_CMPST_B64 : DS_1A2D_NORET <0x50, "ds_cmpst_b64", VReg_64>;
+defm DS_CMPST_F64 : DS_1A2D_NORET <0x51, "ds_cmpst_f64", VReg_64>;
+defm DS_MIN_F64 : DS_1A1D_NORET <0x52, "ds_min_f64", VReg_64>;
+defm DS_MAX_F64 : DS_1A1D_NORET <0x53, "ds_max_f64", VReg_64>;
+
+defm DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "ds_add_rtn_u64", VReg_64, "ds_add_u64">;
+defm DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "ds_sub_rtn_u64", VReg_64, "ds_sub_u64">;
+defm DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "ds_rsub_rtn_u64", VReg_64, "ds_rsub_u64">;
+defm DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "ds_inc_rtn_u64", VReg_64, "ds_inc_u64">;
+defm DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "ds_dec_rtn_u64", VReg_64, "ds_dec_u64">;
+defm DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "ds_min_rtn_i64", VReg_64, "ds_min_i64">;
+defm DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "ds_max_rtn_i64", VReg_64, "ds_max_i64">;
+defm DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "ds_min_rtn_u64", VReg_64, "ds_min_u64">;
+defm DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "ds_max_rtn_u64", VReg_64, "ds_max_u64">;
+defm DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "ds_and_rtn_b64", VReg_64, "ds_and_b64">;
+defm DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "ds_or_rtn_b64", VReg_64, "ds_or_b64">;
+defm DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "ds_xor_rtn_b64", VReg_64, "ds_xor_b64">;
+defm DS_MSKOR_RTN_B64 : DS_1A1D_RET <0x6c, "ds_mskor_rtn_b64", VReg_64, "ds_mskor_b64">;
+defm DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "ds_wrxchg_rtn_b64", VReg_64, "ds_wrxchg_b64">;
//def DS_WRXCHG2_RTN_B64 : DS_2A0D_RET <0x6e, "ds_wrxchg2_rtn_b64", VReg_64, "ds_wrxchg2_b64">;
//def DS_WRXCHG2ST64_RTN_B64 : DS_2A0D_RET <0x6f, "ds_wrxchg2_rtn_b64", VReg_64, "ds_wrxchg2st64_b64">;
-def DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "ds_cmpst_rtn_b64", VReg_64, "ds_cmpst_b64">;
-def DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "ds_cmpst_rtn_f64", VReg_64, "ds_cmpst_f64">;
-def DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "ds_min_f64", VReg_64, "ds_min_f64">;
-def DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "ds_max_f64", VReg_64, "ds_max_f64">;
+defm DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "ds_cmpst_rtn_b64", VReg_64, "ds_cmpst_b64">;
+defm DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "ds_cmpst_rtn_f64", VReg_64, "ds_cmpst_f64">;
+defm DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "ds_min_rtn_f64", VReg_64, "ds_min_f64">;
+defm DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "ds_max_rtn_f64", VReg_64, "ds_max_f64">;
//let SubtargetPredicate = isCI in {
// DS_CONDXCHG32_RTN_B64
@@ -825,139 +860,140 @@ def DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "ds_max_f64", VReg_64, "ds_max_f64">;
// TODO: _SRC2_* forms
-def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "ds_write_b32", VReg_32>;
-def DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "ds_write_b8", VReg_32>;
-def DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "ds_write_b16", VReg_32>;
-def DS_WRITE_B64 : DS_Store_Helper <0x00000004d, "ds_write_b64", VReg_64>;
+defm DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "ds_write_b32", VGPR_32>;
+defm DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "ds_write_b8", VGPR_32>;
+defm DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "ds_write_b16", VGPR_32>;
+defm DS_WRITE_B64 : DS_Store_Helper <0x00000004d, "ds_write_b64", VReg_64>;
-def DS_READ_B32 : DS_Load_Helper <0x00000036, "ds_read_b32", VReg_32>;
-def DS_READ_I8 : DS_Load_Helper <0x00000039, "ds_read_i8", VReg_32>;
-def DS_READ_U8 : DS_Load_Helper <0x0000003a, "ds_read_u8", VReg_32>;
-def DS_READ_I16 : DS_Load_Helper <0x0000003b, "ds_read_i16", VReg_32>;
-def DS_READ_U16 : DS_Load_Helper <0x0000003c, "ds_read_u16", VReg_32>;
-def DS_READ_B64 : DS_Load_Helper <0x00000076, "ds_read_b64", VReg_64>;
+defm DS_READ_B32 : DS_Load_Helper <0x00000036, "ds_read_b32", VGPR_32>;
+defm DS_READ_I8 : DS_Load_Helper <0x00000039, "ds_read_i8", VGPR_32>;
+defm DS_READ_U8 : DS_Load_Helper <0x0000003a, "ds_read_u8", VGPR_32>;
+defm DS_READ_I16 : DS_Load_Helper <0x0000003b, "ds_read_i16", VGPR_32>;
+defm DS_READ_U16 : DS_Load_Helper <0x0000003c, "ds_read_u16", VGPR_32>;
+defm DS_READ_B64 : DS_Load_Helper <0x00000076, "ds_read_b64", VReg_64>;
// 2 forms.
-def DS_WRITE2_B32 : DS_Store2_Helper <0x0000000E, "ds_write2_b32", VReg_32>;
-def DS_WRITE2ST64_B32 : DS_Store2_Helper <0x0000000F, "ds_write2st64_b32", VReg_32>;
-def DS_WRITE2_B64 : DS_Store2_Helper <0x0000004E, "ds_write2_b64", VReg_64>;
-def DS_WRITE2ST64_B64 : DS_Store2_Helper <0x0000004F, "ds_write2st64_b64", VReg_64>;
+defm DS_WRITE2_B32 : DS_Store2_Helper <0x0000000E, "ds_write2_b32", VGPR_32>;
+defm DS_WRITE2ST64_B32 : DS_Store2_Helper <0x0000000F, "ds_write2st64_b32", VGPR_32>;
+defm DS_WRITE2_B64 : DS_Store2_Helper <0x0000004E, "ds_write2_b64", VReg_64>;
+defm DS_WRITE2ST64_B64 : DS_Store2_Helper <0x0000004F, "ds_write2st64_b64", VReg_64>;
-def DS_READ2_B32 : DS_Load2_Helper <0x00000037, "ds_read2_b32", VReg_64>;
-def DS_READ2ST64_B32 : DS_Load2_Helper <0x00000038, "ds_read2st64_b32", VReg_64>;
-def DS_READ2_B64 : DS_Load2_Helper <0x00000075, "ds_read2_b64", VReg_128>;
-def DS_READ2ST64_B64 : DS_Load2_Helper <0x00000076, "ds_read2st64_b64", VReg_128>;
+defm DS_READ2_B32 : DS_Load2_Helper <0x00000037, "ds_read2_b32", VReg_64>;
+defm DS_READ2ST64_B32 : DS_Load2_Helper <0x00000038, "ds_read2st64_b32", VReg_64>;
+defm DS_READ2_B64 : DS_Load2_Helper <0x00000075, "ds_read2_b64", VReg_128>;
+defm DS_READ2ST64_B64 : DS_Load2_Helper <0x00000076, "ds_read2st64_b64", VReg_128>;
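The DS hunk above also flips every def to defm, which suggests the DS_* helpers became multiclasses that expand one mnemonic into one encoded instruction per subtarget generation; the VReg_32 -> VGPR_32 change alongside it is a mechanical register-class rename. A toy sketch of the def-to-defm pattern, with invented names:

class DSSketch <bits<8> op, string asm> {
  bits<8> Op = op;
  string Asm = asm;
}

multiclass DS_1A1D_Sketch <bits<8> op, string asm> {
  def _si : DSSketch<op, asm>;  // SI/CI-encoded variant
  def _vi : DSSketch<op, asm>;  // VI-encoded variant
}

// One defm now yields DS_ADD_U32_SKETCH_si and DS_ADD_U32_SKETCH_vi:
defm DS_ADD_U32_SKETCH : DS_1A1D_Sketch <0x0, "ds_add_u32">;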
//===----------------------------------------------------------------------===//
// MUBUF Instructions
//===----------------------------------------------------------------------===//
-//def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "buffer_load_format_x", []>;
-//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "buffer_load_format_xy", []>;
-//def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "buffer_load_format_xyz", []>;
-defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "buffer_load_format_xyzw", VReg_128>;
-//def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "buffer_store_format_x", []>;
-//def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "buffer_store_format_xy", []>;
-//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "buffer_store_format_xyz", []>;
-//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "buffer_store_format_xyzw", []>;
+//def BUFFER_LOAD_FORMAT_X : MUBUF_ <mubuf<0x00>, "buffer_load_format_x", []>;
+//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <mubuf<0x01>, "buffer_load_format_xy", []>;
+//def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <mubuf<0x02>, "buffer_load_format_xyz", []>;
+defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <mubuf<0x03>, "buffer_load_format_xyzw", VReg_128>;
+//def BUFFER_STORE_FORMAT_X : MUBUF_ <mubuf<0x04>, "buffer_store_format_x", []>;
+//def BUFFER_STORE_FORMAT_XY : MUBUF_ <mubuf<0x05>, "buffer_store_format_xy", []>;
+//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <mubuf<0x06>, "buffer_store_format_xyz", []>;
+//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <mubuf<0x07>, "buffer_store_format_xyzw", []>;
defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
- 0x00000008, "buffer_load_ubyte", VReg_32, i32, az_extloadi8_global
+ mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
>;
defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <
- 0x00000009, "buffer_load_sbyte", VReg_32, i32, sextloadi8_global
+ mubuf<0x09, 0x11>, "buffer_load_sbyte", VGPR_32, i32, sextloadi8_global
>;
defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <
- 0x0000000a, "buffer_load_ushort", VReg_32, i32, az_extloadi16_global
+ mubuf<0x0a, 0x12>, "buffer_load_ushort", VGPR_32, i32, az_extloadi16_global
>;
defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <
- 0x0000000b, "buffer_load_sshort", VReg_32, i32, sextloadi16_global
+ mubuf<0x0b, 0x13>, "buffer_load_sshort", VGPR_32, i32, sextloadi16_global
>;
defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <
- 0x0000000c, "buffer_load_dword", VReg_32, i32, global_load
+ mubuf<0x0c, 0x14>, "buffer_load_dword", VGPR_32, i32, global_load
>;
defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <
- 0x0000000d, "buffer_load_dwordx2", VReg_64, v2i32, global_load
+ mubuf<0x0d, 0x15>, "buffer_load_dwordx2", VReg_64, v2i32, global_load
>;
defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <
- 0x0000000e, "buffer_load_dwordx4", VReg_128, v4i32, global_load
+ mubuf<0x0e, 0x17>, "buffer_load_dwordx4", VReg_128, v4i32, global_load
>;
defm BUFFER_STORE_BYTE : MUBUF_Store_Helper <
- 0x00000018, "buffer_store_byte", VReg_32, i32, truncstorei8_global
+ mubuf<0x18>, "buffer_store_byte", VGPR_32, i32, truncstorei8_global
>;
defm BUFFER_STORE_SHORT : MUBUF_Store_Helper <
- 0x0000001a, "buffer_store_short", VReg_32, i32, truncstorei16_global
+ mubuf<0x1a>, "buffer_store_short", VGPR_32, i32, truncstorei16_global
>;
defm BUFFER_STORE_DWORD : MUBUF_Store_Helper <
- 0x0000001c, "buffer_store_dword", VReg_32, i32, global_store
+ mubuf<0x1c>, "buffer_store_dword", VGPR_32, i32, global_store
>;
defm BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
- 0x0000001d, "buffer_store_dwordx2", VReg_64, v2i32, global_store
+ mubuf<0x1d>, "buffer_store_dwordx2", VReg_64, v2i32, global_store
>;
defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
- 0x0000001e, "buffer_store_dwordx4", VReg_128, v4i32, global_store
+ mubuf<0x1e, 0x1f>, "buffer_store_dwordx4", VReg_128, v4i32, global_store
>;
-//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "buffer_atomic_swap", []>;
+
defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic <
- 0x00000030, "buffer_atomic_swap", VReg_32, i32, atomic_swap_global
+ mubuf<0x30, 0x40>, "buffer_atomic_swap", VGPR_32, i32, atomic_swap_global
>;
-//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "buffer_atomic_cmpswap", []>;
+//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <mubuf<0x31, 0x41>, "buffer_atomic_cmpswap", []>;
defm BUFFER_ATOMIC_ADD : MUBUF_Atomic <
- 0x00000032, "buffer_atomic_add", VReg_32, i32, atomic_add_global
+ mubuf<0x32, 0x42>, "buffer_atomic_add", VGPR_32, i32, atomic_add_global
>;
defm BUFFER_ATOMIC_SUB : MUBUF_Atomic <
- 0x00000033, "buffer_atomic_sub", VReg_32, i32, atomic_sub_global
+ mubuf<0x33, 0x43>, "buffer_atomic_sub", VGPR_32, i32, atomic_sub_global
>;
-//def BUFFER_ATOMIC_RSUB : MUBUF_ <0x00000034, "buffer_atomic_rsub", []>;
+//def BUFFER_ATOMIC_RSUB : MUBUF_ <mubuf<0x34>, "buffer_atomic_rsub", []>; // isn't on CI & VI
defm BUFFER_ATOMIC_SMIN : MUBUF_Atomic <
- 0x00000035, "buffer_atomic_smin", VReg_32, i32, atomic_min_global
+ mubuf<0x35, 0x44>, "buffer_atomic_smin", VGPR_32, i32, atomic_min_global
>;
defm BUFFER_ATOMIC_UMIN : MUBUF_Atomic <
- 0x00000036, "buffer_atomic_umin", VReg_32, i32, atomic_umin_global
+ mubuf<0x36, 0x45>, "buffer_atomic_umin", VGPR_32, i32, atomic_umin_global
>;
defm BUFFER_ATOMIC_SMAX : MUBUF_Atomic <
- 0x00000037, "buffer_atomic_smax", VReg_32, i32, atomic_max_global
+ mubuf<0x37, 0x46>, "buffer_atomic_smax", VGPR_32, i32, atomic_max_global
>;
defm BUFFER_ATOMIC_UMAX : MUBUF_Atomic <
- 0x00000038, "buffer_atomic_umax", VReg_32, i32, atomic_umax_global
+ mubuf<0x38, 0x47>, "buffer_atomic_umax", VGPR_32, i32, atomic_umax_global
>;
defm BUFFER_ATOMIC_AND : MUBUF_Atomic <
- 0x00000039, "buffer_atomic_and", VReg_32, i32, atomic_and_global
+ mubuf<0x39, 0x48>, "buffer_atomic_and", VGPR_32, i32, atomic_and_global
>;
defm BUFFER_ATOMIC_OR : MUBUF_Atomic <
- 0x0000003a, "buffer_atomic_or", VReg_32, i32, atomic_or_global
+ mubuf<0x3a, 0x49>, "buffer_atomic_or", VGPR_32, i32, atomic_or_global
>;
defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
- 0x0000003b, "buffer_atomic_xor", VReg_32, i32, atomic_xor_global
->;
-//def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "buffer_atomic_inc", []>;
-//def BUFFER_ATOMIC_DEC : MUBUF_ <0x0000003d, "buffer_atomic_dec", []>;
-//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <0x0000003e, "buffer_atomic_fcmpswap", []>;
-//def BUFFER_ATOMIC_FMIN : MUBUF_ <0x0000003f, "buffer_atomic_fmin", []>;
-//def BUFFER_ATOMIC_FMAX : MUBUF_ <0x00000040, "buffer_atomic_fmax", []>;
-//def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <0x00000050, "buffer_atomic_swap_x2", []>;
-//def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <0x00000051, "buffer_atomic_cmpswap_x2", []>;
-//def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <0x00000052, "buffer_atomic_add_x2", []>;
-//def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <0x00000053, "buffer_atomic_sub_x2", []>;
-//def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <0x00000054, "buffer_atomic_rsub_x2", []>;
-//def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <0x00000055, "buffer_atomic_smin_x2", []>;
-//def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <0x00000056, "buffer_atomic_umin_x2", []>;
-//def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <0x00000057, "buffer_atomic_smax_x2", []>;
-//def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <0x00000058, "buffer_atomic_umax_x2", []>;
-//def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <0x00000059, "buffer_atomic_and_x2", []>;
-//def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <0x0000005a, "buffer_atomic_or_x2", []>;
-//def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <0x0000005b, "buffer_atomic_xor_x2", []>;
-//def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <0x0000005c, "buffer_atomic_inc_x2", []>;
-//def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <0x0000005d, "buffer_atomic_dec_x2", []>;
-//def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <0x0000005e, "buffer_atomic_fcmpswap_x2", []>;
-//def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <0x0000005f, "buffer_atomic_fmin_x2", []>;
-//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <0x00000060, "buffer_atomic_fmax_x2", []>;
-//def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <0x00000070, "buffer_wbinvl1_sc", []>;
-//def BUFFER_WBINVL1 : MUBUF_WBINVL1 <0x00000071, "buffer_wbinvl1", []>;
+ mubuf<0x3b, 0x4a>, "buffer_atomic_xor", VGPR_32, i32, atomic_xor_global
+>;
+//def BUFFER_ATOMIC_INC : MUBUF_ <mubuf<0x3c, 0x4b>, "buffer_atomic_inc", []>;
+//def BUFFER_ATOMIC_DEC : MUBUF_ <mubuf<0x3d, 0x4c>, "buffer_atomic_dec", []>;
+//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <mubuf<0x3e>, "buffer_atomic_fcmpswap", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMIN : MUBUF_ <mubuf<0x3f>, "buffer_atomic_fmin", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMAX : MUBUF_ <mubuf<0x40>, "buffer_atomic_fmax", []>; // isn't on VI
+//def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <mubuf<0x50, 0x60>, "buffer_atomic_swap_x2", []>;
+//def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <mubuf<0x51, 0x61>, "buffer_atomic_cmpswap_x2", []>;
+//def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <mubuf<0x52, 0x62>, "buffer_atomic_add_x2", []>;
+//def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <mubuf<0x53, 0x63>, "buffer_atomic_sub_x2", []>;
+//def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <mubuf<0x54>, "buffer_atomic_rsub_x2", []>; // isn't on CI & VI
+//def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <mubuf<0x55, 0x64>, "buffer_atomic_smin_x2", []>;
+//def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <mubuf<0x56, 0x65>, "buffer_atomic_umin_x2", []>;
+//def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <mubuf<0x57, 0x66>, "buffer_atomic_smax_x2", []>;
+//def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <mubuf<0x58, 0x67>, "buffer_atomic_umax_x2", []>;
+//def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <mubuf<0x59, 0x68>, "buffer_atomic_and_x2", []>;
+//def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <mubuf<0x5a, 0x69>, "buffer_atomic_or_x2", []>;
+//def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <mubuf<0x5b, 0x6a>, "buffer_atomic_xor_x2", []>;
+//def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <mubuf<0x5c, 0x6b>, "buffer_atomic_inc_x2", []>;
+//def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <mubuf<0x5d, 0x6c>, "buffer_atomic_dec_x2", []>;
+//def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <mubuf<0x5e>, "buffer_atomic_fcmpswap_x2", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <mubuf<0x5f>, "buffer_atomic_fmin_x2", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <mubuf<0x60>, "buffer_atomic_fmax_x2", []>; // isn't on VI
+//def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <mubuf<0x70>, "buffer_wbinvl1_sc", []>; // isn't on CI & VI
+//def BUFFER_WBINVL1_VOL : MUBUF_WBINVL1 <mubuf<0x70, 0x3f>, "buffer_wbinvl1_vol", []>; // isn't on SI
+//def BUFFER_WBINVL1 : MUBUF_WBINVL1 <mubuf<0x71, 0x3e>, "buffer_wbinvl1", []>;
//===----------------------------------------------------------------------===//
// MTBUF Instructions
@@ -967,7 +1003,7 @@ defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "tbuffer_load_format_xy", []>;
//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "tbuffer_load_format_xyz", []>;
defm TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "tbuffer_load_format_xyzw", VReg_128>;
-defm TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "tbuffer_store_format_x", VReg_32>;
+defm TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "tbuffer_store_format_x", VGPR_32>;
defm TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "tbuffer_store_format_xy", VReg_64>;
defm TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "tbuffer_store_format_xyz", VReg_128>;
defm TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "tbuffer_store_format_xyzw", VReg_128>;
@@ -1004,63 +1040,63 @@ defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>;
//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>;
//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>;
-defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "image_sample">;
-defm IMAGE_SAMPLE_CL : MIMG_Sampler <0x00000021, "image_sample_cl">;
+defm IMAGE_SAMPLE : MIMG_Sampler_WQM <0x00000020, "image_sample">;
+defm IMAGE_SAMPLE_CL : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">;
defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "image_sample_d">;
defm IMAGE_SAMPLE_D_CL : MIMG_Sampler <0x00000023, "image_sample_d_cl">;
defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "image_sample_l">;
-defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "image_sample_b">;
-defm IMAGE_SAMPLE_B_CL : MIMG_Sampler <0x00000026, "image_sample_b_cl">;
+defm IMAGE_SAMPLE_B : MIMG_Sampler_WQM <0x00000025, "image_sample_b">;
+defm IMAGE_SAMPLE_B_CL : MIMG_Sampler_WQM <0x00000026, "image_sample_b_cl">;
defm IMAGE_SAMPLE_LZ : MIMG_Sampler <0x00000027, "image_sample_lz">;
-defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "image_sample_c">;
-defm IMAGE_SAMPLE_C_CL : MIMG_Sampler <0x00000029, "image_sample_c_cl">;
+defm IMAGE_SAMPLE_C : MIMG_Sampler_WQM <0x00000028, "image_sample_c">;
+defm IMAGE_SAMPLE_C_CL : MIMG_Sampler_WQM <0x00000029, "image_sample_c_cl">;
defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "image_sample_c_d">;
defm IMAGE_SAMPLE_C_D_CL : MIMG_Sampler <0x0000002b, "image_sample_c_d_cl">;
defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "image_sample_c_l">;
-defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "image_sample_c_b">;
-defm IMAGE_SAMPLE_C_B_CL : MIMG_Sampler <0x0000002e, "image_sample_c_b_cl">;
+defm IMAGE_SAMPLE_C_B : MIMG_Sampler_WQM <0x0000002d, "image_sample_c_b">;
+defm IMAGE_SAMPLE_C_B_CL : MIMG_Sampler_WQM <0x0000002e, "image_sample_c_b_cl">;
defm IMAGE_SAMPLE_C_LZ : MIMG_Sampler <0x0000002f, "image_sample_c_lz">;
-defm IMAGE_SAMPLE_O : MIMG_Sampler <0x00000030, "image_sample_o">;
-defm IMAGE_SAMPLE_CL_O : MIMG_Sampler <0x00000031, "image_sample_cl_o">;
+defm IMAGE_SAMPLE_O : MIMG_Sampler_WQM <0x00000030, "image_sample_o">;
+defm IMAGE_SAMPLE_CL_O : MIMG_Sampler_WQM <0x00000031, "image_sample_cl_o">;
defm IMAGE_SAMPLE_D_O : MIMG_Sampler <0x00000032, "image_sample_d_o">;
defm IMAGE_SAMPLE_D_CL_O : MIMG_Sampler <0x00000033, "image_sample_d_cl_o">;
defm IMAGE_SAMPLE_L_O : MIMG_Sampler <0x00000034, "image_sample_l_o">;
-defm IMAGE_SAMPLE_B_O : MIMG_Sampler <0x00000035, "image_sample_b_o">;
-defm IMAGE_SAMPLE_B_CL_O : MIMG_Sampler <0x00000036, "image_sample_b_cl_o">;
+defm IMAGE_SAMPLE_B_O : MIMG_Sampler_WQM <0x00000035, "image_sample_b_o">;
+defm IMAGE_SAMPLE_B_CL_O : MIMG_Sampler_WQM <0x00000036, "image_sample_b_cl_o">;
defm IMAGE_SAMPLE_LZ_O : MIMG_Sampler <0x00000037, "image_sample_lz_o">;
-defm IMAGE_SAMPLE_C_O : MIMG_Sampler <0x00000038, "image_sample_c_o">;
-defm IMAGE_SAMPLE_C_CL_O : MIMG_Sampler <0x00000039, "image_sample_c_cl_o">;
+defm IMAGE_SAMPLE_C_O : MIMG_Sampler_WQM <0x00000038, "image_sample_c_o">;
+defm IMAGE_SAMPLE_C_CL_O : MIMG_Sampler_WQM <0x00000039, "image_sample_c_cl_o">;
defm IMAGE_SAMPLE_C_D_O : MIMG_Sampler <0x0000003a, "image_sample_c_d_o">;
defm IMAGE_SAMPLE_C_D_CL_O : MIMG_Sampler <0x0000003b, "image_sample_c_d_cl_o">;
defm IMAGE_SAMPLE_C_L_O : MIMG_Sampler <0x0000003c, "image_sample_c_l_o">;
-defm IMAGE_SAMPLE_C_B_O : MIMG_Sampler <0x0000003d, "image_sample_c_b_o">;
-defm IMAGE_SAMPLE_C_B_CL_O : MIMG_Sampler <0x0000003e, "image_sample_c_b_cl_o">;
+defm IMAGE_SAMPLE_C_B_O : MIMG_Sampler_WQM <0x0000003d, "image_sample_c_b_o">;
+defm IMAGE_SAMPLE_C_B_CL_O : MIMG_Sampler_WQM <0x0000003e, "image_sample_c_b_cl_o">;
defm IMAGE_SAMPLE_C_LZ_O : MIMG_Sampler <0x0000003f, "image_sample_c_lz_o">;
-defm IMAGE_GATHER4 : MIMG_Gather <0x00000040, "image_gather4">;
-defm IMAGE_GATHER4_CL : MIMG_Gather <0x00000041, "image_gather4_cl">;
+defm IMAGE_GATHER4 : MIMG_Gather_WQM <0x00000040, "image_gather4">;
+defm IMAGE_GATHER4_CL : MIMG_Gather_WQM <0x00000041, "image_gather4_cl">;
defm IMAGE_GATHER4_L : MIMG_Gather <0x00000044, "image_gather4_l">;
-defm IMAGE_GATHER4_B : MIMG_Gather <0x00000045, "image_gather4_b">;
-defm IMAGE_GATHER4_B_CL : MIMG_Gather <0x00000046, "image_gather4_b_cl">;
+defm IMAGE_GATHER4_B : MIMG_Gather_WQM <0x00000045, "image_gather4_b">;
+defm IMAGE_GATHER4_B_CL : MIMG_Gather_WQM <0x00000046, "image_gather4_b_cl">;
defm IMAGE_GATHER4_LZ : MIMG_Gather <0x00000047, "image_gather4_lz">;
-defm IMAGE_GATHER4_C : MIMG_Gather <0x00000048, "image_gather4_c">;
-defm IMAGE_GATHER4_C_CL : MIMG_Gather <0x00000049, "image_gather4_c_cl">;
+defm IMAGE_GATHER4_C : MIMG_Gather_WQM <0x00000048, "image_gather4_c">;
+defm IMAGE_GATHER4_C_CL : MIMG_Gather_WQM <0x00000049, "image_gather4_c_cl">;
defm IMAGE_GATHER4_C_L : MIMG_Gather <0x0000004c, "image_gather4_c_l">;
-defm IMAGE_GATHER4_C_B : MIMG_Gather <0x0000004d, "image_gather4_c_b">;
-defm IMAGE_GATHER4_C_B_CL : MIMG_Gather <0x0000004e, "image_gather4_c_b_cl">;
+defm IMAGE_GATHER4_C_B : MIMG_Gather_WQM <0x0000004d, "image_gather4_c_b">;
+defm IMAGE_GATHER4_C_B_CL : MIMG_Gather_WQM <0x0000004e, "image_gather4_c_b_cl">;
defm IMAGE_GATHER4_C_LZ : MIMG_Gather <0x0000004f, "image_gather4_c_lz">;
-defm IMAGE_GATHER4_O : MIMG_Gather <0x00000050, "image_gather4_o">;
-defm IMAGE_GATHER4_CL_O : MIMG_Gather <0x00000051, "image_gather4_cl_o">;
+defm IMAGE_GATHER4_O : MIMG_Gather_WQM <0x00000050, "image_gather4_o">;
+defm IMAGE_GATHER4_CL_O : MIMG_Gather_WQM <0x00000051, "image_gather4_cl_o">;
defm IMAGE_GATHER4_L_O : MIMG_Gather <0x00000054, "image_gather4_l_o">;
-defm IMAGE_GATHER4_B_O : MIMG_Gather <0x00000055, "image_gather4_b_o">;
+defm IMAGE_GATHER4_B_O : MIMG_Gather_WQM <0x00000055, "image_gather4_b_o">;
defm IMAGE_GATHER4_B_CL_O : MIMG_Gather <0x00000056, "image_gather4_b_cl_o">;
defm IMAGE_GATHER4_LZ_O : MIMG_Gather <0x00000057, "image_gather4_lz_o">;
-defm IMAGE_GATHER4_C_O : MIMG_Gather <0x00000058, "image_gather4_c_o">;
-defm IMAGE_GATHER4_C_CL_O : MIMG_Gather <0x00000059, "image_gather4_c_cl_o">;
+defm IMAGE_GATHER4_C_O : MIMG_Gather_WQM <0x00000058, "image_gather4_c_o">;
+defm IMAGE_GATHER4_C_CL_O : MIMG_Gather_WQM <0x00000059, "image_gather4_c_cl_o">;
defm IMAGE_GATHER4_C_L_O : MIMG_Gather <0x0000005c, "image_gather4_c_l_o">;
-defm IMAGE_GATHER4_C_B_O : MIMG_Gather <0x0000005d, "image_gather4_c_b_o">;
-defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather <0x0000005e, "image_gather4_c_b_cl_o">;
+defm IMAGE_GATHER4_C_B_O : MIMG_Gather_WQM <0x0000005d, "image_gather4_c_b_o">;
+defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather_WQM <0x0000005e, "image_gather4_c_b_cl_o">;
defm IMAGE_GATHER4_C_LZ_O : MIMG_Gather <0x0000005f, "image_gather4_c_lz_o">;
-defm IMAGE_GET_LOD : MIMG_Sampler <0x00000060, "image_get_lod">;
+defm IMAGE_GET_LOD : MIMG_Sampler_WQM <0x00000060, "image_get_lod">;
defm IMAGE_SAMPLE_CD : MIMG_Sampler <0x00000068, "image_sample_cd">;
defm IMAGE_SAMPLE_CD_CL : MIMG_Sampler <0x00000069, "image_sample_cd_cl">;
defm IMAGE_SAMPLE_C_CD : MIMG_Sampler <0x0000006a, "image_sample_c_cd">;
@@ -1077,25 +1113,25 @@ defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "image_sample_c_cd_cl_o"
//===----------------------------------------------------------------------===//
let Predicates = [HasFlatAddressSpace] in {
-def FLAT_LOAD_UBYTE : FLAT_Load_Helper <0x00000008, "flat_load_ubyte", VReg_32>;
-def FLAT_LOAD_SBYTE : FLAT_Load_Helper <0x00000009, "flat_load_sbyte", VReg_32>;
-def FLAT_LOAD_USHORT : FLAT_Load_Helper <0x0000000a, "flat_load_ushort", VReg_32>;
-def FLAT_LOAD_SSHORT : FLAT_Load_Helper <0x0000000b, "flat_load_sshort", VReg_32>;
-def FLAT_LOAD_DWORD : FLAT_Load_Helper <0x0000000c, "flat_load_dword", VReg_32>;
+def FLAT_LOAD_UBYTE : FLAT_Load_Helper <0x00000008, "flat_load_ubyte", VGPR_32>;
+def FLAT_LOAD_SBYTE : FLAT_Load_Helper <0x00000009, "flat_load_sbyte", VGPR_32>;
+def FLAT_LOAD_USHORT : FLAT_Load_Helper <0x0000000a, "flat_load_ushort", VGPR_32>;
+def FLAT_LOAD_SSHORT : FLAT_Load_Helper <0x0000000b, "flat_load_sshort", VGPR_32>;
+def FLAT_LOAD_DWORD : FLAT_Load_Helper <0x0000000c, "flat_load_dword", VGPR_32>;
def FLAT_LOAD_DWORDX2 : FLAT_Load_Helper <0x0000000d, "flat_load_dwordx2", VReg_64>;
def FLAT_LOAD_DWORDX4 : FLAT_Load_Helper <0x0000000e, "flat_load_dwordx4", VReg_128>;
def FLAT_LOAD_DWORDX3 : FLAT_Load_Helper <0x00000010, "flat_load_dwordx3", VReg_96>;
def FLAT_STORE_BYTE : FLAT_Store_Helper <
- 0x00000018, "flat_store_byte", VReg_32
+ 0x00000018, "flat_store_byte", VGPR_32
>;
def FLAT_STORE_SHORT : FLAT_Store_Helper <
- 0x0000001a, "flat_store_short", VReg_32
+ 0x0000001a, "flat_store_short", VGPR_32
>;
def FLAT_STORE_DWORD : FLAT_Store_Helper <
- 0x0000001c, "flat_store_dword", VReg_32
+ 0x0000001c, "flat_store_dword", VGPR_32
>;
def FLAT_STORE_DWORDX2 : FLAT_Store_Helper <
@@ -1150,7 +1186,9 @@ def FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
// VOP1 Instructions
//===----------------------------------------------------------------------===//
-//def V_NOP : VOP1_ <0x00000000, "v_nop", []>;
+let vdst = 0, src0 = 0 in {
+defm V_NOP : VOP1_m <vop1<0x0>, (outs), (ins), "v_nop", [], "v_nop">;
+}
let isMoveImm = 1 in {
defm V_MOV_B32 : VOP1Inst <vop1<0x1>, "v_mov_b32", VOP_I32_I32>;
@@ -1158,16 +1196,20 @@ defm V_MOV_B32 : VOP1Inst <vop1<0x1>, "v_mov_b32", VOP_I32_I32>;
let Uses = [EXEC] in {
+// FIXME: Specify SchedRW for READFIRSTLANE_B32
+
def V_READFIRSTLANE_B32 : VOP1 <
0x00000002,
(outs SReg_32:$vdst),
- (ins VReg_32:$src0),
+ (ins VGPR_32:$src0),
"v_readfirstlane_b32 $vdst, $src0",
[]
>;
}
+let SchedRW = [WriteQuarterRate32] in {
+
defm V_CVT_I32_F64 : VOP1Inst <vop1<0x3>, "v_cvt_i32_f64",
VOP_I32_F64, fp_to_sint
>;
@@ -1193,9 +1235,11 @@ defm V_CVT_F16_F32 : VOP1Inst <vop1<0xa>, "v_cvt_f16_f32",
defm V_CVT_F32_F16 : VOP1Inst <vop1<0xb>, "v_cvt_f32_f16",
VOP_F32_I32, f16_to_fp
>;
-//defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "v_cvt_rpi_i32_f32", []>;
-//defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "v_cvt_flr_i32_f32", []>;
-//defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "v_cvt_off_f32_i4", []>;
+defm V_CVT_RPI_I32_F32 : VOP1Inst <vop1<0xc>, "v_cvt_rpi_i32_f32",
+ VOP_I32_F32, cvt_rpi_i32_f32>;
+defm V_CVT_FLR_I32_F32 : VOP1Inst <vop1<0xd>, "v_cvt_flr_i32_f32",
+ VOP_I32_F32, cvt_flr_i32_f32>;
+defm V_CVT_OFF_F32_I4 : VOP1Inst <vop1<0x0e>, "v_cvt_off_f32_i4", VOP_F32_I32>;
defm V_CVT_F32_F64 : VOP1Inst <vop1<0xf>, "v_cvt_f32_f64",
VOP_F32_F64, fround
>;
@@ -1221,493 +1265,580 @@ defm V_CVT_F64_U32 : VOP1Inst <vop1<0x16>, "v_cvt_f64_u32",
VOP_F64_I32, uint_to_fp
>;
-defm V_FRACT_F32 : VOP1Inst <vop1<0x20>, "v_fract_f32",
+} // let SchedRW = [WriteQuarterRate32]
+
+defm V_FRACT_F32 : VOP1Inst <vop1<0x20, 0x1b>, "v_fract_f32",
VOP_F32_F32, AMDGPUfract
>;
-defm V_TRUNC_F32 : VOP1Inst <vop1<0x21>, "v_trunc_f32",
+defm V_TRUNC_F32 : VOP1Inst <vop1<0x21, 0x1c>, "v_trunc_f32",
VOP_F32_F32, ftrunc
>;
-defm V_CEIL_F32 : VOP1Inst <vop1<0x22>, "v_ceil_f32",
+defm V_CEIL_F32 : VOP1Inst <vop1<0x22, 0x1d>, "v_ceil_f32",
VOP_F32_F32, fceil
>;
-defm V_RNDNE_F32 : VOP1Inst <vop1<0x23>, "v_rndne_f32",
+defm V_RNDNE_F32 : VOP1Inst <vop1<0x23, 0x1e>, "v_rndne_f32",
VOP_F32_F32, frint
>;
-defm V_FLOOR_F32 : VOP1Inst <vop1<0x24>, "v_floor_f32",
+defm V_FLOOR_F32 : VOP1Inst <vop1<0x24, 0x1f>, "v_floor_f32",
VOP_F32_F32, ffloor
>;
-defm V_EXP_F32 : VOP1Inst <vop1<0x25>, "v_exp_f32",
+defm V_EXP_F32 : VOP1Inst <vop1<0x25, 0x20>, "v_exp_f32",
VOP_F32_F32, fexp2
>;
-defm V_LOG_CLAMP_F32 : VOP1Inst <vop1<0x26>, "v_log_clamp_f32", VOP_F32_F32>;
-defm V_LOG_F32 : VOP1Inst <vop1<0x27>, "v_log_f32",
+
+let SchedRW = [WriteQuarterRate32] in {
+
+defm V_LOG_F32 : VOP1Inst <vop1<0x27, 0x21>, "v_log_f32",
VOP_F32_F32, flog2
>;
-
-defm V_RCP_CLAMP_F32 : VOP1Inst <vop1<0x28>, "v_rcp_clamp_f32", VOP_F32_F32>;
-defm V_RCP_LEGACY_F32 : VOP1Inst <vop1<0x29>, "v_rcp_legacy_f32", VOP_F32_F32>;
-defm V_RCP_F32 : VOP1Inst <vop1<0x2a>, "v_rcp_f32",
+defm V_RCP_F32 : VOP1Inst <vop1<0x2a, 0x22>, "v_rcp_f32",
VOP_F32_F32, AMDGPUrcp
>;
-defm V_RCP_IFLAG_F32 : VOP1Inst <vop1<0x2b>, "v_rcp_iflag_f32", VOP_F32_F32>;
-defm V_RSQ_CLAMP_F32 : VOP1Inst <vop1<0x2c>, "v_rsq_clamp_f32",
- VOP_F32_F32, AMDGPUrsq_clamped
+defm V_RCP_IFLAG_F32 : VOP1Inst <vop1<0x2b, 0x23>, "v_rcp_iflag_f32",
+ VOP_F32_F32
>;
-defm V_RSQ_LEGACY_F32 : VOP1Inst <vop1<0x2d>, "v_rsq_legacy_f32",
- VOP_F32_F32, AMDGPUrsq_legacy
->;
-defm V_RSQ_F32 : VOP1Inst <vop1<0x2e>, "v_rsq_f32",
+defm V_RSQ_F32 : VOP1Inst <vop1<0x2e, 0x24>, "v_rsq_f32",
VOP_F32_F32, AMDGPUrsq
>;
-defm V_RCP_F64 : VOP1Inst <vop1<0x2f>, "v_rcp_f64",
+
+} // let SchedRW = [WriteQuarterRate32]
+
+let SchedRW = [WriteDouble] in {
+
+defm V_RCP_F64 : VOP1Inst <vop1<0x2f, 0x25>, "v_rcp_f64",
VOP_F64_F64, AMDGPUrcp
>;
-defm V_RCP_CLAMP_F64 : VOP1Inst <vop1<0x30>, "v_rcp_clamp_f64", VOP_F64_F64>;
-defm V_RSQ_F64 : VOP1Inst <vop1<0x31>, "v_rsq_f64",
+defm V_RSQ_F64 : VOP1Inst <vop1<0x31, 0x26>, "v_rsq_f64",
VOP_F64_F64, AMDGPUrsq
>;
-defm V_RSQ_CLAMP_F64 : VOP1Inst <vop1<0x32>, "v_rsq_clamp_f64",
- VOP_F64_F64, AMDGPUrsq_clamped
->;
-defm V_SQRT_F32 : VOP1Inst <vop1<0x33>, "v_sqrt_f32",
+
+} // let SchedRW = [WriteDouble]
+
+defm V_SQRT_F32 : VOP1Inst <vop1<0x33, 0x27>, "v_sqrt_f32",
VOP_F32_F32, fsqrt
>;
-defm V_SQRT_F64 : VOP1Inst <vop1<0x34>, "v_sqrt_f64",
+
+let SchedRW = [WriteDouble] in {
+
+defm V_SQRT_F64 : VOP1Inst <vop1<0x34, 0x28>, "v_sqrt_f64",
VOP_F64_F64, fsqrt
>;
-defm V_SIN_F32 : VOP1Inst <vop1<0x35>, "v_sin_f32",
+
+} // let SchedRW = [WriteDouble]
+
+defm V_SIN_F32 : VOP1Inst <vop1<0x35, 0x29>, "v_sin_f32",
VOP_F32_F32, AMDGPUsin
>;
-defm V_COS_F32 : VOP1Inst <vop1<0x36>, "v_cos_f32",
+defm V_COS_F32 : VOP1Inst <vop1<0x36, 0x2a>, "v_cos_f32",
VOP_F32_F32, AMDGPUcos
>;
-defm V_NOT_B32 : VOP1Inst <vop1<0x37>, "v_not_b32", VOP_I32_I32>;
-defm V_BFREV_B32 : VOP1Inst <vop1<0x38>, "v_bfrev_b32", VOP_I32_I32>;
-defm V_FFBH_U32 : VOP1Inst <vop1<0x39>, "v_ffbh_u32", VOP_I32_I32>;
-defm V_FFBL_B32 : VOP1Inst <vop1<0x3a>, "v_ffbl_b32", VOP_I32_I32>;
-defm V_FFBH_I32 : VOP1Inst <vop1<0x3b>, "v_ffbh_i32", VOP_I32_I32>;
-//defm V_FREXP_EXP_I32_F64 : VOPInst <0x0000003c, "v_frexp_exp_i32_f64", VOP_I32_F32>;
-defm V_FREXP_MANT_F64 : VOP1Inst <vop1<0x3d>, "v_frexp_mant_f64", VOP_F64_F64>;
-defm V_FRACT_F64 : VOP1Inst <vop1<0x3e>, "v_fract_f64", VOP_F64_F64>;
-//defm V_FREXP_EXP_I32_F32 : VOPInst <0x0000003f, "v_frexp_exp_i32_f32", VOP_I32_F32>;
-defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40>, "v_frexp_mant_f32", VOP_F32_F32>;
-//def V_CLREXCP : VOP1_ <0x00000041, "v_clrexcp", []>;
-defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42>, "v_movreld_b32", VOP_I32_I32>;
-defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43>, "v_movrels_b32", VOP_I32_I32>;
-defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44>, "v_movrelsd_b32", VOP_I32_I32>;
+defm V_NOT_B32 : VOP1Inst <vop1<0x37, 0x2b>, "v_not_b32", VOP_I32_I32>;
+defm V_BFREV_B32 : VOP1Inst <vop1<0x38, 0x2c>, "v_bfrev_b32", VOP_I32_I32>;
+defm V_FFBH_U32 : VOP1Inst <vop1<0x39, 0x2d>, "v_ffbh_u32", VOP_I32_I32>;
+defm V_FFBL_B32 : VOP1Inst <vop1<0x3a, 0x2e>, "v_ffbl_b32", VOP_I32_I32>;
+defm V_FFBH_I32 : VOP1Inst <vop1<0x3b, 0x2f>, "v_ffbh_i32", VOP_I32_I32>;
+defm V_FREXP_EXP_I32_F64 : VOP1Inst <vop1<0x3c, 0x30>, "v_frexp_exp_i32_f64",
+ VOP_I32_F64
+>;
+defm V_FREXP_MANT_F64 : VOP1Inst <vop1<0x3d, 0x31>, "v_frexp_mant_f64",
+ VOP_F64_F64
+>;
+defm V_FRACT_F64 : VOP1Inst <vop1<0x3e, 0x32>, "v_fract_f64", VOP_F64_F64>;
+defm V_FREXP_EXP_I32_F32 : VOP1Inst <vop1<0x3f, 0x33>, "v_frexp_exp_i32_f32",
+ VOP_I32_F32
+>;
+defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40, 0x34>, "v_frexp_mant_f32",
+ VOP_F32_F32
+>;
+let vdst = 0, src0 = 0 in {
+defm V_CLREXCP : VOP1_m <vop1<0x41, 0x35>, (outs), (ins), "v_clrexcp", [],
+ "v_clrexcp"
+>;
+}
+defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_I32_I32>;
+defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_I32_I32>;
+defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44, 0x38>, "v_movrelsd_b32", VOP_I32_I32>;
+
+// These instructions only exist on SI and CI
+let SubtargetPredicate = isSICI in {
+
+let SchedRW = [WriteQuarterRate32] in {
+
+defm V_LOG_CLAMP_F32 : VOP1InstSI <vop1<0x26>, "v_log_clamp_f32", VOP_F32_F32>;
+defm V_RCP_CLAMP_F32 : VOP1InstSI <vop1<0x28>, "v_rcp_clamp_f32", VOP_F32_F32>;
+defm V_RCP_LEGACY_F32 : VOP1InstSI <vop1<0x29>, "v_rcp_legacy_f32", VOP_F32_F32>;
+defm V_RSQ_CLAMP_F32 : VOP1InstSI <vop1<0x2c>, "v_rsq_clamp_f32",
+ VOP_F32_F32, AMDGPUrsq_clamped
+>;
+defm V_RSQ_LEGACY_F32 : VOP1InstSI <vop1<0x2d>, "v_rsq_legacy_f32",
+ VOP_F32_F32, AMDGPUrsq_legacy
+>;
+
+} // End let SchedRW = [WriteQuarterRate32]
+
+let SchedRW = [WriteDouble] in {
+
+defm V_RCP_CLAMP_F64 : VOP1InstSI <vop1<0x30>, "v_rcp_clamp_f64", VOP_F64_F64>;
+defm V_RSQ_CLAMP_F64 : VOP1InstSI <vop1<0x32>, "v_rsq_clamp_f64",
+ VOP_F64_F64, AMDGPUrsq_clamped
+>;
+
+} // End SchedRW = [WriteDouble]
+} // End SubtargetPredicate = isSICI
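Two idioms dominate the VOP1 hunk above: SchedRW lists (WriteQuarterRate32, WriteDouble) that hand latency classes to the machine scheduler, and the SubtargetPredicate = isSICI wrapper that keeps the clamp/legacy variants from being selected or assembled on VI. A stripped-down sketch of the gating idiom, with a toy record type standing in for the real instruction hierarchy:

class GatedSketch {
  string SubtargetPredicate = "";
  list<string> SchedRW = [];
}

// Both fields can be bound for a whole region with a single let:
let SubtargetPredicate = "isSICI", SchedRW = ["WriteDouble"] in {
def RSQ_CLAMP_F64_SKETCH : GatedSketch;
}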
//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//
-def V_INTERP_P1_F32 : VINTRP <
- 0x00000000,
- (outs VReg_32:$dst),
- (ins VReg_32:$i, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0),
+// FIXME: Specify SchedRW for VINTRP instructions.
+defm V_INTERP_P1_F32 : VINTRP_m <
+ 0x00000000, "v_interp_p1_f32",
+ (outs VGPR_32:$dst),
+ (ins VGPR_32:$i, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0),
"v_interp_p1_f32 $dst, $i, $attr_chan, $attr, [$m0]",
- []> {
- let DisableEncoding = "$m0";
-}
+ "$m0">;
-def V_INTERP_P2_F32 : VINTRP <
- 0x00000001,
- (outs VReg_32:$dst),
- (ins VReg_32:$src0, VReg_32:$j, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0),
+defm V_INTERP_P2_F32 : VINTRP_m <
+ 0x00000001, "v_interp_p2_f32",
+ (outs VGPR_32:$dst),
+ (ins VGPR_32:$src0, VGPR_32:$j, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0),
"v_interp_p2_f32 $dst, [$src0], $j, $attr_chan, $attr, [$m0]",
- []> {
-
- let Constraints = "$src0 = $dst";
- let DisableEncoding = "$src0,$m0";
+ "$src0,$m0",
+ "$src0 = $dst">;
-}
-
-def V_INTERP_MOV_F32 : VINTRP <
- 0x00000002,
- (outs VReg_32:$dst),
+defm V_INTERP_MOV_F32 : VINTRP_m <
+ 0x00000002, "v_interp_mov_f32",
+ (outs VGPR_32:$dst),
(ins InterpSlot:$src0, i32imm:$attr_chan, i32imm:$attr, M0Reg:$m0),
"v_interp_mov_f32 $dst, $src0, $attr_chan, $attr, [$m0]",
- []> {
- let DisableEncoding = "$m0";
-}
+ "$m0">;
//===----------------------------------------------------------------------===//
// VOP2 Instructions
//===----------------------------------------------------------------------===//
-def V_CNDMASK_B32_e32 : VOP2 <0x00000000, (outs VReg_32:$dst),
- (ins VSrc_32:$src0, VReg_32:$src1, VCCReg:$vcc),
- "v_cndmask_b32_e32 $dst, $src0, $src1, [$vcc]",
- []
->{
- let DisableEncoding = "$vcc";
-}
-
-def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst),
+defm V_CNDMASK_B32_e64 : VOP3_m_nomods <vop3<0x100>, (outs VGPR_32:$dst),
(ins VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2),
"v_cndmask_b32_e64 $dst, $src0, $src1, $src2",
- [(set i32:$dst, (select i1:$src2, i32:$src1, i32:$src0))]
-> {
- let src0_modifiers = 0;
- let src1_modifiers = 0;
- let src2_modifiers = 0;
-}
-
-def V_READLANE_B32 : VOP2 <
- 0x00000001,
- (outs SReg_32:$vdst),
- (ins VReg_32:$src0, SSrc_32:$vsrc1),
- "v_readlane_b32 $vdst, $src0, $vsrc1",
- []
+ [(set i32:$dst, (select i1:$src2, i32:$src1, i32:$src0))],
+ "v_cndmask_b32_e64", 3
>;
-def V_WRITELANE_B32 : VOP2 <
- 0x00000002,
- (outs VReg_32:$vdst),
- (ins SReg_32:$src0, SSrc_32:$vsrc1),
- "v_writelane_b32 $vdst, $src0, $vsrc1",
- []
->;
let isCommutable = 1 in {
-defm V_ADD_F32 : VOP2Inst <vop2<0x3>, "v_add_f32",
+defm V_ADD_F32 : VOP2Inst <vop2<0x3, 0x1>, "v_add_f32",
VOP_F32_F32_F32, fadd
>;
-defm V_SUB_F32 : VOP2Inst <vop2<0x4>, "v_sub_f32", VOP_F32_F32_F32, fsub>;
-defm V_SUBREV_F32 : VOP2Inst <vop2<0x5>, "v_subrev_f32",
+defm V_SUB_F32 : VOP2Inst <vop2<0x4, 0x2>, "v_sub_f32", VOP_F32_F32_F32, fsub>;
+defm V_SUBREV_F32 : VOP2Inst <vop2<0x5, 0x3>, "v_subrev_f32",
VOP_F32_F32_F32, null_frag, "v_sub_f32"
>;
} // End isCommutable = 1
let isCommutable = 1 in {
-defm V_MAC_LEGACY_F32 : VOP2Inst <vop2<0x6>, "v_mac_legacy_f32",
- VOP_F32_F32_F32
->;
-
-defm V_MUL_LEGACY_F32 : VOP2Inst <vop2<0x7>, "v_mul_legacy_f32",
+defm V_MUL_LEGACY_F32 : VOP2Inst <vop2<0x7, 0x4>, "v_mul_legacy_f32",
VOP_F32_F32_F32, int_AMDGPU_mul
>;
-defm V_MUL_F32 : VOP2Inst <vop2<0x8>, "v_mul_f32",
+defm V_MUL_F32 : VOP2Inst <vop2<0x8, 0x5>, "v_mul_f32",
VOP_F32_F32_F32, fmul
>;
-defm V_MUL_I32_I24 : VOP2Inst <vop2<0x9>, "v_mul_i32_i24",
+defm V_MUL_I32_I24 : VOP2Inst <vop2<0x9, 0x6>, "v_mul_i32_i24",
VOP_I32_I32_I32, AMDGPUmul_i24
>;
-//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "v_mul_hi_i32_i24", []>;
-defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb>, "v_mul_u32_u24",
- VOP_I32_I32_I32, AMDGPUmul_u24
->;
-//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "v_mul_hi_u32_u24", []>;
-
-defm V_MIN_LEGACY_F32 : VOP2Inst <vop2<0xd>, "v_min_legacy_f32",
- VOP_F32_F32_F32, AMDGPUfmin_legacy
+defm V_MUL_HI_I32_I24 : VOP2Inst <vop2<0xa, 0x7>, "v_mul_hi_i32_i24",
+ VOP_I32_I32_I32
>;
-defm V_MAX_LEGACY_F32 : VOP2Inst <vop2<0xe>, "v_max_legacy_f32",
- VOP_F32_F32_F32, AMDGPUfmax_legacy
+defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb, 0x8>, "v_mul_u32_u24",
+ VOP_I32_I32_I32, AMDGPUmul_u24
>;
-defm V_MIN_F32 : VOP2Inst <vop2<0xf>, "v_min_f32", VOP_F32_F32_F32, fminnum>;
-defm V_MAX_F32 : VOP2Inst <vop2<0x10>, "v_max_f32", VOP_F32_F32_F32, fmaxnum>;
-defm V_MIN_I32 : VOP2Inst <vop2<0x11>, "v_min_i32", VOP_I32_I32_I32, AMDGPUsmin>;
-defm V_MAX_I32 : VOP2Inst <vop2<0x12>, "v_max_i32", VOP_I32_I32_I32, AMDGPUsmax>;
-defm V_MIN_U32 : VOP2Inst <vop2<0x13>, "v_min_u32", VOP_I32_I32_I32, AMDGPUumin>;
-defm V_MAX_U32 : VOP2Inst <vop2<0x14>, "v_max_u32", VOP_I32_I32_I32, AMDGPUumax>;
+defm V_MUL_HI_U32_U24 : VOP2Inst <vop2<0xc, 0x9>, "v_mul_hi_u32_u24",
+ VOP_I32_I32_I32
+>;
-defm V_LSHR_B32 : VOP2Inst <vop2<0x15>, "v_lshr_b32", VOP_I32_I32_I32, srl>;
+defm V_MIN_F32 : VOP2Inst <vop2<0xf, 0xa>, "v_min_f32", VOP_F32_F32_F32,
+ fminnum>;
+defm V_MAX_F32 : VOP2Inst <vop2<0x10, 0xb>, "v_max_f32", VOP_F32_F32_F32,
+ fmaxnum>;
+defm V_MIN_I32 : VOP2Inst <vop2<0x11, 0xc>, "v_min_i32", VOP_I32_I32_I32>;
+defm V_MAX_I32 : VOP2Inst <vop2<0x12, 0xd>, "v_max_i32", VOP_I32_I32_I32>;
+defm V_MIN_U32 : VOP2Inst <vop2<0x13, 0xe>, "v_min_u32", VOP_I32_I32_I32>;
+defm V_MAX_U32 : VOP2Inst <vop2<0x14, 0xf>, "v_max_u32", VOP_I32_I32_I32>;
defm V_LSHRREV_B32 : VOP2Inst <
- vop2<0x16>, "v_lshrrev_b32", VOP_I32_I32_I32, null_frag, "v_lshr_b32"
+ vop2<0x16, 0x10>, "v_lshrrev_b32", VOP_I32_I32_I32, null_frag,
+ "v_lshr_b32"
>;
-defm V_ASHR_I32 : VOP2Inst <vop2<0x17>, "v_ashr_i32",
- VOP_I32_I32_I32, sra
->;
defm V_ASHRREV_I32 : VOP2Inst <
- vop2<0x18>, "v_ashrrev_i32", VOP_I32_I32_I32, null_frag, "v_ashr_i32"
+ vop2<0x18, 0x11>, "v_ashrrev_i32", VOP_I32_I32_I32, null_frag,
+ "v_ashr_i32"
>;
-let hasPostISelHook = 1 in {
-
-defm V_LSHL_B32 : VOP2Inst <vop2<0x19>, "v_lshl_b32", VOP_I32_I32_I32, shl>;
-
-}
defm V_LSHLREV_B32 : VOP2Inst <
- vop2<0x1a>, "v_lshlrev_b32", VOP_I32_I32_I32, null_frag, "v_lshl_b32"
+ vop2<0x1a, 0x12>, "v_lshlrev_b32", VOP_I32_I32_I32, null_frag,
+ "v_lshl_b32"
>;
-defm V_AND_B32 : VOP2Inst <vop2<0x1b>, "v_and_b32",
- VOP_I32_I32_I32, and>;
-defm V_OR_B32 : VOP2Inst <vop2<0x1c>, "v_or_b32",
- VOP_I32_I32_I32, or
->;
-defm V_XOR_B32 : VOP2Inst <vop2<0x1d>, "v_xor_b32",
- VOP_I32_I32_I32, xor
->;
-
-} // End isCommutable = 1
-
-defm V_BFM_B32 : VOP2Inst <vop2<0x1e>, "v_bfm_b32",
- VOP_I32_I32_I32, AMDGPUbfm>;
+defm V_AND_B32 : VOP2Inst <vop2<0x1b, 0x13>, "v_and_b32", VOP_I32_I32_I32>;
+defm V_OR_B32 : VOP2Inst <vop2<0x1c, 0x14>, "v_or_b32", VOP_I32_I32_I32>;
+defm V_XOR_B32 : VOP2Inst <vop2<0x1d, 0x15>, "v_xor_b32", VOP_I32_I32_I32>;
-let isCommutable = 1 in {
-defm V_MAC_F32 : VOP2Inst <vop2<0x1f>, "v_mac_f32", VOP_F32_F32_F32>;
+defm V_MAC_F32 : VOP2Inst <vop2<0x1f, 0x16>, "v_mac_f32", VOP_F32_F32_F32>;
} // End isCommutable = 1
-defm V_MADMK_F32 : VOP2Inst <vop2<0x20>, "v_madmk_f32", VOP_F32_F32_F32>;
+defm V_MADMK_F32 : VOP2MADK <vop2<0x20, 0x17>, "v_madmk_f32">;
let isCommutable = 1 in {
-defm V_MADAK_F32 : VOP2Inst <vop2<0x21>, "v_madak_f32", VOP_F32_F32_F32>;
+defm V_MADAK_F32 : VOP2MADK <vop2<0x21, 0x18>, "v_madak_f32">;
} // End isCommutable = 1
-
-defm V_BCNT_U32_B32 : VOP2Inst <vop2<0x22>, "v_bcnt_u32_b32", VOP_I32_I32_I32>;
-defm V_MBCNT_LO_U32_B32 : VOP2Inst <vop2<0x23>, "v_mbcnt_lo_u32_b32",
-
- VOP_I32_I32_I32
->;
-defm V_MBCNT_HI_U32_B32 : VOP2Inst <vop2<0x24>, "v_mbcnt_hi_u32_b32",
- VOP_I32_I32_I32
->;
-
let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.
-defm V_ADD_I32 : VOP2bInst <vop2<0x25>, "v_add_i32",
+
+// V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 were renamed to *_U32 in VI,
+// but the VI instructions behave the same as the SI versions.
+defm V_ADD_I32 : VOP2bInst <vop2<0x25, 0x19>, "v_add_i32",
VOP_I32_I32_I32, add
>;
-defm V_SUB_I32 : VOP2bInst <vop2<0x26>, "v_sub_i32",
- VOP_I32_I32_I32, sub
->;
-defm V_SUBREV_I32 : VOP2bInst <vop2<0x27>, "v_subrev_i32",
+defm V_SUB_I32 : VOP2bInst <vop2<0x26, 0x1a>, "v_sub_i32", VOP_I32_I32_I32>;
+
+defm V_SUBREV_I32 : VOP2bInst <vop2<0x27, 0x1b>, "v_subrev_i32",
VOP_I32_I32_I32, null_frag, "v_sub_i32"
>;
let Uses = [VCC] in { // Carry-in comes from VCC
-defm V_ADDC_U32 : VOP2bInst <vop2<0x28>, "v_addc_u32",
- VOP_I32_I32_I32_VCC, adde
+defm V_ADDC_U32 : VOP2bInst <vop2<0x28, 0x1c>, "v_addc_u32",
+ VOP_I32_I32_I32_VCC
>;
-defm V_SUBB_U32 : VOP2bInst <vop2<0x29>, "v_subb_u32",
- VOP_I32_I32_I32_VCC, sube
+defm V_SUBB_U32 : VOP2bInst <vop2<0x29, 0x1d>, "v_subb_u32",
+ VOP_I32_I32_I32_VCC
>;
-defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a>, "v_subbrev_u32",
+defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a, 0x1e>, "v_subbrev_u32",
VOP_I32_I32_I32_VCC, null_frag, "v_subb_u32"
>;
} // End Uses = [VCC]
} // End isCommutable = 1, Defs = [VCC]
-defm V_LDEXP_F32 : VOP2Inst <vop2<0x2b>, "v_ldexp_f32",
+defm V_READLANE_B32 : VOP2SI_3VI_m <
+ vop3 <0x001, 0x289>,
+ "v_readlane_b32",
+ (outs SReg_32:$vdst),
+ (ins VGPR_32:$src0, SCSrc_32:$src1),
+ "v_readlane_b32 $vdst, $src0, $src1"
+>;
+
+defm V_WRITELANE_B32 : VOP2SI_3VI_m <
+ vop3 <0x002, 0x28a>,
+ "v_writelane_b32",
+ (outs VGPR_32:$vdst),
+ (ins SReg_32:$src0, SCSrc_32:$src1),
+ "v_writelane_b32 $vdst, $src0, $src1"
+>;
+
+// These instructions only exist on SI and CI
+let SubtargetPredicate = isSICI in {
+
+defm V_MIN_LEGACY_F32 : VOP2InstSI <vop2<0xd>, "v_min_legacy_f32",
+ VOP_F32_F32_F32, AMDGPUfmin_legacy
+>;
+defm V_MAX_LEGACY_F32 : VOP2InstSI <vop2<0xe>, "v_max_legacy_f32",
+ VOP_F32_F32_F32, AMDGPUfmax_legacy
+>;
+
+let isCommutable = 1 in {
+defm V_LSHR_B32 : VOP2InstSI <vop2<0x15>, "v_lshr_b32", VOP_I32_I32_I32>;
+defm V_ASHR_I32 : VOP2InstSI <vop2<0x17>, "v_ashr_i32", VOP_I32_I32_I32>;
+defm V_LSHL_B32 : VOP2InstSI <vop2<0x19>, "v_lshl_b32", VOP_I32_I32_I32>;
+} // End isCommutable = 1
+} // End SubtargetPredicate = isSICI
+
+let isCommutable = 1 in {
+defm V_MAC_LEGACY_F32 : VOP2_VI3_Inst <vop23<0x6, 0x28e>, "v_mac_legacy_f32",
+ VOP_F32_F32_F32
+>;
+} // End isCommutable = 1
+
+defm V_BFM_B32 : VOP2_VI3_Inst <vop23<0x1e, 0x293>, "v_bfm_b32", VOP_I32_I32_I32,
+ AMDGPUbfm
+>;
+defm V_BCNT_U32_B32 : VOP2_VI3_Inst <vop23<0x22, 0x28b>, "v_bcnt_u32_b32",
+ VOP_I32_I32_I32
+>;
+defm V_MBCNT_LO_U32_B32 : VOP2_VI3_Inst <vop23<0x23, 0x28c>, "v_mbcnt_lo_u32_b32",
+ VOP_I32_I32_I32
+>;
+defm V_MBCNT_HI_U32_B32 : VOP2_VI3_Inst <vop23<0x24, 0x28d>, "v_mbcnt_hi_u32_b32",
+ VOP_I32_I32_I32
+>;
+defm V_LDEXP_F32 : VOP2_VI3_Inst <vop23<0x2b, 0x288>, "v_ldexp_f32",
VOP_F32_F32_I32, AMDGPUldexp
>;
-////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "v_cvt_pkaccum_u8_f32", []>;
-////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "v_cvt_pknorm_i16_f32", []>;
-////def V_CVT_PKNORM_U16_F32 : VOP2_U16 <0x0000002e, "v_cvt_pknorm_u16_f32", []>;
-defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <vop2<0x2f>, "v_cvt_pkrtz_f16_f32",
- VOP_I32_F32_F32, int_SI_packf16
+
+defm V_CVT_PKACCUM_U8_F32 : VOP2_VI3_Inst <vop23<0x2c, 0x1f0>, "v_cvt_pkaccum_u8_f32",
+ VOP_I32_F32_I32>; // TODO: set "Uses = dst"
+
+defm V_CVT_PKNORM_I16_F32 : VOP2_VI3_Inst <vop23<0x2d, 0x294>, "v_cvt_pknorm_i16_f32",
+ VOP_I32_F32_F32
+>;
+defm V_CVT_PKNORM_U16_F32 : VOP2_VI3_Inst <vop23<0x2e, 0x295>, "v_cvt_pknorm_u16_f32",
+ VOP_I32_F32_F32
+>;
+defm V_CVT_PKRTZ_F16_F32 : VOP2_VI3_Inst <vop23<0x2f, 0x296>, "v_cvt_pkrtz_f16_f32",
+ VOP_I32_F32_F32, int_SI_packf16
+>;
+defm V_CVT_PK_U16_U32 : VOP2_VI3_Inst <vop23<0x30, 0x297>, "v_cvt_pk_u16_u32",
+ VOP_I32_I32_I32
+>;
+defm V_CVT_PK_I16_I32 : VOP2_VI3_Inst <vop23<0x31, 0x298>, "v_cvt_pk_i16_i32",
+ VOP_I32_I32_I32
>;
-////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "v_cvt_pk_u16_u32", []>;
-////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "v_cvt_pk_i16_i32", []>;
//===----------------------------------------------------------------------===//
// VOP3 Instructions
//===----------------------------------------------------------------------===//
let isCommutable = 1 in {
-defm V_MAD_LEGACY_F32 : VOP3Inst <vop3<0x140>, "v_mad_legacy_f32",
+defm V_MAD_LEGACY_F32 : VOP3Inst <vop3<0x140, 0x1c0>, "v_mad_legacy_f32",
VOP_F32_F32_F32_F32
>;
-defm V_MAD_F32 : VOP3Inst <vop3<0x141>, "v_mad_f32",
+defm V_MAD_F32 : VOP3Inst <vop3<0x141, 0x1c1>, "v_mad_f32",
VOP_F32_F32_F32_F32, fmad
>;
-defm V_MAD_I32_I24 : VOP3Inst <vop3<0x142>, "v_mad_i32_i24",
+defm V_MAD_I32_I24 : VOP3Inst <vop3<0x142, 0x1c2>, "v_mad_i32_i24",
VOP_I32_I32_I32_I32, AMDGPUmad_i24
>;
-defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143>, "v_mad_u32_u24",
+defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143, 0x1c3>, "v_mad_u32_u24",
VOP_I32_I32_I32_I32, AMDGPUmad_u24
>;
} // End isCommutable = 1
-defm V_CUBEID_F32 : VOP3Inst <vop3<0x144>, "v_cubeid_f32",
+defm V_CUBEID_F32 : VOP3Inst <vop3<0x144, 0x1c4>, "v_cubeid_f32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBESC_F32 : VOP3Inst <vop3<0x145>, "v_cubesc_f32",
+defm V_CUBESC_F32 : VOP3Inst <vop3<0x145, 0x1c5>, "v_cubesc_f32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBETC_F32 : VOP3Inst <vop3<0x146>, "v_cubetc_f32",
+defm V_CUBETC_F32 : VOP3Inst <vop3<0x146, 0x1c6>, "v_cubetc_f32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147>, "v_cubema_f32",
+defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147, 0x1c7>, "v_cubema_f32",
VOP_F32_F32_F32_F32
>;
-defm V_BFE_U32 : VOP3Inst <vop3<0x148>, "v_bfe_u32",
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+defm V_BFE_U32 : VOP3Inst <vop3<0x148, 0x1c8>, "v_bfe_u32",
VOP_I32_I32_I32_I32, AMDGPUbfe_u32
>;
-defm V_BFE_I32 : VOP3Inst <vop3<0x149>, "v_bfe_i32",
+defm V_BFE_I32 : VOP3Inst <vop3<0x149, 0x1c9>, "v_bfe_i32",
VOP_I32_I32_I32_I32, AMDGPUbfe_i32
>;
-defm V_BFI_B32 : VOP3Inst <vop3<0x14a>, "v_bfi_b32",
+} // End hasSideEffects = 0, mayLoad = 0, mayStore = 0
+
+defm V_BFI_B32 : VOP3Inst <vop3<0x14a, 0x1ca>, "v_bfi_b32",
VOP_I32_I32_I32_I32, AMDGPUbfi
>;
let isCommutable = 1 in {
-defm V_FMA_F32 : VOP3Inst <vop3<0x14b>, "v_fma_f32",
+defm V_FMA_F32 : VOP3Inst <vop3<0x14b, 0x1cb>, "v_fma_f32",
VOP_F32_F32_F32_F32, fma
>;
-defm V_FMA_F64 : VOP3Inst <vop3<0x14c>, "v_fma_f64",
+defm V_FMA_F64 : VOP3Inst <vop3<0x14c, 0x1cc>, "v_fma_f64",
VOP_F64_F64_F64_F64, fma
>;
} // End isCommutable = 1
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "v_lerp_u8", []>;
-defm V_ALIGNBIT_B32 : VOP3Inst <vop3<0x14e>, "v_alignbit_b32",
+defm V_ALIGNBIT_B32 : VOP3Inst <vop3<0x14e, 0x1ce>, "v_alignbit_b32",
VOP_I32_I32_I32_I32
>;
-defm V_ALIGNBYTE_B32 : VOP3Inst <vop3<0x14f>, "v_alignbyte_b32",
+defm V_ALIGNBYTE_B32 : VOP3Inst <vop3<0x14f, 0x1cf>, "v_alignbyte_b32",
VOP_I32_I32_I32_I32
>;
-defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "v_mullit_f32",
- VOP_F32_F32_F32_F32>;
-defm V_MIN3_F32 : VOP3Inst <vop3<0x151>, "v_min3_f32",
+
+defm V_MIN3_F32 : VOP3Inst <vop3<0x151, 0x1d0>, "v_min3_f32",
VOP_F32_F32_F32_F32, AMDGPUfmin3>;
-defm V_MIN3_I32 : VOP3Inst <vop3<0x152>, "v_min3_i32",
+defm V_MIN3_I32 : VOP3Inst <vop3<0x152, 0x1d1>, "v_min3_i32",
VOP_I32_I32_I32_I32, AMDGPUsmin3
>;
-defm V_MIN3_U32 : VOP3Inst <vop3<0x153>, "v_min3_u32",
+defm V_MIN3_U32 : VOP3Inst <vop3<0x153, 0x1d2>, "v_min3_u32",
VOP_I32_I32_I32_I32, AMDGPUumin3
>;
-defm V_MAX3_F32 : VOP3Inst <vop3<0x154>, "v_max3_f32",
+defm V_MAX3_F32 : VOP3Inst <vop3<0x154, 0x1d3>, "v_max3_f32",
VOP_F32_F32_F32_F32, AMDGPUfmax3
>;
-defm V_MAX3_I32 : VOP3Inst <vop3<0x155>, "v_max3_i32",
+defm V_MAX3_I32 : VOP3Inst <vop3<0x155, 0x1d4>, "v_max3_i32",
VOP_I32_I32_I32_I32, AMDGPUsmax3
>;
-defm V_MAX3_U32 : VOP3Inst <vop3<0x156>, "v_max3_u32",
+defm V_MAX3_U32 : VOP3Inst <vop3<0x156, 0x1d5>, "v_max3_u32",
VOP_I32_I32_I32_I32, AMDGPUumax3
>;
-//def V_MED3_F32 : VOP3_MED3 <0x00000157, "v_med3_f32", []>;
-//def V_MED3_I32 : VOP3_MED3 <0x00000158, "v_med3_i32", []>;
-//def V_MED3_U32 : VOP3_MED3 <0x00000159, "v_med3_u32", []>;
+defm V_MED3_F32 : VOP3Inst <vop3<0x157, 0x1d6>, "v_med3_f32",
+ VOP_F32_F32_F32_F32
+>;
+defm V_MED3_I32 : VOP3Inst <vop3<0x158, 0x1d7>, "v_med3_i32",
+ VOP_I32_I32_I32_I32
+>;
+defm V_MED3_U32 : VOP3Inst <vop3<0x159, 0x1d8>, "v_med3_u32",
+ VOP_I32_I32_I32_I32
+>;
+
//def V_SAD_U8 : VOP3_U8 <0x0000015a, "v_sad_u8", []>;
//def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "v_sad_hi_u8", []>;
//def V_SAD_U16 : VOP3_U16 <0x0000015c, "v_sad_u16", []>;
-defm V_SAD_U32 : VOP3Inst <vop3<0x15d>, "v_sad_u32",
+defm V_SAD_U32 : VOP3Inst <vop3<0x15d, 0x1dc>, "v_sad_u32",
VOP_I32_I32_I32_I32
>;
////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "v_cvt_pk_u8_f32", []>;
defm V_DIV_FIXUP_F32 : VOP3Inst <
- vop3<0x15f>, "v_div_fixup_f32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
+ vop3<0x15f, 0x1de>, "v_div_fixup_f32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
>;
+
+let SchedRW = [WriteDouble] in {
+
defm V_DIV_FIXUP_F64 : VOP3Inst <
- vop3<0x160>, "v_div_fixup_f64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup
+ vop3<0x160, 0x1df>, "v_div_fixup_f64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup
>;
-defm V_LSHL_B64 : VOP3Inst <vop3<0x161>, "v_lshl_b64",
- VOP_I64_I64_I32, shl
->;
-defm V_LSHR_B64 : VOP3Inst <vop3<0x162>, "v_lshr_b64",
- VOP_I64_I64_I32, srl
->;
-defm V_ASHR_I64 : VOP3Inst <vop3<0x163>, "v_ashr_i64",
- VOP_I64_I64_I32, sra
->;
+} // End SchedRW = [WriteDouble]
+let SchedRW = [WriteDouble] in {
let isCommutable = 1 in {
-defm V_ADD_F64 : VOP3Inst <vop3<0x164>, "v_add_f64",
+defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
VOP_F64_F64_F64, fadd
>;
-defm V_MUL_F64 : VOP3Inst <vop3<0x165>, "v_mul_f64",
+defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
VOP_F64_F64_F64, fmul
>;
-defm V_MIN_F64 : VOP3Inst <vop3<0x166>, "v_min_f64",
+defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
VOP_F64_F64_F64, fminnum
>;
-defm V_MAX_F64 : VOP3Inst <vop3<0x167>, "v_max_f64",
+defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
VOP_F64_F64_F64, fmaxnum
>;
} // isCommutable = 1
-defm V_LDEXP_F64 : VOP3Inst <vop3<0x168>, "v_ldexp_f64",
+defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
VOP_F64_F64_I32, AMDGPUldexp
>;
-let isCommutable = 1 in {
+} // End SchedRW = [WriteDouble]
+
+let isCommutable = 1, SchedRW = [WriteQuarterRate32] in {
-defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169>, "v_mul_lo_u32",
+defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169, 0x285>, "v_mul_lo_u32",
VOP_I32_I32_I32
>;
-defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a>, "v_mul_hi_u32",
+defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a, 0x286>, "v_mul_hi_u32",
VOP_I32_I32_I32
>;
-defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b>, "v_mul_lo_i32",
+
+defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b, 0x285>, "v_mul_lo_i32",
VOP_I32_I32_I32
>;
-defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c>, "v_mul_hi_i32",
+defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c, 0x287>, "v_mul_hi_i32",
VOP_I32_I32_I32
>;
-} // isCommutable = 1
+} // isCommutable = 1, SchedRW = [WriteQuarterRate32]
-defm V_DIV_SCALE_F32 : VOP3b_32 <vop3<0x16d>, "v_div_scale_f32", []>;
+let SchedRW = [WriteFloatFMA, WriteSALU] in {
+defm V_DIV_SCALE_F32 : VOP3b_32 <vop3<0x16d, 0x1e0>, "v_div_scale_f32", []>;
+} // End SchedRW = [WriteFloatFMA, WriteSALU]
+let SchedRW = [WriteDouble, WriteSALU] in {
// Double precision division pre-scale.
-defm V_DIV_SCALE_F64 : VOP3b_64 <vop3<0x16e>, "v_div_scale_f64", []>;
+defm V_DIV_SCALE_F64 : VOP3b_64 <vop3<0x16e, 0x1e1>, "v_div_scale_f64", []>;
+} // End SchedRW = [WriteDouble, WriteSALU]
-let isCommutable = 1 in {
-defm V_DIV_FMAS_F32 : VOP3Inst <vop3<0x16f>, "v_div_fmas_f32",
+let isCommutable = 1, Uses = [VCC] in {
+
+// v_div_fmas_f32:
+// result = src0 * src1 + src2
+// if (vcc)
+// result *= 2^32
+//
+defm V_DIV_FMAS_F32 : VOP3_VCC_Inst <vop3<0x16f, 0x1e2>, "v_div_fmas_f32",
VOP_F32_F32_F32_F32, AMDGPUdiv_fmas
>;
-defm V_DIV_FMAS_F64 : VOP3Inst <vop3<0x170>, "v_div_fmas_f64",
+
+let SchedRW = [WriteDouble] in {
+// v_div_fmas_f64:
+// result = src0 * src1 + src2
+// if (vcc)
+// result *= 2^64
+//
+defm V_DIV_FMAS_F64 : VOP3_VCC_Inst <vop3<0x170, 0x1e3>, "v_div_fmas_f64",
VOP_F64_F64_F64_F64, AMDGPUdiv_fmas
>;
+
+} // End SchedRW = [WriteDouble]
} // End isCommutable = 1
//def V_MSAD_U8 : VOP3_U8 <0x00000171, "v_msad_u8", []>;
//def V_QSAD_U8 : VOP3_U8 <0x00000172, "v_qsad_u8", []>;
//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "v_mqsad_u8", []>;
+let SchedRW = [WriteDouble] in {
defm V_TRIG_PREOP_F64 : VOP3Inst <
- vop3<0x174>, "v_trig_preop_f64", VOP_F64_F64_I32, AMDGPUtrig_preop
+ vop3<0x174, 0x292>, "v_trig_preop_f64", VOP_F64_F64_I32, AMDGPUtrig_preop
>;
-//===----------------------------------------------------------------------===//
-// Pseudo Instructions
-//===----------------------------------------------------------------------===//
+} // End SchedRW = [WriteDouble]
-let isCodeGenOnly = 1, isPseudo = 1 in {
+// These instructions only exist on SI and CI
+let SubtargetPredicate = isSICI in {
-def V_MOV_I1 : InstSI <
- (outs VReg_1:$dst),
- (ins i1imm:$src),
- "", [(set i1:$dst, (imm:$src))]
->;
+defm V_LSHL_B64 : VOP3Inst <vop3<0x161>, "v_lshl_b64", VOP_I64_I64_I32>;
+defm V_LSHR_B64 : VOP3Inst <vop3<0x162>, "v_lshr_b64", VOP_I64_I64_I32>;
+defm V_ASHR_I64 : VOP3Inst <vop3<0x163>, "v_ashr_i64", VOP_I64_I64_I32>;
-def V_AND_I1 : InstSI <
- (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
- [(set i1:$dst, (and i1:$src0, i1:$src1))]
->;
+defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "v_mullit_f32",
+ VOP_F32_F32_F32_F32>;
-def V_OR_I1 : InstSI <
- (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
- [(set i1:$dst, (or i1:$src0, i1:$src1))]
->;
+} // End SubtargetPredicate = isSICI
+
+let SubtargetPredicate = isVI in {
-def V_XOR_I1 : InstSI <
- (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
- [(set i1:$dst, (xor i1:$src0, i1:$src1))]
+defm V_LSHLREV_B64 : VOP3Inst <vop3<0, 0x28f>, "v_lshlrev_b64",
+ VOP_I64_I32_I64
+>;
+defm V_LSHRREV_B64 : VOP3Inst <vop3<0, 0x290>, "v_lshrrev_b64",
+ VOP_I64_I32_I64
+>;
+defm V_ASHRREV_I64 : VOP3Inst <vop3<0, 0x291>, "v_ashrrev_i64",
+ VOP_I64_I32_I64
>;
+} // End SubtargetPredicate = isVI
+
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, isPseudo = 1 in {
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
+// pass to enable folding of inline immediates.
+def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
+} // end let hasSideEffects = 0, mayLoad = 0, mayStore = 0
+
let hasSideEffects = 1 in {
def SGPR_USE : InstSI <(outs),(ins), "", []>;
}
@@ -1785,12 +1916,12 @@ def SI_KILL : InstSI <
let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
-//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri, ADDRIndirect>;
+//defm SI_ : RegisterLoadStore <VGPR_32, FRAMEri, ADDRIndirect>;
let UseNamedOperandTable = 1 in {
def SI_RegisterLoad : InstSI <
- (outs VReg_32:$dst, SReg_64:$temp),
+ (outs VGPR_32:$dst, SReg_64:$temp),
(ins FRAMEri32:$addr, i32imm:$chan),
"", []
> {
@@ -1800,7 +1931,7 @@ def SI_RegisterLoad : InstSI <
class SIRegStore<dag outs> : InstSI <
outs,
- (ins VReg_32:$val, FRAMEri32:$addr, i32imm:$chan),
+ (ins VGPR_32:$val, FRAMEri32:$addr, i32imm:$chan),
"", []
> {
let isRegisterStore = 1;
@@ -1816,7 +1947,7 @@ def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
} // End UseNamedOperandTable = 1
def SI_INDIRECT_SRC : InstSI <
- (outs VReg_32:$dst, SReg_64:$temp),
+ (outs VGPR_32:$dst, SReg_64:$temp),
(ins unknown:$src, VSrc_32:$idx, i32imm:$off),
"si_indirect_src $dst, $temp, $src, $idx, $off",
[]
@@ -1824,14 +1955,14 @@ def SI_INDIRECT_SRC : InstSI <
class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
(outs rc:$dst, SReg_64:$temp),
- (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VReg_32:$val),
+ (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VGPR_32:$val),
"si_indirect_dst $dst, $temp, $src, $idx, $off, $val",
[]
> {
let Constraints = "$src = $dst";
}
-def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VReg_32>;
+def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
@@ -1839,31 +1970,22 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
} // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0]
-let usesCustomInserter = 1 in {
-
-def V_SUB_F64 : InstSI <
- (outs VReg_64:$dst),
- (ins VReg_64:$src0, VReg_64:$src1),
- "v_sub_f64 $dst, $src0, $src1",
- [(set f64:$dst, (fsub f64:$src0, f64:$src1))]
->;
-
-} // end usesCustomInserter
-
multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
- def _SAVE : InstSI <
- (outs),
- (ins sgpr_class:$src, i32imm:$frame_idx),
- "", []
- >;
-
- def _RESTORE : InstSI <
- (outs sgpr_class:$dst),
- (ins i32imm:$frame_idx),
- "", []
- >;
-
+ let UseNamedOperandTable = 1 in {
+ def _SAVE : InstSI <
+ (outs),
+ (ins sgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
+ SReg_32:$scratch_offset),
+ "", []
+ >;
+
+ def _RESTORE : InstSI <
+ (outs sgpr_class:$dst),
+ (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset),
+ "", []
+ >;
+ } // End UseNamedOperandTable = 1
}
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
@@ -1873,20 +1995,23 @@ defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;
multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
- def _SAVE : InstSI <
- (outs),
- (ins vgpr_class:$src, i32imm:$frame_idx),
- "", []
- >;
-
- def _RESTORE : InstSI <
- (outs vgpr_class:$dst),
- (ins i32imm:$frame_idx),
- "", []
- >;
+ let UseNamedOperandTable = 1 in {
+ def _SAVE : InstSI <
+ (outs),
+ (ins vgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
+ SReg_32:$scratch_offset),
+ "", []
+ >;
+
+ def _RESTORE : InstSI <
+ (outs vgpr_class:$dst),
+ (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset),
+ "", []
+ >;
+ } // End UseNamedOperandTable = 1
}
-defm SI_SPILL_V32 : SI_SPILL_VGPR <VReg_32>;
+defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
@@ -1905,9 +2030,9 @@ def SI_CONSTDATA_PTR : InstSI <
} // end IsCodeGenOnly, isPseudo
-} // end SubtargetPredicate = SI
+} // end SubtargetPredicate = isGCN
-let Predicates = [isSI] in {
+let Predicates = [isGCN] in {
def : Pat<
(int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
@@ -1941,7 +2066,7 @@ def : Pat <
multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
- // 1. Offset as 8bit DWORD immediate
+ // 1. SI-CI: Offset as 8bit DWORD immediate
def : Pat <
(constant_load (add i64:$sbase, (i64 IMM8bitDWORD:$offset))),
(vt (Instr_IMM $sbase, (as_dword_i32imm $offset)))
@@ -1960,6 +2085,28 @@ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
>;
}
+multiclass SMRD_Pattern_vi <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
+
+ // 1. VI: Offset as 20bit immediate in bytes
+ def : Pat <
+ (constant_load (add i64:$sbase, (i64 IMM20bit:$offset))),
+ (vt (Instr_IMM $sbase, (as_i32imm $offset)))
+ >;
+
+ // 2. Offset loaded in a 32bit SGPR
+ def : Pat <
+ (constant_load (add i64:$sbase, (i64 IMM32bit:$offset))),
+ (vt (Instr_SGPR $sbase, (S_MOV_B32 (i32 (as_i32imm $offset)))))
+ >;
+
+ // 3. No offset at all
+ def : Pat <
+ (constant_load i64:$sbase),
+ (vt (Instr_IMM $sbase, 0))
+ >;
+}
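+
+// For example, a constant load at byte offset 16 is selected with an
+// immediate of 4 on SI-CI (the 8bit SMRD offset counts DWORDs, hence
+// as_dword_i32imm above) but with an immediate of 16 on VI (the 20bit
+// offset counts bytes, hence as_i32imm here).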
+
+let Predicates = [isSICI] in {
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
@@ -1967,6 +2114,19 @@ defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+} // End Predicates = [isSICI]
+
+let Predicates = [isVI] in {
+defm : SMRD_Pattern_vi <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
+defm : SMRD_Pattern_vi <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+} // End Predicates = [isVI]
+
+let Predicates = [isSICI] in {
// 1. Offset as 8bit DWORD immediate
def : Pat <
@@ -1974,14 +2134,14 @@ def : Pat <
(S_BUFFER_LOAD_DWORD_IMM $sbase, (as_dword_i32imm $offset))
>;
+} // End Predicates = [isSICI]
+
// 2. Offset loaded in a 32bit SGPR
def : Pat <
(SIload_constant v4i32:$sbase, imm:$offset),
(S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
>;
-} // Predicates = [isSI] in {
-
//===----------------------------------------------------------------------===//
// SOP1 Patterns
//===----------------------------------------------------------------------===//
@@ -2004,8 +2164,6 @@ def : Pat <
(S_ADD_U32 $src0, $src1)
>;
-let Predicates = [isSI] in {
-
//===----------------------------------------------------------------------===//
// SOPP Patterns
//===----------------------------------------------------------------------===//
@@ -2020,9 +2178,13 @@ def : Pat <
//===----------------------------------------------------------------------===//
let Predicates = [UnsafeFPMath] in {
-def : RcpPat<V_RCP_F64_e32, f64>;
-defm : RsqPat<V_RSQ_F64_e32, f64>;
-defm : RsqPat<V_RSQ_F32_e32, f32>;
+
+//def : RcpPat<V_RCP_F64_e32, f64>;
+//defm : RsqPat<V_RSQ_F64_e32, f64>;
+//defm : RsqPat<V_RSQ_F32_e32, f32>;
+
+def : RsqPat<V_RSQ_F32_e32, f32>;
+def : RsqPat<V_RSQ_F64_e32, f64>;
}
//===----------------------------------------------------------------------===//
@@ -2369,10 +2531,10 @@ foreach Index = 0-15 in {
}
def : BitConvert <i32, f32, SReg_32>;
-def : BitConvert <i32, f32, VReg_32>;
+def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, SReg_32>;
-def : BitConvert <f32, i32, VReg_32>;
+def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i64, f64, VReg_64>;
@@ -2475,7 +2637,7 @@ def : Pat <
def : Pat <
(SGPRImm<(f32 fpimm)>:$imm),
- (S_MOV_B32 fpimm:$imm)
+ (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;
def : Pat <
@@ -2485,7 +2647,7 @@ def : Pat <
def : Pat <
(f32 fpimm:$imm),
- (V_MOV_B32_e32 fpimm:$imm)
+ (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;
def : Pat <
@@ -2493,21 +2655,38 @@ def : Pat <
(S_MOV_B64 InlineImm<i64>:$imm)
>;
+// XXX - Should this use a s_cmp to set SCC?
+
+// Set to sign-extended 64-bit value (true = -1, false = 0)
+def : Pat <
+ (i1 imm:$imm),
+ (S_MOV_B64 (i64 (as_i64imm $imm)))
+>;
+
+def : Pat <
+ (f64 InlineFPImm<f64>:$imm),
+ (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
+>;
+
/********** ===================== **********/
/********** Interpolation Patterns **********/
/********** ===================== **********/
+// The value of $params is constant throughout the entire kernel.
+// We need to use S_MOV_B32 $params, because CSE ignores copies, so
+// without it we end up with a lot of redundant moves.
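+//
+// For instance, two interp instructions reading the same $params would each
+// otherwise receive their own copy into m0; routing the value through an
+// explicit S_MOV_B32 gives MachineCSE a common instruction to merge.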
+
def : Pat <
(int_SI_fs_constant imm:$attr_chan, imm:$attr, i32:$params),
- (V_INTERP_MOV_F32 INTERP.P0, imm:$attr_chan, imm:$attr, $params)
+ (V_INTERP_MOV_F32 INTERP.P0, imm:$attr_chan, imm:$attr, (S_MOV_B32 $params))
>;
def : Pat <
- (int_SI_fs_interp imm:$attr_chan, imm:$attr, M0Reg:$params, v2i32:$ij),
+ (int_SI_fs_interp imm:$attr_chan, imm:$attr, i32:$params, v2i32:$ij),
(V_INTERP_P2_F32 (V_INTERP_P1_F32 (EXTRACT_SUBREG v2i32:$ij, sub0),
- imm:$attr_chan, imm:$attr, i32:$params),
+ imm:$attr_chan, imm:$attr, (S_MOV_B32 $params)),
(EXTRACT_SUBREG $ij, sub1),
- imm:$attr_chan, imm:$attr, $params)
+ imm:$attr_chan, imm:$attr, (S_MOV_B32 $params))
>;
/********** ================== **********/
@@ -2522,13 +2701,6 @@ def : Pat <
(V_MUL_LEGACY_F32_e32 $src0, (V_RCP_LEGACY_F32_e32 $src1))
>;
-def : Pat<
- (fdiv f64:$src0, f64:$src1),
- (V_MUL_F64 0 /* src0_modifiers */, $src0,
- 0 /* src1_modifiers */, (V_RCP_F64_e32 $src1),
- 0 /* clamp */, 0 /* omod */)
->;
-
def : Pat <
(int_AMDGPU_cube v4f32:$src),
(REG_SEQUENCE VReg_128,
@@ -2579,7 +2751,7 @@ def : Pat <
def : Pat <
(int_SI_tid),
- (V_MBCNT_HI_U32_B32_e32 0xffffffff,
+ (V_MBCNT_HI_U32_B32_e64 0xffffffff,
(V_MBCNT_LO_U32_B32_e64 0xffffffff, 0))
>;
@@ -2600,9 +2772,6 @@ def : Pat <
(V_MUL_HI_I32 $src0, $src1)
>;
-def : Vop3ModPat<V_MAD_F32, VOP_F32_F32_F32_F32, AMDGPUmad>;
-
-
defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;
@@ -2612,7 +2781,7 @@ def : ROTRPattern <V_ALIGNBIT_B32>;
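+// DS patterns now carry an explicit m0 operand: (S_MOV_B32 -1) seeds m0 with
+// 0xffffffff, the most permissive LDS address clamp, rather than relying on
+// a single per-function setup (see the InitM0ForLDS removal in
+// SILowerControlFlow.cpp below).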
class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
(vt (frag (DS1Addr1Offset i32:$ptr, i32:$offset))),
- (inst (i1 0), $ptr, (as_i16imm $offset))
+ (inst (i1 0), $ptr, (as_i16imm $offset), (S_MOV_B32 -1))
>;
def : DSReadPat <DS_READ_I8, i32, sextloadi8_local>;
@@ -2630,12 +2799,12 @@ def : DSReadPat <DS_READ_B64, v2i32, local_load_aligned8bytes>;
def : Pat <
(v2i32 (local_load (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
i8:$offset1))),
- (DS_READ2_B32 (i1 0), $ptr, $offset0, $offset1)
+ (DS_READ2_B32 (i1 0), $ptr, $offset0, $offset1, (S_MOV_B32 -1))
>;
class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
(frag vt:$value, (DS1Addr1Offset i32:$ptr, i32:$offset)),
- (inst (i1 0), $ptr, $value, (as_i16imm $offset))
+ (inst (i1 0), $ptr, $value, (as_i16imm $offset), (S_MOV_B32 -1))
>;
def : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>;
@@ -2651,12 +2820,13 @@ def : Pat <
(local_store v2i32:$value, (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
i8:$offset1)),
(DS_WRITE2_B32 (i1 0), $ptr, (EXTRACT_SUBREG $value, sub0),
- (EXTRACT_SUBREG $value, sub1), $offset0, $offset1)
+ (EXTRACT_SUBREG $value, sub1), $offset0, $offset1,
+ (S_MOV_B32 -1))
>;
class DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> : Pat <
(frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$value),
- (inst (i1 0), $ptr, $value, (as_i16imm $offset))
+ (inst (i1 0), $ptr, $value, (as_i16imm $offset), (S_MOV_B32 -1))
>;
// Special case of DSAtomicRetPat for add / sub 1 -> inc / dec
@@ -2672,13 +2842,13 @@ class DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> : Pat <
class DSAtomicIncRetPat<DS inst, ValueType vt,
Instruction LoadImm, PatFrag frag> : Pat <
(frag (DS1Addr1Offset i32:$ptr, i32:$offset), (vt 1)),
- (inst (i1 0), $ptr, (LoadImm (vt -1)), (as_i16imm $offset))
+ (inst (i1 0), $ptr, (LoadImm (vt -1)), (as_i16imm $offset), (S_MOV_B32 -1))
>;
class DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> : Pat <
(frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$cmp, vt:$swap),
- (inst (i1 0), $ptr, $cmp, $swap, (as_i16imm $offset))
+ (inst (i1 0), $ptr, $cmp, $swap, (as_i16imm $offset), (S_MOV_B32 -1))
>;
@@ -2728,11 +2898,12 @@ def : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, atomic_cmp_swap_64_local>;
multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
PatFrag constant_ld> {
def : Pat <
- (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i16:$offset))),
- (Instr_ADDR64 $srsrc, $vaddr, $offset)
+ (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, i16:$offset))),
+ (Instr_ADDR64 $srsrc, $vaddr, $soffset, $offset)
>;
}
+let Predicates = [isSICI] in {
defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
@@ -2740,6 +2911,7 @@ defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant
defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, constant_load>;
defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32, constant_load>;
defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32, constant_load>;
+} // End Predicates = [isSICI]
class MUBUFScratchLoadPat <MUBUF Instr, ValueType vt, PatFrag ld> : Pat <
(vt (ld (MUBUFScratch v4i32:$srsrc, i32:$vaddr,
@@ -2785,9 +2957,9 @@ multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen, MUBUF idxe
def : Pat <
(vt (int_SI_buffer_load_dword v4i32:$rsrc, v2i32:$vaddr, i32:$soffset,
- imm, 1, 1, imm:$glc, imm:$slc,
+ imm:$offset, 1, 1, imm:$glc, imm:$slc,
imm:$tfe)),
- (bothen $rsrc, $vaddr, $soffset, (as_i1imm $glc), (as_i1imm $slc),
+ (bothen $rsrc, $vaddr, $soffset, (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc),
(as_i1imm $tfe))
>;
}
@@ -2817,11 +2989,13 @@ class MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
(Instr $value, $srsrc, $vaddr, $offset)
>;
+let Predicates = [isSICI] in {
def : MUBUFStore_Pattern <BUFFER_STORE_BYTE_ADDR64, i32, truncstorei8_private>;
def : MUBUFStore_Pattern <BUFFER_STORE_SHORT_ADDR64, i32, truncstorei16_private>;
def : MUBUFStore_Pattern <BUFFER_STORE_DWORD_ADDR64, i32, store_private>;
def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2_ADDR64, v2i32, store_private>;
def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4_ADDR64, v4i32, store_private>;
+} // End Predicates = [isSICI]
*/
@@ -2848,20 +3022,6 @@ def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
let SubtargetPredicate = isCI in {
-// Sea island new arithmetic instructinos
-defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
- VOP_F64_F64, ftrunc
->;
-defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
- VOP_F64_F64, fceil
->;
-defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
- VOP_F64_F64, ffloor
->;
-defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
- VOP_F64_F64, frint
->;
-
defm V_QSAD_PK_U16_U8 : VOP3Inst <vop3<0x173>, "v_qsad_pk_u16_u8",
VOP_I32_I32_I32
>;
@@ -2890,8 +3050,6 @@ defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "v_mad_i64_i32",
// S_CBRANCH_CDBGSYS_OR_USER
// S_CBRANCH_CDBGSYS_AND_USER
// S_DCACHE_INV_VOL
-// V_EXP_LEGACY_F32
-// V_LOG_LEGACY_F32
// DS_NOP
// DS_GWS_SEMA_RELEASE_ALL
// DS_WRAP_RTN_B32
@@ -2904,7 +3062,7 @@ defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "v_mad_i64_i32",
// BUFFER_LOAD_DWORDX3
// BUFFER_STORE_DWORDX3
-} // End iSCI
+} // End isCI
//===----------------------------------------------------------------------===//
// Flat Patterns
@@ -3038,6 +3196,27 @@ def : Pat <
(V_CNDMASK_B32_e64 0, -1, $src), sub1)
>;
+// If we need to perform a logical operation on i1 values, we need to
+// use vector comparisons since there is only one SCC register. Vector
+// comparisons still write to a pair of SGPRs, so treat these as
+// 64-bit comparisons. When SGPR copies are legalized, any instruction
+// produced by a copy from SCC into one of these operations will be
+// moved to the VALU.
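+//
+// For example, a hypothetical "%c = and i1 %a, %b" whose operands live in
+// SGPR pairs is selected to s_and_b64, operating on the full 64-bit masks: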
+def : Pat <
+ (i1 (and i1:$src0, i1:$src1)),
+ (S_AND_B64 $src0, $src1)
+>;
+
+def : Pat <
+ (i1 (or i1:$src0, i1:$src1)),
+ (S_OR_B64 $src0, $src1)
+>;
+
+def : Pat <
+ (i1 (xor i1:$src0, i1:$src1)),
+ (S_XOR_B64 $src0, $src1)
+>;
+
def : Pat <
(f32 (sint_to_fp i1:$src)),
(V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src)
@@ -3050,7 +3229,7 @@ def : Pat <
def : Pat <
(f64 (sint_to_fp i1:$src)),
- (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
+ (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;
def : Pat <
@@ -3073,16 +3252,27 @@ def : Pat <
>;
def : Pat <
+ (i1 (trunc i64:$a)),
+ (V_CMP_EQ_I32_e64 (V_AND_B32_e64 (i32 1),
+ (EXTRACT_SUBREG $a, sub0)), 1)
+>;
+
+def : Pat <
(i32 (bswap i32:$a)),
(V_BFI_B32 (S_MOV_B32 0x00ff00ff),
(V_ALIGNBIT_B32 $a, $a, 24),
(V_ALIGNBIT_B32 $a, $a, 8))
>;
+def : Pat <
+ (f32 (select i1:$src2, f32:$src1, f32:$src0)),
+ (V_CNDMASK_B32_e64 $src0, $src1, $src2)
+>;
+
//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//
def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;
-} // End isSI predicate
+} // End isGCN predicate
diff --git a/lib/Target/R600/SILoadStoreOptimizer.cpp b/lib/Target/R600/SILoadStoreOptimizer.cpp
index 4140196..46630d0 100644
--- a/lib/Target/R600/SILoadStoreOptimizer.cpp
+++ b/lib/Target/R600/SILoadStoreOptimizer.cpp
@@ -55,7 +55,6 @@ namespace {
class SILoadStoreOptimizer : public MachineFunctionPass {
private:
- const TargetMachine *TM;
const SIInstrInfo *TII;
const SIRegisterInfo *TRI;
MachineRegisterInfo *MRI;
@@ -86,20 +85,11 @@ private:
public:
static char ID;
- SILoadStoreOptimizer() :
- MachineFunctionPass(ID),
- TM(nullptr),
- TII(nullptr),
- TRI(nullptr),
- MRI(nullptr),
- LIS(nullptr) {
+ SILoadStoreOptimizer()
+ : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
+ LIS(nullptr) {}
- }
-
- SILoadStoreOptimizer(const TargetMachine &TM_) :
- MachineFunctionPass(ID),
- TM(&TM_),
- TII(static_cast<const SIInstrInfo*>(TM->getSubtargetImpl()->getInstrInfo())) {
+ SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
}
@@ -222,6 +212,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
// Be careful, since the addresses could be subregisters themselves in weird
// cases, like vectors of pointers.
const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
+ const MachineOperand *M0Reg = TII->getNamedOperand(*I, AMDGPU::OpName::m0);
unsigned DestReg0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst)->getReg();
unsigned DestReg1
@@ -262,6 +253,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
.addOperand(*AddrReg) // addr
.addImm(NewOffset0) // offset0
.addImm(NewOffset1) // offset1
+ .addOperand(*M0Reg) // M0
.addMemOperand(*I->memoperands_begin())
.addMemOperand(*Paired->memoperands_begin());
@@ -280,6 +272,18 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg());
LIS->shrinkToUses(&AddrRegLI);
+ LiveInterval &M0RegLI = LIS->getInterval(M0Reg->getReg());
+ LIS->shrinkToUses(&M0RegLI);
+
+ // Currently m0 is treated as a register class with one member instead of an
+ // implicit physical register. We are using the virtual register for the first
+ // one, but we still need to update the live range of the now unused second m0
+ // virtual register to avoid verifier errors.
+ const MachineOperand *PairedM0Reg
+ = TII->getNamedOperand(*Paired, AMDGPU::OpName::m0);
+ LiveInterval &PairedM0RegLI = LIS->getInterval(PairedM0Reg->getReg());
+ LIS->shrinkToUses(&PairedM0RegLI);
+
LIS->getInterval(DestReg); // Create new LI
DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
@@ -295,6 +299,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
// Be sure to use .addOperand(), and not .addReg() with these. We want to be
// sure we preserve the subregister index and any register flags set on them.
const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
+ const MachineOperand *M0Reg = TII->getNamedOperand(*I, AMDGPU::OpName::m0);
const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
const MachineOperand *Data1
= TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);
@@ -333,11 +338,13 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
.addOperand(*Data1) // data1
.addImm(NewOffset0) // offset0
.addImm(NewOffset1) // offset1
+ .addOperand(*M0Reg) // m0
.addMemOperand(*I->memoperands_begin())
.addMemOperand(*Paired->memoperands_begin());
// XXX - How do we express subregisters here?
- unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };
+ unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg(),
+ M0Reg->getReg()};
LIS->RemoveMachineInstrFromMaps(I);
LIS->RemoveMachineInstrFromMaps(Paired);
@@ -397,9 +404,9 @@ bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
}
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
- const TargetSubtargetInfo *STM = MF.getTarget().getSubtargetImpl();
- TRI = static_cast<const SIRegisterInfo*>(STM->getRegisterInfo());
- TII = static_cast<const SIInstrInfo*>(STM->getInstrInfo());
+ const TargetSubtargetInfo &STM = MF.getSubtarget();
+ TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
+ TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp
index 9702565..2e08c9f 100644
--- a/lib/Target/R600/SILowerControlFlow.cpp
+++ b/lib/Target/R600/SILowerControlFlow.cpp
@@ -88,7 +88,6 @@ private:
void Kill(MachineInstr &MI);
void Branch(MachineInstr &MI);
- void InitM0ForLDS(MachineBasicBlock::iterator MI);
void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
void IndirectSrc(MachineInstr &MI);
void IndirectDst(MachineInstr &MI);
@@ -309,10 +308,9 @@ void SILowerControlFlowPass::Kill(MachineInstr &MI) {
#endif
// Clear this thread from the exec mask if the operand is negative
- if ((Op.isImm() || Op.isFPImm())) {
+ if (Op.isImm()) {
// Constant operand: Set exec mask to 0 or do nothing
- if (Op.isImm() ? (Op.getImm() & 0x80000000) :
- Op.getFPImm()->isNegative()) {
+ if (Op.getImm() & 0x80000000) {
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
.addImm(0);
}
@@ -325,14 +323,6 @@ void SILowerControlFlowPass::Kill(MachineInstr &MI) {
MI.eraseFromParent();
}
-/// The m0 register stores the maximum allowable address for LDS reads and
-/// writes. Its value must be at least the size in bytes of LDS allocated by
-/// the shader. For simplicity, we set it to the maximum possible value.
-void SILowerControlFlowPass::InitM0ForLDS(MachineBasicBlock::iterator MI) {
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
- AMDGPU::M0).addImm(0xffffffff);
-}
-
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
MachineBasicBlock &MBB = *MI.getParent();
@@ -349,7 +339,7 @@ void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
} else {
assert(AMDGPU::SReg_64RegClass.contains(Save));
- assert(AMDGPU::VReg_32RegClass.contains(Idx));
+ assert(AMDGPU::VGPR_32RegClass.contains(Idx));
// Save the EXEC mask
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
@@ -391,12 +381,6 @@ void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
.addReg(Save);
}
- // FIXME: Are there any values other than the LDS address clamp that need to
- // be stored in the m0 register and may be live for more than a few
- // instructions? If so, we should save the m0 register at the beginning
- // of this function and restore it here.
- // FIXME: Add support for LDS direct loads.
- InitM0ForLDS(&MI);
MI.eraseFromParent();
}
@@ -450,7 +434,6 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
bool HaveKill = false;
- bool NeedM0 = false;
bool NeedWQM = false;
bool NeedFlat = false;
unsigned Depth = 0;
@@ -464,16 +447,12 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
Next = std::next(I);
MachineInstr &MI = *I;
- if (TII->isDS(MI.getOpcode())) {
- NeedM0 = true;
+ if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
NeedWQM = true;
- }
// Flat uses m0 in case it needs to access LDS.
- if (TII->isFLAT(MI.getOpcode())) {
- NeedM0 = true;
+ if (TII->isFLAT(MI.getOpcode()))
NeedFlat = true;
- }
switch (MI.getOpcode()) {
default: break;
@@ -534,23 +513,10 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_INDIRECT_DST_V16:
IndirectDst(MI);
break;
-
- case AMDGPU::V_INTERP_P1_F32:
- case AMDGPU::V_INTERP_P2_F32:
- case AMDGPU::V_INTERP_MOV_F32:
- NeedWQM = true;
- break;
}
}
}
- if (NeedM0) {
- MachineBasicBlock &MBB = MF.front();
- // Initialize M0 to a value that won't cause LDS access to be discarded
- // due to offset clamping
- InitM0ForLDS(MBB.getFirstNonPHI());
- }
-
if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
MachineBasicBlock &MBB = MF.front();
BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
diff --git a/lib/Target/R600/SILowerI1Copies.cpp b/lib/Target/R600/SILowerI1Copies.cpp
index 65b892c..67421e2 100644
--- a/lib/Target/R600/SILowerI1Copies.cpp
+++ b/lib/Target/R600/SILowerI1Copies.cpp
@@ -85,30 +85,6 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
Next = std::next(I);
MachineInstr &MI = *I;
- if (MI.getOpcode() == AMDGPU::V_MOV_I1) {
- I1Defs.push_back(MI.getOperand(0).getReg());
- MI.setDesc(TII->get(AMDGPU::V_MOV_B32_e32));
- continue;
- }
-
- if (MI.getOpcode() == AMDGPU::V_AND_I1) {
- I1Defs.push_back(MI.getOperand(0).getReg());
- MI.setDesc(TII->get(AMDGPU::V_AND_B32_e32));
- continue;
- }
-
- if (MI.getOpcode() == AMDGPU::V_OR_I1) {
- I1Defs.push_back(MI.getOperand(0).getReg());
- MI.setDesc(TII->get(AMDGPU::V_OR_B32_e32));
- continue;
- }
-
- if (MI.getOpcode() == AMDGPU::V_XOR_I1) {
- I1Defs.push_back(MI.getOperand(0).getReg());
- MI.setDesc(TII->get(AMDGPU::V_XOR_B32_e32));
- continue;
- }
-
if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) {
unsigned Reg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(Reg);
@@ -117,39 +93,59 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- if (MI.getOpcode() != AMDGPU::COPY ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()) ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(1).getReg()))
+ if (MI.getOpcode() != AMDGPU::COPY)
continue;
+ const MachineOperand &Dst = MI.getOperand(0);
+ const MachineOperand &Src = MI.getOperand(1);
+
+ if (!TargetRegisterInfo::isVirtualRegister(Src.getReg()) ||
+ !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
+ continue;
- const TargetRegisterClass *DstRC =
- MRI.getRegClass(MI.getOperand(0).getReg());
- const TargetRegisterClass *SrcRC =
- MRI.getRegClass(MI.getOperand(1).getReg());
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg());
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg());
if (DstRC == &AMDGPU::VReg_1RegClass &&
TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
- I1Defs.push_back(MI.getOperand(0).getReg());
- BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CNDMASK_B32_e64))
- .addOperand(MI.getOperand(0))
- .addImm(0)
- .addImm(-1)
- .addOperand(MI.getOperand(1));
+ I1Defs.push_back(Dst.getReg());
+ DebugLoc DL = MI.getDebugLoc();
+
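+ // If the i1 source is a constant mask materialized by S_MOV_B64 (either
+ // 0 or -1 after sign extension), copy the value with a plain V_MOV_B32
+ // and skip the V_CNDMASK_B32 select emitted below.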
+ MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg());
+ if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) {
+ if (DefInst->getOperand(1).isImm()) {
+ I1Defs.push_back(Dst.getReg());
+
+ int64_t Val = DefInst->getOperand(1).getImm();
+ assert(Val == 0 || Val == -1);
+
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
+ .addOperand(Dst)
+ .addImm(Val);
+ MI.eraseFromParent();
+ continue;
+ }
+ }
+
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
+ .addOperand(Dst)
+ .addImm(0)
+ .addImm(-1)
+ .addOperand(Src);
MI.eraseFromParent();
} else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
SrcRC == &AMDGPU::VReg_1RegClass) {
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
- .addOperand(MI.getOperand(0))
- .addOperand(MI.getOperand(1))
- .addImm(0);
+ .addOperand(Dst)
+ .addOperand(Src)
+ .addImm(0);
MI.eraseFromParent();
}
}
}
for (unsigned Reg : I1Defs)
- MRI.setRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ MRI.setRegClass(Reg, &AMDGPU::VGPR_32RegClass);
return false;
}
diff --git a/lib/Target/R600/SIMachineFunctionInfo.cpp b/lib/Target/R600/SIMachineFunctionInfo.cpp
index d58f31d..587ea63 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.cpp
+++ b/lib/Target/R600/SIMachineFunctionInfo.cpp
@@ -29,6 +29,7 @@ void SIMachineFunctionInfo::anchor() {}
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
: AMDGPUMachineFunction(MF),
TIDReg(AMDGPU::NoRegister),
+ HasSpilledVGPRs(false),
PSInputAddr(0),
NumUserSGPRs(0),
LDSWaveSpillSize(0) { }
@@ -38,8 +39,8 @@ SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
unsigned FrameIndex,
unsigned SubIdx) {
const MachineFrameInfo *FrameInfo = MF->getFrameInfo();
- const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo*>(
- MF->getTarget().getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
+ MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
MachineRegisterInfo &MRI = MF->getRegInfo();
int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
Offset += SubIdx * 4;
@@ -50,7 +51,7 @@ SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
struct SpilledReg Spill;
if (!LaneVGPRs.count(LaneVGPRIdx)) {
- unsigned LaneVGPR = TRI->findUnusedVGPR(MRI);
+ unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);
LaneVGPRs[LaneVGPRIdx] = LaneVGPR;
MRI.setPhysRegUsed(LaneVGPR);
@@ -69,7 +70,7 @@ SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
const MachineFunction &MF) const {
- const AMDGPUSubtarget &ST = MF.getTarget().getSubtarget<AMDGPUSubtarget>();
+ const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
// FIXME: We should get this information from kernel attributes if it
// is available.
return getShaderType() == ShaderType::COMPUTE ? 256 : ST.getWavefrontSize();
diff --git a/lib/Target/R600/SIMachineFunctionInfo.h b/lib/Target/R600/SIMachineFunctionInfo.h
index 6bb8f9d..667da4c 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.h
+++ b/lib/Target/R600/SIMachineFunctionInfo.h
@@ -29,6 +29,7 @@ class SIMachineFunctionInfo : public AMDGPUMachineFunction {
void anchor() override;
unsigned TIDReg;
+ bool HasSpilledVGPRs;
public:
@@ -49,9 +50,12 @@ public:
unsigned NumUserSGPRs;
std::map<unsigned, unsigned> LaneVGPRs;
unsigned LDSWaveSpillSize;
+ unsigned ScratchOffsetReg;
bool hasCalculatedTID() const { return TIDReg != AMDGPU::NoRegister; };
unsigned getTIDReg() const { return TIDReg; };
void setTIDReg(unsigned Reg) { TIDReg = Reg; }
+ bool hasSpilledVGPRs() const { return HasSpilledVGPRs; }
+ void setHasSpilledVGPRs(bool Spill = true) { HasSpilledVGPRs = Spill; }
unsigned getMaximumWorkGroupSize(const MachineFunction &MF) const;
};
diff --git a/lib/Target/R600/SIPrepareScratchRegs.cpp b/lib/Target/R600/SIPrepareScratchRegs.cpp
new file mode 100644
index 0000000..0a57a5b
--- /dev/null
+++ b/lib/Target/R600/SIPrepareScratchRegs.cpp
@@ -0,0 +1,208 @@
+//===-- SIPrepareScratchRegs.cpp - Prepare scratch registers for spills --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// This pass loads the scratch pointer and scratch offset into a register
+/// or a frame index which can be used anywhere in the program. These
+/// values will be used for spilling VGPRs.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "SIDefines.h"
+#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIPrepareScratchRegs : public MachineFunctionPass {
+
+private:
+ static char ID;
+
+public:
+ SIPrepareScratchRegs() : MachineFunctionPass(ID) { }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "SI prepare scratch registers";
+ }
+
+};
+
+} // End anonymous namespace
+
+char SIPrepareScratchRegs::ID = 0;
+
+FunctionPass *llvm::createSIPrepareScratchRegs() {
+ return new SIPrepareScratchRegs();
+}
+
+bool SIPrepareScratchRegs::runOnMachineFunction(MachineFunction &MF) {
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SIRegisterInfo *TRI = &TII->getRegisterInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineFrameInfo *FrameInfo = MF.getFrameInfo();
+ MachineBasicBlock *Entry = MF.begin();
+ MachineBasicBlock::iterator I = Entry->begin();
+ DebugLoc DL = I->getDebugLoc();
+
+ // FIXME: If we don't have enough VGPRs for SGPR spilling we will need to
+ // run this pass.
+ if (!MFI->hasSpilledVGPRs())
+ return false;
+
+ unsigned ScratchPtrPreloadReg =
+ TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
+ unsigned ScratchOffsetPreloadReg =
+ TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
+
+ if (!Entry->isLiveIn(ScratchPtrPreloadReg))
+ Entry->addLiveIn(ScratchPtrPreloadReg);
+
+ if (!Entry->isLiveIn(ScratchOffsetPreloadReg))
+ Entry->addLiveIn(ScratchOffsetPreloadReg);
+
+ // Load the scratch offset.
+ unsigned ScratchOffsetReg =
+ TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_32RegClass);
+ int ScratchOffsetFI = -1;
+
+ if (ScratchOffsetReg != AMDGPU::NoRegister) {
+ // Found an SGPR to use
+ MRI.setPhysRegUsed(ScratchOffsetReg);
+ BuildMI(*Entry, I, DL, TII->get(AMDGPU::S_MOV_B32), ScratchOffsetReg)
+ .addReg(ScratchOffsetPreloadReg);
+ } else {
+ // No SGPR is available, we must spill.
+ ScratchOffsetFI = FrameInfo->CreateSpillStackObject(4, 4);
+ BuildMI(*Entry, I, DL, TII->get(AMDGPU::SI_SPILL_S32_SAVE))
+ .addReg(ScratchOffsetPreloadReg)
+ .addFrameIndex(ScratchOffsetFI)
+ .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
+ .addReg(AMDGPU::SGPR0, RegState::Undef);
+ }
+
+
+ // Now that we have the scratch pointer and offset values, we need to
+ // add them to all the SI_SPILL_V* instructions.
+
+ RegScavenger RS;
+ unsigned ScratchRsrcFI = FrameInfo->CreateSpillStackObject(16, 4);
+ RS.addScavengingFrameIndex(ScratchRsrcFI);
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ // Add the scratch offset reg as a live-in so that the register scavenger
+ // doesn't re-use it.
+ if (!MBB.isLiveIn(ScratchOffsetReg) &&
+ ScratchOffsetReg != AMDGPU::NoRegister)
+ MBB.addLiveIn(ScratchOffsetReg);
+ RS.enterBasicBlock(&MBB);
+
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ RS.forward(I);
+ DebugLoc DL = MI.getDebugLoc();
+ switch(MI.getOpcode()) {
+ default: break;
+ case AMDGPU::SI_SPILL_V512_SAVE:
+ case AMDGPU::SI_SPILL_V256_SAVE:
+ case AMDGPU::SI_SPILL_V128_SAVE:
+ case AMDGPU::SI_SPILL_V96_SAVE:
+ case AMDGPU::SI_SPILL_V64_SAVE:
+ case AMDGPU::SI_SPILL_V32_SAVE:
+ case AMDGPU::SI_SPILL_V32_RESTORE:
+ case AMDGPU::SI_SPILL_V64_RESTORE:
+ case AMDGPU::SI_SPILL_V128_RESTORE:
+ case AMDGPU::SI_SPILL_V256_RESTORE:
+ case AMDGPU::SI_SPILL_V512_RESTORE:
+
+ // Scratch resource
+ unsigned ScratchRsrcReg =
+ RS.scavengeRegister(&AMDGPU::SReg_128RegClass, 0);
+
+ uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
+ 0xffffffff; // Size
+
+ unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
+ unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
+ unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
+ unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
+
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc0)
+ .addExternalSymbol("SCRATCH_RSRC_DWORD0")
+ .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
+
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc1)
+ .addExternalSymbol("SCRATCH_RSRC_DWORD1")
+ .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
+
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc2)
+ .addImm(Rsrc & 0xffffffff)
+ .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
+
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc3)
+ .addImm(Rsrc >> 32)
+ .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
+
+ // Scratch Offset
+ if (ScratchOffsetReg == AMDGPU::NoRegister) {
+ ScratchOffsetReg = RS.scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_SPILL_S32_RESTORE),
+ ScratchOffsetReg)
+ .addFrameIndex(ScratchOffsetFI)
+ .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
+ .addReg(AMDGPU::SGPR0, RegState::Undef);
+ } else if (!MBB.isLiveIn(ScratchOffsetReg)) {
+ MBB.addLiveIn(ScratchOffsetReg);
+ }
+
+ if (ScratchRsrcReg == AMDGPU::NoRegister ||
+ ScratchOffsetReg == AMDGPU::NoRegister) {
+ LLVMContext &Ctx = MF.getFunction()->getContext();
+ Ctx.emitError("ran out of SGPRs for spilling VGPRs");
+ ScratchRsrcReg = AMDGPU::SGPR0;
+ ScratchOffsetReg = AMDGPU::SGPR0;
+ }
+ MI.getOperand(2).setReg(ScratchRsrcReg);
+ MI.getOperand(2).setIsKill(true);
+ MI.getOperand(2).setIsUndef(false);
+ MI.getOperand(3).setReg(ScratchOffsetReg);
+ MI.getOperand(3).setIsUndef(false);
+ MI.getOperand(3).setIsKill(false);
+ MI.addOperand(MachineOperand::CreateReg(Rsrc0, false, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Rsrc1, false, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Rsrc2, false, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Rsrc3, false, true, true));
+
+ break;
+ }
+ }
+ }
+ return true;
+}
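
The four S_MOV_B32s emitted above assemble a 128-bit buffer resource descriptor: the two low dwords are patched with the scratch base pointer through the SCRATCH_RSRC_DWORD0/1 external symbols, and the two high dwords are split out of the 64-bit Rsrc constant. A minimal standalone sketch of that split; the two bit-position constants are placeholders, not the real SIDefines.h values:

    #include <cstdint>
    #include <cstdio>

    // Placeholder bit positions; the real constants live in SIDefines.h.
    constexpr uint64_t RsrcDataFormat = 0xf00000000000ULL; // hypothetical
    constexpr uint64_t RsrcTidEnable  = 1ULL << 55;        // hypothetical

    int main() {
      // The low 32 bits hold the buffer size; all-ones keeps every per-wave
      // scratch offset in range.
      uint64_t Rsrc  = RsrcDataFormat | RsrcTidEnable | 0xffffffffULL;
      uint32_t Rsrc2 = static_cast<uint32_t>(Rsrc);       // S_MOV_B32 -> sub2
      uint32_t Rsrc3 = static_cast<uint32_t>(Rsrc >> 32); // S_MOV_B32 -> sub3
      std::printf("rsrc2=0x%08x rsrc3=0x%08x\n", Rsrc2, Rsrc3);
      return 0;
    }
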
diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp
index cffea12..9224e14 100644
--- a/lib/Target/R600/SIRegisterInfo.cpp
+++ b/lib/Target/R600/SIRegisterInfo.cpp
@@ -40,6 +40,8 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
Reserved.set(AMDGPU::FLAT_SCR);
+ Reserved.set(AMDGPU::FLAT_SCR_LO);
+ Reserved.set(AMDGPU::FLAT_SCR_HI);
// Reserve some VGPRs to use as temp registers in case we have to spill VGPRs
Reserved.set(AMDGPU::VGPR255);
@@ -48,9 +50,32 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const {
- return RC->getNumRegs();
+unsigned SIRegisterInfo::getRegPressureSetLimit(unsigned Idx) const {
+
+ // FIXME: We should adjust the max number of waves based on LDS size.
+ unsigned SGPRLimit = getNumSGPRsAllowed(ST.getMaxWavesPerCU());
+ unsigned VGPRLimit = getNumVGPRsAllowed(ST.getMaxWavesPerCU());
+
+ for (regclass_iterator I = regclass_begin(), E = regclass_end();
+ I != E; ++I) {
+
+ unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
+ unsigned Limit;
+
+ if (isSGPRClass(*I)) {
+ Limit = SGPRLimit / NumSubRegs;
+ } else {
+ Limit = VGPRLimit / NumSubRegs;
+ }
+
+ const int *Sets = getRegClassPressureSets(*I);
+ assert(Sets);
+ for (unsigned i = 0; Sets[i] != -1; ++i) {
+ if (Sets[i] == (int)Idx)
+ return Limit;
+ }
+ }
+ return 256;
}
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
@@ -92,6 +117,60 @@ static unsigned getNumSubRegsForSpillOp(unsigned Op) {
}
}
+void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
+ unsigned LoadStoreOp,
+ unsigned Value,
+ unsigned ScratchRsrcReg,
+ unsigned ScratchOffset,
+ int64_t Offset,
+ RegScavenger *RS) const {
+
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+ MachineBasicBlock *MBB = MI->getParent();
+ const MachineFunction *MF = MI->getParent()->getParent();
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ DebugLoc DL = MI->getDebugLoc();
+ bool IsLoad = TII->get(LoadStoreOp).mayLoad();
+
+ bool RanOutOfSGPRs = false;
+ unsigned SOffset = ScratchOffset;
+
+ unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+ unsigned Size = NumSubRegs * 4;
+
+ if (!isUInt<12>(Offset + Size)) {
+ SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
+ if (SOffset == AMDGPU::NoRegister) {
+ RanOutOfSGPRs = true;
+ SOffset = AMDGPU::SGPR0;
+ }
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
+ .addReg(ScratchOffset)
+ .addImm(Offset);
+ Offset = 0;
+ }
+
+ if (RanOutOfSGPRs)
+ Ctx.emitError("Ran out of SGPRs for spilling VGPRS");
+
+ for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
+ unsigned SubReg = NumSubRegs > 1 ?
+ getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
+ Value;
+ bool IsKill = (i == e - 1);
+
+ BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
+ .addReg(SubReg, getDefRegState(IsLoad))
+ .addReg(ScratchRsrcReg, getKillRegState(IsKill))
+ .addImm(Offset)
+ .addReg(SOffset)
+ .addImm(0) // glc
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
+ }
+}
+
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
@@ -125,7 +204,9 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
Ctx.emitError("Ran out of VGPRs for spilling SGPR");
}
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
+ BuildMI(*MBB, MI, DL,
+ TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
+ Spill.VGPR)
.addReg(SubReg)
.addImm(Spill.Lane);
@@ -154,13 +235,15 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
Ctx.emitError("Ran out of VGPRs for spilling SGPR");
}
- if (isM0) {
+ if (isM0)
SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
- }
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
+ BuildMI(*MBB, MI, DL,
+ TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
+ SubReg)
.addReg(Spill.VGPR)
- .addImm(Spill.Lane);
+ .addImm(Spill.Lane)
+ .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
if (isM0) {
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.addReg(SubReg);
@@ -177,71 +260,25 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_V128_SAVE:
case AMDGPU::SI_SPILL_V96_SAVE:
case AMDGPU::SI_SPILL_V64_SAVE:
- case AMDGPU::SI_SPILL_V32_SAVE: {
- unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
- unsigned SrcReg = MI->getOperand(0).getReg();
- int64_t Offset = FrameInfo->getObjectOffset(Index);
- unsigned Size = NumSubRegs * 4;
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
-
- for (unsigned i = 0, e = NumSubRegs; i != e; ++i) {
- unsigned SubReg = NumSubRegs > 1 ?
- getPhysRegSubReg(SrcReg, &AMDGPU::VGPR_32RegClass, i) :
- SrcReg;
- Offset += (i * 4);
- MFI->LDSWaveSpillSize = std::max((unsigned)Offset + 4, (unsigned)MFI->LDSWaveSpillSize);
-
- unsigned AddrReg = TII->calculateLDSSpillAddress(*MBB, MI, RS, TmpReg,
- Offset, Size);
-
- if (AddrReg == AMDGPU::NoRegister) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
- Ctx.emitError("Ran out of VGPRs for spilling VGPRS");
- AddrReg = AMDGPU::VGPR0;
- }
-
- // Store the value in LDS
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::DS_WRITE_B32))
- .addImm(0) // gds
- .addReg(AddrReg, RegState::Kill) // addr
- .addReg(SubReg) // data0
- .addImm(0); // offset
- }
-
+ case AMDGPU::SI_SPILL_V32_SAVE:
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
+ TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
+ FrameInfo->getObjectOffset(Index), RS);
MI->eraseFromParent();
break;
- }
case AMDGPU::SI_SPILL_V32_RESTORE:
case AMDGPU::SI_SPILL_V64_RESTORE:
+ case AMDGPU::SI_SPILL_V96_RESTORE:
case AMDGPU::SI_SPILL_V128_RESTORE:
case AMDGPU::SI_SPILL_V256_RESTORE:
case AMDGPU::SI_SPILL_V512_RESTORE: {
- unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
- unsigned DstReg = MI->getOperand(0).getReg();
- int64_t Offset = FrameInfo->getObjectOffset(Index);
- unsigned Size = NumSubRegs * 4;
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
-
- // FIXME: We could use DS_READ_B64 here to optimize for larger registers.
- for (unsigned i = 0, e = NumSubRegs; i != e; ++i) {
- unsigned SubReg = NumSubRegs > 1 ?
- getPhysRegSubReg(DstReg, &AMDGPU::VGPR_32RegClass, i) :
- DstReg;
-
- Offset += (i * 4);
- unsigned AddrReg = TII->calculateLDSSpillAddress(*MBB, MI, RS, TmpReg,
- Offset, Size);
- if (AddrReg == AMDGPU::NoRegister) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
- Ctx.emitError("Ran out of VGPRs for spilling VGPRs");
- AddrReg = AMDGPU::VGPR0;
- }
-
- BuildMI(*MBB, MI, DL, TII->get(AMDGPU::DS_READ_B32), SubReg)
- .addImm(0) // gds
- .addReg(AddrReg, RegState::Kill) // addr
- .addImm(0); //offset
- }
+ buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
+ TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
+ FrameInfo->getObjectOffset(Index), RS);
MI->eraseFromParent();
break;
}
@@ -250,11 +287,11 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int64_t Offset = FrameInfo->getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
+ unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
BuildMI(*MBB, MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
.addImm(Offset);
- FIOp.ChangeToRegister(TmpReg, false);
+ FIOp.ChangeToRegister(TmpReg, false, false, true);
}
}
}
@@ -264,7 +301,7 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
MVT VT) const {
switch(VT.SimpleTy) {
default:
- case MVT::i32: return &AMDGPU::VReg_32RegClass;
+ case MVT::i32: return &AMDGPU::VGPR_32RegClass;
}
}
@@ -276,7 +313,7 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
assert(!TargetRegisterInfo::isVirtualRegister(Reg));
static const TargetRegisterClass *BaseClasses[] = {
- &AMDGPU::VReg_32RegClass,
+ &AMDGPU::VGPR_32RegClass,
&AMDGPU::SReg_32RegClass,
&AMDGPU::VReg_64RegClass,
&AMDGPU::SReg_64RegClass,
@@ -297,7 +334,7 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
}
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
- return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
+ return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
@@ -312,7 +349,7 @@ const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
} else if (SRC == &AMDGPU::SCCRegRegClass) {
return &AMDGPU::VCCRegRegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
- return &AMDGPU::VReg_32RegClass;
+ return &AMDGPU::VGPR_32RegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
return &AMDGPU::VReg_64RegClass;
} else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
@@ -388,40 +425,17 @@ unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
return SubRC->getRegister(Index + Channel);
}
-bool SIRegisterInfo::regClassCanUseLiteralConstant(int RCID) const {
- switch (RCID) {
- default: return false;
- case AMDGPU::SSrc_32RegClassID:
- case AMDGPU::SSrc_64RegClassID:
- case AMDGPU::VSrc_32RegClassID:
- case AMDGPU::VSrc_64RegClassID:
- return true;
- }
-}
-
-bool SIRegisterInfo::regClassCanUseLiteralConstant(
- const TargetRegisterClass *RC) const {
- return regClassCanUseLiteralConstant(RC->getID());
+bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
+ return OpType == AMDGPU::OPERAND_REG_IMM32;
}
-bool SIRegisterInfo::regClassCanUseInlineConstant(int RCID) const {
- if (regClassCanUseLiteralConstant(RCID))
+bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
+ if (opCanUseLiteralConstant(OpType))
return true;
- switch (RCID) {
- default: return false;
- case AMDGPU::VCSrc_32RegClassID:
- case AMDGPU::VCSrc_64RegClassID:
- return true;
- }
-}
-
-bool SIRegisterInfo::regClassCanUseInlineConstant(
- const TargetRegisterClass *RC) const {
- return regClassCanUseInlineConstant(RC->getID());
+ return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
-
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
enum PreloadedValue Value) const {
@@ -434,6 +448,8 @@ unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
case SIRegisterInfo::TGID_Z:
return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
+ if (MFI->getShaderType() != ShaderType::COMPUTE)
+ return MFI->ScratchOffsetReg;
return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
case SIRegisterInfo::SCRATCH_PTR:
return AMDGPU::SGPR2_SGPR3;
@@ -452,9 +468,8 @@ unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
// AMDGPU::NoRegister.
-unsigned SIRegisterInfo::findUnusedVGPR(const MachineRegisterInfo &MRI) const {
-
- const TargetRegisterClass *RC = &AMDGPU::VGPR_32RegClass;
+unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
+ const TargetRegisterClass *RC) const {
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
@@ -464,3 +479,29 @@ unsigned SIRegisterInfo::findUnusedVGPR(const MachineRegisterInfo &MRI) const {
return AMDGPU::NoRegister;
}
+unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
+ switch(WaveCount) {
+ case 10: return 24;
+ case 9: return 28;
+ case 8: return 32;
+ case 7: return 36;
+ case 6: return 40;
+ case 5: return 48;
+ case 4: return 64;
+ case 3: return 84;
+ case 2: return 128;
+ default: return 256;
+ }
+}
+
+unsigned SIRegisterInfo::getNumSGPRsAllowed(unsigned WaveCount) const {
+ switch(WaveCount) {
+ case 10: return 48;
+ case 9: return 56;
+ case 8: return 64;
+ case 7: return 72;
+ case 6: return 80;
+ case 5: return 96;
+ default: return 103;
+ }
+}
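
The two switch tables added above follow a simple pattern: divide a fixed per-SIMD register file by the wave count and round down to the hardware allocation granule. The totals and granules used below (256 VGPRs with a granule of 4, 512 SGPRs with a granule of 8) are inferred from the tables themselves rather than stated in the patch, so treat this as a sketch:

    #include <cassert>

    // Registers usable per wave: file size / waves, rounded down to the
    // allocation granule (integer division does the rounding).
    unsigned allowed(unsigned Total, unsigned Granule, unsigned WaveCount) {
      return Total / WaveCount / Granule * Granule;
    }

    int main() {
      assert(allowed(256, 4, 10) == 24); // getNumVGPRsAllowed(10)
      assert(allowed(256, 4, 3)  == 84); // getNumVGPRsAllowed(3)
      assert(allowed(512, 8, 7)  == 72); // getNumSGPRsAllowed(7)
      assert(allowed(512, 8, 5)  == 96); // getNumSGPRsAllowed(5)
      return 0;
    }
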
diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h
index c7e54db..d908ffd 100644
--- a/lib/Target/R600/SIRegisterInfo.h
+++ b/lib/Target/R600/SIRegisterInfo.h
@@ -17,6 +17,7 @@
#define LLVM_LIB_TARGET_R600_SIREGISTERINFO_H
#include "AMDGPURegisterInfo.h"
+#include "llvm/Support/Debug.h"
namespace llvm {
@@ -26,8 +27,7 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const override;
- unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const override;
+ unsigned getRegPressureSetLimit(unsigned Idx) const override;
bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
@@ -42,7 +42,7 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
unsigned getHWRegIndex(unsigned Reg) const override;
/// \brief Return the 'base' register class for this register.
- /// e.g. SGPR0 => SReg_32, VGPR => VReg_32 SGPR0_SGPR1 -> SReg_32, etc.
+ /// e.g. SGPR0 => SReg_32, VGPR0 => VGPR_32, SGPR0_SGPR1 => SReg_64, etc.
const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;
/// \returns true if this class contains only SGPR registers
@@ -80,22 +80,14 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
unsigned getPhysRegSubReg(unsigned Reg, const TargetRegisterClass *SubRC,
unsigned Channel) const;
- /// \returns True if operands defined with this register class can accept
+ /// \returns True if operands defined with this operand type can accept
/// a literal constant (i.e. any 32-bit immediate).
- bool regClassCanUseLiteralConstant(int RCID) const;
+ bool opCanUseLiteralConstant(unsigned OpType) const;
- /// \returns True if operands defined with this register class can accept
- /// a literal constant (i.e. any 32-bit immediate).
- bool regClassCanUseLiteralConstant(const TargetRegisterClass *RC) const;
-
- /// \returns True if operands defined with this register class can accept
+ /// \returns True if operands defined with this operand type can accept
/// an inline constant. i.e. An integer value in the range (-16, 64) or
/// -4.0f, -2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 4.0f.
- bool regClassCanUseInlineConstant(int RCID) const;
-
- /// \returns True if operands defined with this register class can accept
- /// a literal constant. i.e. A value in the range (-16, 64).
- bool regClassCanUseInlineConstant(const TargetRegisterClass *RC) const;
+ bool opCanUseInlineConstant(unsigned OpType) const;
enum PreloadedValue {
TGID_X,
@@ -113,7 +105,22 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
unsigned getPreloadedValue(const MachineFunction &MF,
enum PreloadedValue Value) const;
- unsigned findUnusedVGPR(const MachineRegisterInfo &MRI) const;
+ /// \brief Return the maximum number of VGPRs that can be used by \p WaveCount
+ /// concurrent waves.
+ unsigned getNumVGPRsAllowed(unsigned WaveCount) const;
+
+ /// \brief Return the maximum number of SGPRs that can be used by \p WaveCount
+ /// concurrent waves.
+ unsigned getNumSGPRsAllowed(unsigned WaveCount) const;
+
+ unsigned findUnusedRegister(const MachineRegisterInfo &MRI,
+ const TargetRegisterClass *RC) const;
+
+private:
+ void buildScratchLoadStore(MachineBasicBlock::iterator MI,
+ unsigned LoadStoreOp, unsigned Value,
+ unsigned ScratchRsrcReg, unsigned ScratchOffset,
+ int64_t Offset, RegScavenger *RS) const;
};
} // End namespace llvm
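
As a rough sketch of the distinction the two queries above encode: an inline constant is encoded in the source operand field itself and costs nothing, while a literal occupies an extra 32-bit dword after the instruction and is only legal for OPERAND_REG_IMM32 operands. The bounds below are taken as inclusive, which is how the backend's isInlineConstant check treats the range the comment describes:

    #include <cstdint>

    // Integer immediates the GCN encoding can inline for free; FP operands
    // additionally inline the fixed set +-0.5, +-1.0, +-2.0, +-4.0 and 0.0.
    bool isInlineImm(int64_t V) { return V >= -16 && V <= 64; }
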
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
index 45c2b41..8b25e95 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/R600/SIRegisterInfo.td
@@ -21,7 +21,7 @@ def VCC_LO : SIReg<"vcc_lo", 106>;
def VCC_HI : SIReg<"vcc_hi", 107>;
// VCC for 64-bit instructions
-def VCC : RegisterWithSubRegs<"VCC", [VCC_LO, VCC_HI]> {
+def VCC : RegisterWithSubRegs<"vcc", [VCC_LO, VCC_HI]> {
let Namespace = "AMDGPU";
let SubRegIndices = [sub0, sub1];
let HWEncoding = 106;
@@ -36,14 +36,14 @@ def EXEC : RegisterWithSubRegs<"EXEC", [EXEC_LO, EXEC_HI]> {
let HWEncoding = 126;
}
-def SCC : SIReg<"SCC", 253>;
-def M0 : SIReg <"M0", 124>;
+def SCC : SIReg<"scc", 253>;
+def M0 : SIReg <"m0", 124>;
def FLAT_SCR_LO : SIReg<"flat_scr_lo", 104>; // Offset in units of 256-bytes.
def FLAT_SCR_HI : SIReg<"flat_scr_hi", 105>; // Size is the per-thread scratch size, in bytes.
// Pair to indicate location of scratch space for flat accesses.
-def FLAT_SCR : RegisterWithSubRegs <"FLAT_SCR", [FLAT_SCR_LO, FLAT_SCR_HI]> {
+def FLAT_SCR : RegisterWithSubRegs <"flat_scr", [FLAT_SCR_LO, FLAT_SCR_HI]> {
let Namespace = "AMDGPU";
let SubRegIndices = [sub0, sub1];
let HWEncoding = 104;
@@ -184,9 +184,9 @@ def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add SGPR_32, M0Reg, VCC_LO, VCC_HI, EXEC_LO, EXEC_HI, FLAT_SCR_LO, FLAT_SCR_HI)
>;
-def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64], 64, (add SGPR_64Regs)>;
+def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64], 64, (add SGPR_64Regs)>;
-def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64,
+def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64, i1], 64,
(add SGPR_64, VCCReg, EXECReg, FLAT_SCR)
>;
@@ -197,8 +197,6 @@ def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256
def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 512, (add SGPR_512)>;
// Register class for all vector registers (VGPRs + Interploation Registers)
-def VReg_32 : RegisterClass<"AMDGPU", [i32, f32, v1i32], 32, (add VGPR_32)>;
-
def VReg_64 : RegisterClass<"AMDGPU", [i64, f64, v2i32, v2f32], 64, (add VGPR_64)>;
def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
@@ -211,31 +209,53 @@ def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256
def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 512, (add VGPR_512)>;
-def VReg_1 : RegisterClass<"AMDGPU", [i1], 32, (add VGPR_32)>;
+def VReg_1 : RegisterClass<"AMDGPU", [i1], 32, (add VGPR_32)> {
+ let Size = 32;
+}
+
+class RegImmOperand <RegisterClass rc> : RegisterOperand<rc> {
+ let OperandNamespace = "AMDGPU";
+ let OperandType = "OPERAND_REG_IMM32";
+}
+
+class RegInlineOperand <RegisterClass rc> : RegisterOperand<rc> {
+ let OperandNamespace = "AMDGPU";
+ let OperandType = "OPERAND_REG_INLINE_C";
+}
//===----------------------------------------------------------------------===//
// SSrc_* Operands with an SGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//
-def SSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add SReg_32)>;
+def SSrc_32 : RegImmOperand<SReg_32>;
-def SSrc_64 : RegisterClass<"AMDGPU", [i64, f64, i1], 64, (add SReg_64)>;
+def SSrc_64 : RegImmOperand<SReg_64>;
+
+//===----------------------------------------------------------------------===//
+// SCSrc_* Operands with an SGPR or an inline constant
+//===----------------------------------------------------------------------===//
+
+def SCSrc_32 : RegInlineOperand<SReg_32>;
//===----------------------------------------------------------------------===//
// VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//
-def VSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VReg_32, SReg_32)>;
+def VS_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VGPR_32, SReg_32)>;
+
+def VS_64 : RegisterClass<"AMDGPU", [i64, f64], 64, (add VReg_64, SReg_64)>;
+
+def VSrc_32 : RegImmOperand<VS_32>;
-def VSrc_64 : RegisterClass<"AMDGPU", [i64, f64], 64, (add VReg_64, SReg_64)>;
+def VSrc_64 : RegImmOperand<VS_64>;
//===----------------------------------------------------------------------===//
// VCSrc_* Operands with an SGPR, VGPR or an inline constant
//===----------------------------------------------------------------------===//
-def VCSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VReg_32, SReg_32)>;
+def VCSrc_32 : RegInlineOperand<VS_32>;
-def VCSrc_64 : RegisterClass<"AMDGPU", [i64, f64], 64, (add VReg_64, SReg_64)>;
+def VCSrc_64 : RegInlineOperand<VS_64>;
//===----------------------------------------------------------------------===//
// SGPR and VGPR register classes
diff --git a/lib/Target/R600/SISchedule.td b/lib/Target/R600/SISchedule.td
index 28b65b8..9b1f676 100644
--- a/lib/Target/R600/SISchedule.td
+++ b/lib/Target/R600/SISchedule.td
@@ -7,9 +7,85 @@
//
//===----------------------------------------------------------------------===//
//
-// TODO: This is just a place holder for now.
+// MachineModel definitions for Southern Islands (SI)
//
//===----------------------------------------------------------------------===//
+def WriteBranch : SchedWrite;
+def WriteExport : SchedWrite;
+def WriteLDS : SchedWrite;
+def WriteSALU : SchedWrite;
+def WriteSMEM : SchedWrite;
+def WriteVMEM : SchedWrite;
-def SI_Itin : ProcessorItineraries <[], [], []>;
+// Vector ALU instructions
+def Write32Bit : SchedWrite;
+def WriteQuarterRate32 : SchedWrite;
+
+def WriteFloatFMA : SchedWrite;
+
+def WriteDouble : SchedWrite;
+def WriteDoubleAdd : SchedWrite;
+
+def SIFullSpeedModel : SchedMachineModel;
+def SIQuarterSpeedModel : SchedMachineModel;
+
+// BufferSize = 0 means the processors are in-order.
+let BufferSize = 0 in {
+
+// XXX: Are the resource counts correct?
+def HWBranch : ProcResource<1>;
+def HWExport : ProcResource<7>; // Taken from S_WAITCNT
+def HWLGKM : ProcResource<31>; // Taken from S_WAITCNT
+def HWSALU : ProcResource<1>;
+def HWVMEM : ProcResource<15>; // Taken from S_WAITCNT
+def HWVALU : ProcResource<1>;
+
+}
+
+class HWWriteRes<SchedWrite write, list<ProcResourceKind> resources,
+ int latency> : WriteRes<write, resources> {
+ let Latency = latency;
+}
+
+class HWVALUWriteRes<SchedWrite write, int latency> :
+ HWWriteRes<write, [HWVALU], latency>;
+
+// The latency numbers are taken from the AMD Accelerated Parallel Processing
+// guide. They may not be accurate.
+
+// The latency values are (1 / (operations per cycle)) / 4.
+multiclass SICommonWriteRes {
+
+ def : HWWriteRes<WriteBranch, [HWBranch], 100>; // XXX: Guessed ???
+ def : HWWriteRes<WriteExport, [HWExport], 100>; // XXX: Guessed ???
+ def : HWWriteRes<WriteLDS, [HWLGKM], 32>; // 2 - 64
+ def : HWWriteRes<WriteSALU, [HWSALU], 1>;
+ def : HWWriteRes<WriteSMEM, [HWLGKM], 10>; // XXX: Guessed ???
+ def : HWWriteRes<WriteVMEM, [HWVMEM], 450>; // 300 - 600
+
+ def : HWVALUWriteRes<Write32Bit, 1>;
+ def : HWVALUWriteRes<WriteQuarterRate32, 4>;
+}
+
+let SchedModel = SIFullSpeedModel in {
+
+defm : SICommonWriteRes;
+
+def : HWVALUWriteRes<WriteFloatFMA, 1>;
+def : HWVALUWriteRes<WriteDouble, 4>;
+def : HWVALUWriteRes<WriteDoubleAdd, 2>;
+
+} // End SchedModel = SIFullSpeedModel
+
+let SchedModel = SIQuarterSpeedModel in {
+
+defm : SICommonWriteRes;
+
+def : HWVALUWriteRes<WriteFloatFMA, 16>;
+def : HWVALUWriteRes<WriteDouble, 16>;
+def : HWVALUWriteRes<WriteDoubleAdd, 8>;
+
+} // End SchedModel = SIQuarterSpeedModel
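
Reading that latency formula back against the tables, assuming a full-rate VALU op issues once per 4 cycles (a 64-lane wave over a 16-lane SIMD), a quarter-rate op once per 16, and a quarter-speed double once per 64:

    (1 / (1/4  op per cycle)) / 4 = 1    -> Write32Bit
    (1 / (1/16 op per cycle)) / 4 = 4    -> WriteQuarterRate32
    (1 / (1/64 op per cycle)) / 4 = 16   -> WriteDouble, quarter-speed model
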
diff --git a/lib/Target/R600/SIShrinkInstructions.cpp b/lib/Target/R600/SIShrinkInstructions.cpp
index 45e83f5..97bbd78 100644
--- a/lib/Target/R600/SIShrinkInstructions.cpp
+++ b/lib/Target/R600/SIShrinkInstructions.cpp
@@ -10,6 +10,7 @@
//
#include "AMDGPU.h"
+#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
@@ -126,37 +127,32 @@ static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
TII->isVOPC(MI.getOpcode()));
const SIRegisterInfo &TRI = TII->getRegisterInfo();
- MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+ int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
+ MachineOperand &Src0 = MI.getOperand(Src0Idx);
// Only one literal constant is allowed per instruction, so if src0 is a
// literal constant then we can't do any folding.
- if ((Src0->isImm() || Src0->isFPImm()) && TII->isLiteralConstant(*Src0))
+ if (Src0.isImm() &&
+ TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
return;
-
// Literal constants and SGPRs can only be used in Src0, so if Src0 is an
// SGPR, we cannot commute the instruction, so we can't fold any literal
// constants.
- if (Src0->isReg() && !isVGPR(Src0, TRI, MRI))
+ if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
return;
// Try to fold Src0
- if (Src0->isReg()) {
- unsigned Reg = Src0->getReg();
+ if (Src0.isReg()) {
+ unsigned Reg = Src0.getReg();
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
MachineOperand &MovSrc = Def->getOperand(1);
bool ConstantFolded = false;
if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
- Src0->ChangeToImmediate(MovSrc.getImm());
+ Src0.ChangeToImmediate(MovSrc.getImm());
ConstantFolded = true;
- } else if (MovSrc.isFPImm()) {
- const ConstantFP *CFP = MovSrc.getFPImm();
- if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle) {
- Src0->ChangeToFPImmediate(CFP);
- ConstantFolded = true;
- }
}
if (ConstantFolded) {
if (MRI.use_empty(Reg))
@@ -193,13 +189,12 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
const MachineOperand &Src = MI.getOperand(1);
- // TODO: Handle FPImm?
if (Src.isImm()) {
- if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src)) {
+ if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
- continue;
- }
}
+
+ continue;
}
if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
@@ -213,13 +208,13 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
-
- // Op32 could be -1 here if we started with an instruction that had a
+ // getVOPe32 could be -1 here if we started with an instruction that had
// a 32-bit encoding and then commuted it to an instruction that did not.
- if (Op32 == -1)
+ if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
continue;
+ int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
+
if (TII->isVOPC(Op32)) {
unsigned DstReg = MI.getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
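
The deleted isFPImm() branch above reflects an assumption this patch relies on: floating-point immediates now reach this code as plain integer bit patterns, which is also why isLiteralConstant and isInlineConstant take an operand size. A small sketch of that assumption, not code from the patch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      float F = 1.0f;
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits)); // what MovSrc.getImm() would hold
      assert(Bits == 0x3f800000u);          // IEEE-754 pattern for 1.0f
      return 0;
    }
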
diff --git a/lib/Target/R600/SITypeRewriter.cpp b/lib/Target/R600/SITypeRewriter.cpp
index 9318dc1..27bbf4f 100644
--- a/lib/Target/R600/SITypeRewriter.cpp
+++ b/lib/Target/R600/SITypeRewriter.cpp
@@ -61,8 +61,7 @@ bool SITypeRewriter::doInitialization(Module &M) {
}
bool SITypeRewriter::runOnFunction(Function &F) {
- AttributeSet Set = F.getAttributes();
- Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, "ShaderType");
+ Attribute A = F.getFnAttribute("ShaderType");
unsigned ShaderType = ShaderType::COMPUTE;
if (A.isStringAttribute()) {
diff --git a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
index f437564..d723d6e 100644
--- a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
+++ b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
@@ -16,11 +16,15 @@
using namespace llvm;
-/// \brief The target for the AMDGPU backend
+/// \brief The target which supports all AMD GPUs. This will eventually
+/// be deprecated and there will be an R600 target and a GCN target.
Target llvm::TheAMDGPUTarget;
+/// \brief The target for GCN GPUs
+Target llvm::TheGCNTarget;
/// \brief Extern function to initialize the targets for the AMDGPU backend
extern "C" void LLVMInitializeR600TargetInfo() {
RegisterTarget<Triple::r600, false>
R600(TheAMDGPUTarget, "r600", "AMD GPUs HD2XXX-HD6XXX");
+ RegisterTarget<Triple::amdgcn, false> GCN(TheGCNTarget, "amdgcn", "AMD GCN GPUs");
}
diff --git a/lib/Target/R600/VIInstrFormats.td b/lib/Target/R600/VIInstrFormats.td
new file mode 100644
index 0000000..d8738f9
--- /dev/null
+++ b/lib/Target/R600/VIInstrFormats.td
@@ -0,0 +1,166 @@
+//===-- VIInstrFormats.td - VI Instruction Encodings ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// VI Instruction format definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class DSe_vi <bits<8> op> : Enc64 {
+ bits<8> vdst;
+ bits<1> gds;
+ bits<8> addr;
+ bits<8> data0;
+ bits<8> data1;
+ bits<8> offset0;
+ bits<8> offset1;
+
+ let Inst{7-0} = offset0;
+ let Inst{15-8} = offset1;
+ let Inst{16} = gds;
+ let Inst{24-17} = op;
+ let Inst{31-26} = 0x36; //encoding
+ let Inst{39-32} = addr;
+ let Inst{47-40} = data0;
+ let Inst{55-48} = data1;
+ let Inst{63-56} = vdst;
+}
+
+class MUBUFe_vi <bits<7> op> : Enc64 {
+ bits<12> offset;
+ bits<1> offen;
+ bits<1> idxen;
+ bits<1> glc;
+ bits<1> lds;
+ bits<8> vaddr;
+ bits<8> vdata;
+ bits<7> srsrc;
+ bits<1> slc;
+ bits<1> tfe;
+ bits<8> soffset;
+
+ let Inst{11-0} = offset;
+ let Inst{12} = offen;
+ let Inst{13} = idxen;
+ let Inst{14} = glc;
+ let Inst{16} = lds;
+ let Inst{17} = slc;
+ let Inst{24-18} = op;
+ let Inst{31-26} = 0x38; //encoding
+ let Inst{39-32} = vaddr;
+ let Inst{47-40} = vdata;
+ let Inst{52-48} = srsrc{6-2};
+ let Inst{55} = tfe;
+ let Inst{63-56} = soffset;
+}
+
+class MTBUFe_vi <bits<4> op> : Enc64 {
+ bits<12> offset;
+ bits<1> offen;
+ bits<1> idxen;
+ bits<1> glc;
+ bits<4> dfmt;
+ bits<3> nfmt;
+ bits<8> vaddr;
+ bits<8> vdata;
+ bits<7> srsrc;
+ bits<1> slc;
+ bits<1> tfe;
+ bits<8> soffset;
+
+ let Inst{11-0} = offset;
+ let Inst{12} = offen;
+ let Inst{13} = idxen;
+ let Inst{14} = glc;
+ let Inst{18-15} = op;
+ let Inst{22-19} = dfmt;
+ let Inst{25-23} = nfmt;
+ let Inst{31-26} = 0x3a; //encoding
+ let Inst{39-32} = vaddr;
+ let Inst{47-40} = vdata;
+ let Inst{52-48} = srsrc{6-2};
+ let Inst{54} = slc;
+ let Inst{55} = tfe;
+ let Inst{63-56} = soffset;
+}
+
+class SMEMe_vi <bits<8> op, bit imm> : Enc64 {
+ bits<7> sbase;
+ bits<7> sdata;
+ bits<1> glc;
+ bits<20> offset;
+
+ let Inst{5-0} = sbase{6-1};
+ let Inst{12-6} = sdata;
+ let Inst{16} = glc;
+ let Inst{17} = imm;
+ let Inst{25-18} = op;
+ let Inst{31-26} = 0x30; //encoding
+ let Inst{51-32} = offset;
+}
+
+class VOP3e_vi <bits<10> op> : Enc64 {
+ bits<8> vdst;
+ bits<2> src0_modifiers;
+ bits<9> src0;
+ bits<2> src1_modifiers;
+ bits<9> src1;
+ bits<2> src2_modifiers;
+ bits<9> src2;
+ bits<1> clamp;
+ bits<2> omod;
+
+ let Inst{7-0} = vdst;
+ let Inst{8} = src0_modifiers{1};
+ let Inst{9} = src1_modifiers{1};
+ let Inst{10} = src2_modifiers{1};
+ let Inst{15} = clamp;
+ let Inst{25-16} = op;
+ let Inst{31-26} = 0x34; //encoding
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{61} = src0_modifiers{0};
+ let Inst{62} = src1_modifiers{0};
+ let Inst{63} = src2_modifiers{0};
+}
+
+class VOP3be_vi <bits<10> op> : Enc64 {
+ bits<8> vdst;
+ bits<2> src0_modifiers;
+ bits<9> src0;
+ bits<2> src1_modifiers;
+ bits<9> src1;
+ bits<2> src2_modifiers;
+ bits<9> src2;
+ bits<7> sdst;
+ bits<2> omod;
+ bits<1> clamp;
+
+ let Inst{7-0} = vdst;
+ let Inst{14-8} = sdst;
+ let Inst{15} = clamp;
+ let Inst{25-16} = op;
+ let Inst{31-26} = 0x34; //encoding
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{61} = src0_modifiers{0};
+ let Inst{62} = src1_modifiers{0};
+ let Inst{63} = src2_modifiers{0};
+}
+
+class EXPe_vi : EXPe {
+ let Inst{31-26} = 0x31; //encoding
+}
+
+class VINTRPe_vi <bits<2> op> : VINTRPe <op> {
+ let Inst{31-26} = 0x35; // encoding
+}
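
Each Enc64 class above is purely a bit-layout specification. Hand-rolling the DSe_vi layout in C++ makes the field placement explicit; this is an illustration only, since the real encoder is generated by TableGen:

    #include <cstdint>

    uint64_t encodeDSe_vi(uint8_t op, uint8_t vdst, bool gds, uint8_t addr,
                          uint8_t data0, uint8_t data1,
                          uint8_t offset0, uint8_t offset1) {
      uint64_t Inst = 0;
      Inst |= uint64_t(offset0);        // Inst{7-0}
      Inst |= uint64_t(offset1) << 8;   // Inst{15-8}
      Inst |= uint64_t(gds)     << 16;  // Inst{16}
      Inst |= uint64_t(op)      << 17;  // Inst{24-17}
      Inst |= uint64_t(0x36)    << 26;  // Inst{31-26}, the VI DS encoding
      Inst |= uint64_t(addr)    << 32;  // Inst{39-32}
      Inst |= uint64_t(data0)   << 40;  // Inst{47-40}
      Inst |= uint64_t(data1)   << 48;  // Inst{55-48}
      Inst |= uint64_t(vdst)    << 56;  // Inst{63-56}
      return Inst;
    }
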
diff --git a/lib/Target/R600/VIInstructions.td b/lib/Target/R600/VIInstructions.td
new file mode 100644
index 0000000..4a6e933
--- /dev/null
+++ b/lib/Target/R600/VIInstructions.td
@@ -0,0 +1,25 @@
+//===-- VIInstructions.td - VI Instruction Definitions --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Instruction definitions for VI and newer.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// SMEM Patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isVI] in {
+
+// 1. Offset as a 20-bit DWORD immediate
+def : Pat <
+ (SIload_constant v4i32:$sbase, IMM20bit:$offset),
+ (S_BUFFER_LOAD_DWORD_IMM $sbase, (as_i32imm $offset))
+>;
+
+} // End Predicates = [isVI]
diff --git a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index d0b362c..551189c 100644
--- a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -393,9 +393,6 @@ bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
MatchingInlineAsm);
switch (MatchResult) {
- default:
- break;
-
case Match_Success: {
Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst, STI);
@@ -422,7 +419,7 @@ bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction mnemonic");
}
- return true;
+ llvm_unreachable("Implement any new match types added!");
}
bool SparcAsmParser::
diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp
index 28369fd..38bff44 100644
--- a/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -45,10 +45,7 @@ namespace {
const SparcSubtarget *Subtarget;
static char ID;
- Filler(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm),
- Subtarget(&TM.getSubtarget<SparcSubtarget>()) {
- }
+ Filler(TargetMachine &tm) : MachineFunctionPass(ID), TM(tm) {}
const char *getPassName() const override {
return "SPARC Delay Slot Filler";
@@ -57,6 +54,7 @@ namespace {
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) override {
bool Changed = false;
+ Subtarget = &F.getSubtarget<SparcSubtarget>();
// This pass invalidates liveness information when it reorders
// instructions to fill delay slot.
@@ -109,8 +107,8 @@ FunctionPass *llvm::createSparcDelaySlotFillerPass(TargetMachine &tm) {
///
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
-
- const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ Subtarget = &MBB.getParent()->getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
MachineBasicBlock::iterator MI = I;
@@ -187,7 +185,7 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
if (J->getOpcode() == SP::RESTORErr
|| J->getOpcode() == SP::RESTOREri) {
// change retl to ret.
- slot->setDesc(TM.getSubtargetImpl()->getInstrInfo()->get(SP::RET));
+ slot->setDesc(Subtarget->getInstrInfo()->get(SP::RET));
return J;
}
}
@@ -329,8 +327,7 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
{
// Check Reg and all aliased Registers.
- for (MCRegAliasIterator AI(Reg, TM.getSubtargetImpl()->getRegisterInfo(),
- true);
+ for (MCRegAliasIterator AI(Reg, Subtarget->getRegisterInfo(), true);
AI.isValid(); ++AI)
if (RegSet.count(*AI))
return true;
@@ -483,7 +480,7 @@ bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
if (PrevInst->isBundledWithSucc())
return false;
- const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
switch (PrevInst->getOpcode()) {
default: break;
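
The same refactor recurs across the Sparc files in this patch: rather than caching the subtarget from the TargetMachine when the pass is constructed, it is re-resolved from each MachineFunction, since function attributes can select different subtargets. A minimal sketch of the shape (the pass name is hypothetical):

    #include "SparcSubtarget.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineFunctionPass.h"

    namespace {
    class ExamplePass : public llvm::MachineFunctionPass {
      const llvm::SparcSubtarget *Subtarget = nullptr;
      static char ID;
    public:
      ExamplePass() : MachineFunctionPass(ID) {}
      bool runOnMachineFunction(llvm::MachineFunction &MF) override {
        // Re-resolved per function, never cached across functions.
        Subtarget = &MF.getSubtarget<llvm::SparcSubtarget>();
        return false;
      }
    };
    char ExamplePass::ID = 0;
    } // end anonymous namespace
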
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 3a9c987..6767e4b 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -42,9 +42,7 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) {
SunStyleELFSectionSwitchSyntax = true;
UsesELFSectionDirectiveForBSS = true;
- if (TheTriple.getOS() == llvm::Triple::Solaris ||
- TheTriple.getOS() == llvm::Triple::OpenBSD)
- UseIntegratedAssembler = true;
+ UseIntegratedAssembler = true;
}
const MCExpr*
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
index eea9626..5128843 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
@@ -31,8 +31,8 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
class SparcMCCodeEmitter : public MCCodeEmitter {
- SparcMCCodeEmitter(const SparcMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const SparcMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ SparcMCCodeEmitter(const SparcMCCodeEmitter &) = delete;
+ void operator=(const SparcMCCodeEmitter &) = delete;
MCContext &Ctx;
public:
diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp
index 6432003..0439f9d 100644
--- a/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -43,8 +43,9 @@ namespace {
*OutStreamer.getTargetStreamer());
}
public:
- explicit SparcAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {}
+ explicit SparcAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
const char *getPassName() const override {
return "Sparc Assembly Printer";
@@ -277,7 +278,7 @@ void SparcAsmPrinter::EmitInstruction(const MachineInstr *MI)
}
void SparcAsmPrinter::EmitFunctionBodyStart() {
- if (!TM.getSubtarget<SparcSubtarget>().is64Bit())
+ if (!MF->getSubtarget<SparcSubtarget>().is64Bit())
return;
const MachineRegisterInfo &MRI = MF->getRegInfo();
@@ -296,7 +297,7 @@ void SparcAsmPrinter::EmitFunctionBodyStart() {
void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand (opNum);
SparcMCExpr::VariantKind TF = (SparcMCExpr::VariantKind) MO.getTargetFlags();
@@ -450,8 +451,7 @@ void SparcAsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataSection());
- unsigned PtrSize =
- TM.getSubtargetImpl()->getDataLayout()->getPointerSize(0);
+ unsigned PtrSize = TM.getDataLayout()->getPointerSize(0);
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(), PtrSize);
diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp
index 1b67b4b..a065d3a 100644
--- a/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -103,9 +103,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
SAVEri = SP::ADDri;
SAVErr = SP::ADDrr;
}
- NumBytes =
- -MF.getTarget().getSubtarget<SparcSubtarget>().getAdjustedFrameSize(
- NumBytes);
+ NumBytes = -MF.getSubtarget<SparcSubtarget>().getAdjustedFrameSize(NumBytes);
emitSPAdjustment(MF, MBB, MBBI, NumBytes, SAVErr, SAVEri);
MachineModuleInfo &MMI = MF.getMMI();
@@ -168,8 +166,7 @@ void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
if (NumBytes == 0)
return;
- NumBytes = MF.getTarget().getSubtarget<SparcSubtarget>().getAdjustedFrameSize(
- NumBytes);
+ NumBytes = MF.getSubtarget<SparcSubtarget>().getAdjustedFrameSize(NumBytes);
emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);
}
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index b3b029e..9f03b04 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -32,13 +32,13 @@ namespace {
class SparcDAGToDAGISel : public SelectionDAGISel {
/// Subtarget - Keep a pointer to the Sparc Subtarget around so that we can
/// make the right decision when generating code for different targets.
- const SparcSubtarget &Subtarget;
- SparcTargetMachine &TM;
+ const SparcSubtarget *Subtarget;
public:
- explicit SparcDAGToDAGISel(SparcTargetMachine &tm)
- : SelectionDAGISel(tm),
- Subtarget(tm.getSubtarget<SparcSubtarget>()),
- TM(tm) {
+ explicit SparcDAGToDAGISel(SparcTargetMachine &tm) : SelectionDAGISel(tm) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
}
SDNode *Select(SDNode *N) override;
@@ -66,8 +66,7 @@ private:
} // end anonymous namespace
SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg =
- TM.getSubtargetImpl()->getInstrInfo()->getGlobalBaseReg(MF);
+ unsigned GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode();
}
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index e6a69d2..6774977 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -57,7 +57,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
// Try to get first reg.
- if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+ if (unsigned Reg = State.AllocateReg(RegList)) {
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
} else {
// Assign whole thing in stack.
@@ -68,7 +68,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
}
// Try to get second reg.
- if (unsigned Reg = State.AllocateReg(RegList, 6))
+ if (unsigned Reg = State.AllocateReg(RegList))
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
else
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
@@ -497,7 +497,7 @@ LowerFormalArguments_32(SDValue Chain,
static const MCPhysReg ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
- unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
+ unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
unsigned ArgOffset = CCInfo.getNextStackOffset();
if (NumAllocated == 6)
@@ -914,8 +914,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const SparcRegisterInfo *TRI =
- getTargetMachine().getSubtarget<SparcSubtarget>().getRegisterInfo();
+ const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
const uint32_t *Mask = ((hasReturnsTwice)
? TRI->getRTCallPreservedMask(CallConv)
: TRI->getCallPreservedMask(CallConv));
@@ -1227,8 +1226,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const SparcRegisterInfo *TRI =
- getTargetMachine().getSubtarget<SparcSubtarget>().getRegisterInfo();
+ const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
const uint32_t *Mask =
((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
: TRI->getCallPreservedMask(CLI.CallConv));
@@ -1365,10 +1363,9 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
}
}
-SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
- : TargetLowering(TM) {
- Subtarget = &TM.getSubtarget<SparcSubtarget>();
-
+SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
+ const SparcSubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
// Set up the register classes.
addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
@@ -1378,11 +1375,14 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
// Turn FP extload into load/fextend
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
+ for (MVT VT : MVT::fp_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
+ }
// Sparc doesn't have i1 sign extending load
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// Turn FP truncstore into trunc + store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -1669,7 +1669,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setMinFunctionAlignment(2);
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget->getRegisterInfo());
}
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -1904,10 +1904,8 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
Ops.push_back(Callee);
Ops.push_back(Symbol);
Ops.push_back(DAG.getRegister(SP::O0, PtrVT));
- const uint32_t *Mask = getTargetMachine()
- .getSubtargetImpl()
- ->getRegisterInfo()
- ->getCallPreservedMask(CallingConv::C);
+ const uint32_t *Mask =
+ Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
Ops.push_back(InFlag);
@@ -2903,8 +2901,7 @@ MachineBasicBlock*
SparcTargetLowering::expandSelectCC(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned BROpcode) const {
- const TargetInstrInfo &TII =
- *getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
@@ -2965,8 +2962,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode,
unsigned CondCode) const {
- const TargetInstrInfo &TII =
- *getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -3134,8 +3130,9 @@ LowerAsmOperandForConstraint(SDValue Op,
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
-std::pair<unsigned, const TargetRegisterClass*>
-SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+std::pair<unsigned, const TargetRegisterClass *>
+SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
@@ -3160,11 +3157,12 @@ SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
char regIdx = '0' + (intVal % 8);
char tmp[] = { '{', regType, regIdx, '}', 0 };
std::string newConstraint = std::string(tmp);
- return TargetLowering::getRegForInlineAsmConstraint(newConstraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
+ VT);
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
bool
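
For contrast, the old and new shapes of setLoadExtAction as they appear in the SparcTargetLowering constructor hunks above: the action used to be keyed on the memory type alone, and is now keyed on (result type, memory type) pairs, which is why the one-liners became loops. Excerpted from the diff, not standalone code:

    // Old interface: one key, the memory type.
    //   setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
    // New interface: (result type, memory type) pairs, hence the loop.
    for (MVT VT : MVT::fp_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
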
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index a62d569..8715326 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -54,7 +54,7 @@ namespace llvm {
class SparcTargetLowering : public TargetLowering {
const SparcSubtarget *Subtarget;
public:
- SparcTargetLowering(TargetMachine &TM);
+ SparcTargetLowering(TargetMachine &TM, const SparcSubtarget &STI);
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// computeKnownBitsForTargetNode - Determine which of the bits specified
@@ -80,8 +80,10 @@ namespace llvm {
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td
index c320239..17daeca 100644
--- a/lib/Target/Sparc/SparcInstrInfo.td
+++ b/lib/Target/Sparc/SparcInstrInfo.td
@@ -22,38 +22,38 @@ include "SparcInstrFormats.td"
//===----------------------------------------------------------------------===//
// True when generating 32-bit code.
-def Is32Bit : Predicate<"!Subtarget.is64Bit()">;
+def Is32Bit : Predicate<"!Subtarget->is64Bit()">;
// True when generating 64-bit code. This also implies HasV9.
-def Is64Bit : Predicate<"Subtarget.is64Bit()">;
+def Is64Bit : Predicate<"Subtarget->is64Bit()">;
// HasV9 - This predicate is true when the target processor supports V9
// instructions. Note that the machine may be running in 32-bit mode.
-def HasV9 : Predicate<"Subtarget.isV9()">,
+def HasV9 : Predicate<"Subtarget->isV9()">,
AssemblerPredicate<"FeatureV9">;
// HasNoV9 - This predicate is true when the target doesn't have V9
// instructions. Use of this is just a hack for the isel not having proper
// costs for V8 instructions that are more expensive than their V9 ones.
-def HasNoV9 : Predicate<"!Subtarget.isV9()">;
+def HasNoV9 : Predicate<"!Subtarget->isV9()">;
// HasVIS - This is true when the target processor has VIS extensions.
-def HasVIS : Predicate<"Subtarget.isVIS()">,
+def HasVIS : Predicate<"Subtarget->isVIS()">,
AssemblerPredicate<"FeatureVIS">;
-def HasVIS2 : Predicate<"Subtarget.isVIS2()">,
+def HasVIS2 : Predicate<"Subtarget->isVIS2()">,
AssemblerPredicate<"FeatureVIS2">;
-def HasVIS3 : Predicate<"Subtarget.isVIS3()">,
+def HasVIS3 : Predicate<"Subtarget->isVIS3()">,
AssemblerPredicate<"FeatureVIS3">;
// HasHardQuad - This is true when the target processor supports quad floating
// point instructions.
-def HasHardQuad : Predicate<"Subtarget.hasHardQuad()">;
+def HasHardQuad : Predicate<"Subtarget->hasHardQuad()">;
// UseDeprecatedInsts - This predicate is true when the target processor is a
// V8, or when it is V9 but the V8 deprecated instructions are efficient enough
// to use when appropriate. In either of these cases, the instruction selector
// will pick deprecated instructions.
-def UseDeprecatedInsts : Predicate<"Subtarget.useDeprecatedV8Instructions()">;
+def UseDeprecatedInsts : Predicate<"Subtarget->useDeprecatedV8Instructions()">;
//===----------------------------------------------------------------------===//
// Instruction Pattern Stuff
diff --git a/lib/Target/Sparc/SparcSubtarget.cpp b/lib/Target/Sparc/SparcSubtarget.cpp
index eea0c8c..ce1105f 100644
--- a/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/lib/Target/Sparc/SparcSubtarget.cpp
@@ -26,32 +26,6 @@ using namespace llvm;
void SparcSubtarget::anchor() { }
-static std::string computeDataLayout(const SparcSubtarget &ST) {
- // Sparc is big endian.
- std::string Ret = "E-m:e";
-
- // Some ABIs have 32bit pointers.
- if (!ST.is64Bit())
- Ret += "-p:32:32";
-
- // Alignments for 64 bit integers.
- Ret += "-i64:64";
-
- // On SparcV9 128 floats are aligned to 128 bits, on others only to 64.
- // On SparcV9 registers can hold 64 or 32 bits, on others only 32.
- if (ST.is64Bit())
- Ret += "-n32:64";
- else
- Ret += "-f128:64-n32";
-
- if (ST.is64Bit())
- Ret += "-S128";
- else
- Ret += "-S64";
-
- return Ret;
-}
-
SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
IsV9 = false;
@@ -79,8 +53,8 @@ SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, TargetMachine &TM,
bool is64Bit)
: SparcGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit),
- DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS))),
- InstrInfo(*this), TLInfo(TM), TSInfo(DL), FrameLowering(*this) {}
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ TSInfo(*TM.getDataLayout()), FrameLowering(*this) {}
int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h
index d503b2b..e6cf460 100644
--- a/lib/Target/Sparc/SparcSubtarget.h
+++ b/lib/Target/Sparc/SparcSubtarget.h
@@ -37,7 +37,6 @@ class SparcSubtarget : public SparcGenSubtargetInfo {
bool Is64Bit;
bool HasHardQuad;
bool UsePopc;
- const DataLayout DL; // Calculates type size & alignment
SparcInstrInfo InstrInfo;
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
@@ -60,7 +59,6 @@ public:
const SparcSelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
- const DataLayout *getDataLayout() const override { return &DL; }
bool isV9() const { return IsV9; }
bool isVIS() const { return IsVIS; }
diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp
index 489bb69..1c423dc 100644
--- a/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -14,7 +14,7 @@
#include "SparcTargetObjectFile.h"
#include "Sparc.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -24,6 +24,32 @@ extern "C" void LLVMInitializeSparcTarget() {
RegisterTargetMachine<SparcV9TargetMachine> Y(TheSparcV9Target);
}
+static std::string computeDataLayout(bool is64Bit) {
+ // Sparc is big endian.
+ std::string Ret = "E-m:e";
+
+  // Some ABIs have 32-bit pointers.
+  if (!is64Bit)
+    Ret += "-p:32:32";
+
+  // Alignments for 64-bit integers.
+  Ret += "-i64:64";
+
+  // On SparcV9, 128-bit floats are aligned to 128 bits; on others, only to 64.
+  // On SparcV9, registers can hold 64 or 32 bits; on others, only 32.
+ if (is64Bit)
+ Ret += "-n32:64";
+ else
+ Ret += "-f128:64-n32";
+
+ if (is64Bit)
+ Ret += "-S128";
+ else
+ Ret += "-S64";
+
+ return Ret;
+}
+
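For reference, tracing the two branches of computeDataLayout above yields these concrete layout strings; a minimal self-contained sketch (the `layout` helper below just mirrors the function in this hunk, since the original is file-static):

    // Sketch: reproduces computeDataLayout to show the resulting strings.
    #include <cassert>
    #include <string>
    static std::string layout(bool is64Bit) {
      std::string Ret = "E-m:e";
      if (!is64Bit) Ret += "-p:32:32";   // 32-bit pointers
      Ret += "-i64:64";                  // i64 alignment
      Ret += is64Bit ? "-n32:64" : "-f128:64-n32";
      Ret += is64Bit ? "-S128" : "-S64"; // stack alignment
      return Ret;
    }
    int main() {
      assert(layout(false) == "E-m:e-p:32:32-i64:64-f128:64-n32-S64");
      assert(layout(true)  == "E-m:e-i64:64-n32:64-S128");
    }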
/// SparcTargetMachine ctor - Create an ILP32 architecture model
///
SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
@@ -34,6 +60,7 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
bool is64bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(make_unique<SparcELFTargetObjectFile>()),
+ DL(computeDataLayout(is64bit)),
Subtarget(TT, CPU, FS, *this, is64bit) {
initAsmInfo();
}
@@ -53,7 +80,7 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
- bool addPreEmitPass() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -72,12 +99,8 @@ bool SparcPassConfig::addInstSelector() {
return false;
}
-/// addPreEmitPass - This pass may be implemented by targets that want to run
-/// passes immediately before machine code is emitted. This should return
-/// true if -print-machineinstrs should print out the code after the passes.
-bool SparcPassConfig::addPreEmitPass(){
+void SparcPassConfig::addPreEmitPass(){
addPass(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
- return true;
}
void SparcV8TargetMachine::anchor() { }
diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h
index 096e7c8..4f93980 100644
--- a/lib/Target/Sparc/SparcTargetMachine.h
+++ b/lib/Target/Sparc/SparcTargetMachine.h
@@ -22,6 +22,7 @@ namespace llvm {
class SparcTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL;
SparcSubtarget Subtarget;
public:
SparcTargetMachine(const Target &T, StringRef TT,
@@ -30,6 +31,7 @@ public:
CodeGenOpt::Level OL, bool is64bit);
~SparcTargetMachine() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const SparcSubtarget *getSubtargetImpl() const override { return &Subtarget; }
// Pass Pipeline Configuration
diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 0955f4a..9181ff7 100644
--- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -57,6 +57,7 @@ private:
KindReg,
KindAccessReg,
KindImm,
+ KindImmTLS,
KindMem
};
@@ -96,11 +97,19 @@ private:
const MCExpr *Length;
};
+ // Imm is an immediate operand, and Sym is an optional TLS symbol
+ // for use with a __tls_get_offset marker relocation.
+ struct ImmTLSOp {
+ const MCExpr *Imm;
+ const MCExpr *Sym;
+ };
+
union {
TokenOp Token;
RegOp Reg;
unsigned AccessReg;
const MCExpr *Imm;
+ ImmTLSOp ImmTLS;
MemOp Mem;
};
@@ -160,6 +169,14 @@ public:
Op->Mem.Length = Length;
return Op;
}
+ static std::unique_ptr<SystemZOperand>
+ createImmTLS(const MCExpr *Imm, const MCExpr *Sym,
+ SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindImmTLS, StartLoc, EndLoc);
+ Op->ImmTLS.Imm = Imm;
+ Op->ImmTLS.Sym = Sym;
+ return Op;
+ }
// Token operands
bool isToken() const override {
@@ -200,6 +217,11 @@ public:
return Imm;
}
+ // Immediate operands with optional TLS symbol.
+ bool isImmTLS() const {
+ return Kind == KindImmTLS;
+ }
+
// Memory operands.
bool isMem() const override {
return Kind == KindMem;
@@ -260,6 +282,13 @@ public:
addExpr(Inst, Mem.Disp);
addExpr(Inst, Mem.Length);
}
+ void addImmTLSOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands");
+ assert(Kind == KindImmTLS && "Invalid operand type");
+ addExpr(Inst, ImmTLS.Imm);
+ if (ImmTLS.Sym)
+ addExpr(Inst, ImmTLS.Sym);
+ }
// Used by the TableGen code to check for particular operand types.
bool isGR32() const { return isReg(GR32Reg); }
@@ -325,6 +354,9 @@ private:
const unsigned *Regs, RegisterKind RegKind,
MemoryKind MemKind);
+ OperandMatchResultTy parsePCRel(OperandVector &Operands, int64_t MinVal,
+ int64_t MaxVal, bool AllowTLS);
+
bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
public:
@@ -395,13 +427,17 @@ public:
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem);
}
OperandMatchResultTy parseAccessReg(OperandVector &Operands);
- OperandMatchResultTy parsePCRel(OperandVector &Operands, int64_t MinVal,
- int64_t MaxVal);
OperandMatchResultTy parsePCRel16(OperandVector &Operands) {
- return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1);
+ return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1, false);
}
OperandMatchResultTy parsePCRel32(OperandVector &Operands) {
- return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1);
+ return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1, false);
+ }
+ OperandMatchResultTy parsePCRelTLS16(OperandVector &Operands) {
+ return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1, true);
+ }
+ OperandMatchResultTy parsePCRelTLS32(OperandVector &Operands) {
+ return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1, true);
}
};
} // end anonymous namespace
@@ -685,7 +721,6 @@ bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
MatchingInlineAsm);
switch (MatchResult) {
- default: break;
case Match_Success:
Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst, STI);
@@ -744,7 +779,7 @@ SystemZAsmParser::parseAccessReg(OperandVector &Operands) {
SystemZAsmParser::OperandMatchResultTy
SystemZAsmParser::parsePCRel(OperandVector &Operands, int64_t MinVal,
- int64_t MaxVal) {
+ int64_t MaxVal, bool AllowTLS) {
MCContext &Ctx = getContext();
MCStreamer &Out = getStreamer();
const MCExpr *Expr;
@@ -767,9 +802,54 @@ SystemZAsmParser::parsePCRel(OperandVector &Operands, int64_t MinVal,
Expr = Value == 0 ? Base : MCBinaryExpr::CreateAdd(Base, Expr, Ctx);
}
+ // Optionally match :tls_gdcall: or :tls_ldcall: followed by a TLS symbol.
+ const MCExpr *Sym = nullptr;
+ if (AllowTLS && getLexer().is(AsmToken::Colon)) {
+ Parser.Lex();
+
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(), "unexpected token");
+ return MatchOperand_ParseFail;
+ }
+
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
+ StringRef Name = Parser.getTok().getString();
+ if (Name == "tls_gdcall")
+ Kind = MCSymbolRefExpr::VK_TLSGD;
+ else if (Name == "tls_ldcall")
+ Kind = MCSymbolRefExpr::VK_TLSLDM;
+ else {
+ Error(Parser.getTok().getLoc(), "unknown TLS tag");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex();
+
+ if (Parser.getTok().isNot(AsmToken::Colon)) {
+ Error(Parser.getTok().getLoc(), "unexpected token");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex();
+
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(), "unexpected token");
+ return MatchOperand_ParseFail;
+ }
+
+ StringRef Identifier = Parser.getTok().getString();
+ Sym = MCSymbolRefExpr::Create(Ctx.GetOrCreateSymbol(Identifier),
+ Kind, Ctx);
+ Parser.Lex();
+ }
+
SMLoc EndLoc =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
+
+ if (AllowTLS)
+ Operands.push_back(SystemZOperand::createImmTLS(Expr, Sym,
+ StartLoc, EndLoc));
+ else
+ Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
+
return MatchOperand_Success;
}
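To make the AllowTLS path concrete, the marker syntax it accepts on a __tls_get_offset call looks like this (a sketch; `foo` is a hypothetical TLS symbol):

    brasl   %r14, __tls_get_offset@PLT:tls_gdcall:foo
    brasl   %r14, __tls_get_offset@PLT:tls_ldcall:foo

After the PC-relative expression is parsed, the lexer is left on the first colon; the code above then consumes the tag and the trailing identifier, recording `foo` as a VK_TLSGD or VK_TLSLDM symbol reference in the new ImmTLS operand.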
diff --git a/lib/Target/SystemZ/CMakeLists.txt b/lib/Target/SystemZ/CMakeLists.txt
index 41a614d..60a3912 100644
--- a/lib/Target/SystemZ/CMakeLists.txt
+++ b/lib/Target/SystemZ/CMakeLists.txt
@@ -20,6 +20,7 @@ add_llvm_target(SystemZCodeGen
SystemZISelDAGToDAG.cpp
SystemZISelLowering.cpp
SystemZInstrInfo.cpp
+ SystemZLDCleanup.cpp
SystemZLongBranch.cpp
SystemZMachineFunctionInfo.cpp
SystemZMCInstLower.cpp
diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
index d2ba9b6..996a492 100644
--- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
+++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
@@ -10,6 +10,7 @@
#include "SystemZInstPrinter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -124,6 +125,29 @@ void SystemZInstPrinter::printPCRelOperand(const MCInst *MI, int OpNum,
O << *MO.getExpr();
}
+void SystemZInstPrinter::printPCRelTLSOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ // Output the PC-relative operand.
+ printPCRelOperand(MI, OpNum, O);
+
+ // Output the TLS marker if present.
+ if ((unsigned)OpNum + 1 < MI->getNumOperands()) {
+ const MCOperand &MO = MI->getOperand(OpNum + 1);
+ const MCSymbolRefExpr &refExp = cast<MCSymbolRefExpr>(*MO.getExpr());
+ switch (refExp.getKind()) {
+ case MCSymbolRefExpr::VK_TLSGD:
+ O << ":tls_gdcall:";
+ break;
+ case MCSymbolRefExpr::VK_TLSLDM:
+ O << ":tls_ldcall:";
+ break;
+ default:
+ llvm_unreachable("Unexpected symbol kind");
+ }
+ O << refExp.getSymbol().getName();
+ }
+}
+
void SystemZInstPrinter::printOperand(const MCInst *MI, int OpNum,
raw_ostream &O) {
printOperand(MI->getOperand(OpNum), O);
diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
index 753903c..732e5fa 100644
--- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
+++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
@@ -56,6 +56,7 @@ private:
void printS32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printU32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printPCRelOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printPCRelTLSOperand(const MCInst *MI, int OpNum, raw_ostream &O);
void printAccessRegOperand(const MCInst *MI, int OpNum, raw_ostream &O);
// Print the mnemonic for a condition-code mask ("ne", "lh", etc.)
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 6e7268d..b79b1d8 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -27,9 +27,10 @@ static uint64_t extractBitsForFixup(MCFixupKind Kind, uint64_t Value) {
switch (unsigned(Kind)) {
case SystemZ::FK_390_PC16DBL:
case SystemZ::FK_390_PC32DBL:
- case SystemZ::FK_390_PLT16DBL:
- case SystemZ::FK_390_PLT32DBL:
return (int64_t)Value / 2;
+
+ case SystemZ::FK_390_TLS_CALL:
+ return 0;
}
llvm_unreachable("Unknown fixup kind!");
@@ -72,8 +73,7 @@ SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
const static MCFixupKindInfo Infos[SystemZ::NumTargetFixupKinds] = {
{ "FK_390_PC16DBL", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "FK_390_PC32DBL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
- { "FK_390_PLT16DBL", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "FK_390_PLT32DBL", 0, 32, MCFixupKindInfo::FKF_IsPCRel }
+ { "FK_390_TLS_CALL", 0, 0, 0 }
};
if (Kind < FirstTargetFixupKind)
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index 35887fa..0161d62 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -24,4 +24,6 @@ SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
UsesELFSectionDirectiveForBSS = true;
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
+
+ UseIntegratedAssembler = true;
}
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
index 27b4bd8..d9bb916 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
@@ -74,20 +74,36 @@ private:
// Operand OpNum of MI needs a PC-relative fixup of kind Kind at
// Offset bytes from the start of MI. Add the fixup to Fixups
// and return the in-place addend, which since we're a RELA target
- // is always 0.
+ // is always 0. If AllowTLS is true and optional operand OpNum + 1
+ // is present, also emit a TLS call fixup for it.
uint64_t getPCRelEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
- unsigned Kind, int64_t Offset) const;
+ unsigned Kind, int64_t Offset,
+ bool AllowTLS) const;
uint64_t getPC16DBLEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
- return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC16DBL, 2);
+ return getPCRelEncoding(MI, OpNum, Fixups,
+ SystemZ::FK_390_PC16DBL, 2, false);
}
uint64_t getPC32DBLEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
- return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC32DBL, 2);
+ return getPCRelEncoding(MI, OpNum, Fixups,
+ SystemZ::FK_390_PC32DBL, 2, false);
+ }
+ uint64_t getPC16DBLTLSEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ return getPCRelEncoding(MI, OpNum, Fixups,
+ SystemZ::FK_390_PC16DBL, 2, true);
+ }
+ uint64_t getPC32DBLTLSEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ return getPCRelEncoding(MI, OpNum, Fixups,
+ SystemZ::FK_390_PC32DBL, 2, true);
}
};
} // end anonymous namespace
@@ -181,7 +197,8 @@ getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
uint64_t
SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
- unsigned Kind, int64_t Offset) const {
+ unsigned Kind, int64_t Offset,
+ bool AllowTLS) const {
const MCOperand &MO = MI.getOperand(OpNum);
const MCExpr *Expr;
if (MO.isImm())
@@ -198,6 +215,13 @@ SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum,
}
}
Fixups.push_back(MCFixup::Create(Offset, Expr, (MCFixupKind)Kind));
+
+ // Output the fixup for the TLS marker if present.
+ if (AllowTLS && OpNum + 1 < MI.getNumOperands()) {
+ const MCOperand &MOTLS = MI.getOperand(OpNum + 1);
+ Fixups.push_back(MCFixup::Create(0, MOTLS.getExpr(),
+ (MCFixupKind)SystemZ::FK_390_TLS_CALL));
+ }
return 0;
}
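Schematically, a TLS-annotated call now yields two fixups, which the object-writer changes later in this patch turn into a relocation pair (a sketch for the general-dynamic marker):

    // brasl %r14, __tls_get_offset@PLT:tls_gdcall:foo
    //   offset 2, FK_390_PC32DBL on __tls_get_offset@PLT -> R_390_PLT32DBL
    //   offset 0, FK_390_TLS_CALL on foo@TLSGD           -> R_390_TLS_GDCALL
    // The marker fixup is zero bits wide; it exists only to carry the
    // relocation that tags the call for the linker.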
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
index 52a8d1d..229ab5d 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
@@ -18,8 +18,7 @@ enum FixupKind {
// These correspond directly to R_390_* relocations.
FK_390_PC16DBL = FirstTargetFixupKind,
FK_390_PC32DBL,
- FK_390_PLT16DBL,
- FK_390_PLT32DBL,
+ FK_390_TLS_CALL,
// Marker
LastTargetFixupKind,
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
index c6a1816..2632518 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
@@ -55,8 +55,6 @@ static unsigned getPCRelReloc(unsigned Kind) {
case FK_Data_8: return ELF::R_390_PC64;
case SystemZ::FK_390_PC16DBL: return ELF::R_390_PC16DBL;
case SystemZ::FK_390_PC32DBL: return ELF::R_390_PC32DBL;
- case SystemZ::FK_390_PLT16DBL: return ELF::R_390_PLT16DBL;
- case SystemZ::FK_390_PLT32DBL: return ELF::R_390_PLT32DBL;
}
llvm_unreachable("Unsupported PC-relative address");
}
@@ -70,6 +68,35 @@ static unsigned getTLSLEReloc(unsigned Kind) {
llvm_unreachable("Unsupported absolute address");
}
+// Return the R_390_TLS_LDO* relocation type for MCFixupKind Kind.
+static unsigned getTLSLDOReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_4: return ELF::R_390_TLS_LDO32;
+ case FK_Data_8: return ELF::R_390_TLS_LDO64;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
+// Return the R_390_TLS_LDM* relocation type for MCFixupKind Kind.
+static unsigned getTLSLDMReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_4: return ELF::R_390_TLS_LDM32;
+ case FK_Data_8: return ELF::R_390_TLS_LDM64;
+ case SystemZ::FK_390_TLS_CALL: return ELF::R_390_TLS_LDCALL;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
+// Return the R_390_TLS_GD* relocation type for MCFixupKind Kind.
+static unsigned getTLSGDReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_4: return ELF::R_390_TLS_GD32;
+ case FK_Data_8: return ELF::R_390_TLS_GD64;
+ case SystemZ::FK_390_TLS_CALL: return ELF::R_390_TLS_GDCALL;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
// Return the PLT relocation counterpart of MCFixupKind Kind.
static unsigned getPLTReloc(unsigned Kind) {
switch (Kind) {
@@ -94,6 +121,23 @@ unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
assert(!IsPCRel && "NTPOFF shouldn't be PC-relative");
return getTLSLEReloc(Kind);
+ case MCSymbolRefExpr::VK_INDNTPOFF:
+ if (IsPCRel && Kind == SystemZ::FK_390_PC32DBL)
+ return ELF::R_390_TLS_IEENT;
+ llvm_unreachable("Only PC-relative INDNTPOFF accesses are supported for now");
+
+ case MCSymbolRefExpr::VK_DTPOFF:
+ assert(!IsPCRel && "DTPOFF shouldn't be PC-relative");
+ return getTLSLDOReloc(Kind);
+
+ case MCSymbolRefExpr::VK_TLSLDM:
+ assert(!IsPCRel && "TLSLDM shouldn't be PC-relative");
+ return getTLSLDMReloc(Kind);
+
+ case MCSymbolRefExpr::VK_TLSGD:
+ assert(!IsPCRel && "TLSGD shouldn't be PC-relative");
+ return getTLSGDReloc(Kind);
+
case MCSymbolRefExpr::VK_GOT:
if (IsPCRel && Kind == SystemZ::FK_390_PC32DBL)
return ELF::R_390_GOTENT;
diff --git a/lib/Target/SystemZ/SystemZ.h b/lib/Target/SystemZ/SystemZ.h
index c8b95b2..5f17edb 100644
--- a/lib/Target/SystemZ/SystemZ.h
+++ b/lib/Target/SystemZ/SystemZ.h
@@ -111,6 +111,7 @@ FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZLDCleanupPass(SystemZTargetMachine &TM);
} // end namespace llvm
#endif
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index f4f3ec7..18e37e3 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -66,6 +66,20 @@ static MCInst lowerRIEfLow(const MachineInstr *MI, unsigned Opcode) {
.addImm(MI->getOperand(5).getImm());
}
+static const MCSymbolRefExpr *getTLSGetOffset(MCContext &Context) {
+ StringRef Name = "__tls_get_offset";
+ return MCSymbolRefExpr::Create(Context.GetOrCreateSymbol(Name),
+ MCSymbolRefExpr::VK_PLT,
+ Context);
+}
+
+static const MCSymbolRefExpr *getGlobalOffsetTable(MCContext &Context) {
+ StringRef Name = "_GLOBAL_OFFSET_TABLE_";
+ return MCSymbolRefExpr::Create(Context.GetOrCreateSymbol(Name),
+ MCSymbolRefExpr::VK_None,
+ Context);
+}
+
void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
SystemZMCInstLower Lower(MF->getContext(), *this);
MCInst LoweredMI;
@@ -95,6 +109,26 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
break;
+ case SystemZ::TLS_GDCALL:
+ LoweredMI = MCInstBuilder(SystemZ::BRASL)
+ .addReg(SystemZ::R14D)
+ .addExpr(getTLSGetOffset(MF->getContext()))
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_TLSGD));
+ break;
+
+ case SystemZ::TLS_LDCALL:
+ LoweredMI = MCInstBuilder(SystemZ::BRASL)
+ .addReg(SystemZ::R14D)
+ .addExpr(getTLSGetOffset(MF->getContext()))
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_TLSLDM));
+ break;
+
+ case SystemZ::GOT:
+ LoweredMI = MCInstBuilder(SystemZ::LARL)
+ .addReg(MI->getOperand(0).getReg())
+ .addExpr(getGlobalOffsetTable(MF->getContext()));
+ break;
+
case SystemZ::IILF64:
LoweredMI = MCInstBuilder(SystemZ::IILF)
.addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
@@ -152,7 +186,7 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
#undef LOWER_HIGH
case SystemZ::Serialize:
- if (Subtarget->hasFastSerialization())
+ if (MF->getSubtarget<SystemZSubtarget>().hasFastSerialization())
LoweredMI = MCInstBuilder(SystemZ::AsmBCR)
.addImm(14).addReg(SystemZ::R0D);
else
@@ -172,6 +206,9 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
static MCSymbolRefExpr::VariantKind
getModifierVariantKind(SystemZCP::SystemZCPModifier Modifier) {
switch (Modifier) {
+ case SystemZCP::TLSGD: return MCSymbolRefExpr::VK_TLSGD;
+ case SystemZCP::TLSLDM: return MCSymbolRefExpr::VK_TLSLDM;
+ case SystemZCP::DTPOFF: return MCSymbolRefExpr::VK_DTPOFF;
case SystemZCP::NTPOFF: return MCSymbolRefExpr::VK_NTPOFF;
}
  llvm_unreachable("Invalid SystemZCPModifier!");
@@ -185,8 +222,7 @@ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
getModifierVariantKind(ZCPV->getModifier()),
OutContext);
- uint64_t Size =
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(ZCPV->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(ZCPV->getType());
OutStreamer.EmitValue(Expr, Size);
}
@@ -220,7 +256,7 @@ bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
}
void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetELF()) {
+ if (Triple(TM.getTargetTriple()).isOSBinFormatELF()) {
auto &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
@@ -230,7 +266,7 @@ void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.h b/lib/Target/SystemZ/SystemZAsmPrinter.h
index 6467279..a4d5b78 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.h
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.h
@@ -22,14 +22,9 @@ class Module;
class raw_ostream;
class LLVM_LIBRARY_VISIBILITY SystemZAsmPrinter : public AsmPrinter {
-private:
- const SystemZSubtarget *Subtarget;
-
public:
- SystemZAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
- Subtarget = &TM.getSubtarget<SystemZSubtarget>();
- }
+ SystemZAsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
// Override AsmPrinter.
const char *getPassName() const override {
diff --git a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index 19cec21..44ea1d2 100644
--- a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -28,6 +28,11 @@ SystemZConstantPoolValue::Create(const GlobalValue *GV,
unsigned SystemZConstantPoolValue::getRelocationInfo() const {
switch (Modifier) {
+ case SystemZCP::TLSGD:
+ case SystemZCP::TLSLDM:
+ case SystemZCP::DTPOFF:
+ // May require a dynamic relocation.
+ return 2;
case SystemZCP::NTPOFF:
// May require a relocation, but the relocations are always resolved
// by the static linker.
diff --git a/lib/Target/SystemZ/SystemZConstantPoolValue.h b/lib/Target/SystemZ/SystemZConstantPoolValue.h
index 0bd8c20..e5f1bb1 100644
--- a/lib/Target/SystemZ/SystemZConstantPoolValue.h
+++ b/lib/Target/SystemZ/SystemZConstantPoolValue.h
@@ -19,13 +19,17 @@ class GlobalValue;
namespace SystemZCP {
enum SystemZCPModifier {
+ TLSGD,
+ TLSLDM,
+ DTPOFF,
NTPOFF
};
} // end namespace SystemZCP
/// A SystemZ-specific constant pool value. At present, the only
-/// defined constant pool values are offsets of thread-local variables
-/// (written x@NTPOFF).
+/// defined constant pool values are module IDs or offsets of
+/// thread-local variables (written x@TLSGD, x@TLSLDM, x@DTPOFF,
+/// or x@NTPOFF).
class SystemZConstantPoolValue : public MachineConstantPoolValue {
const GlobalValue *GV;
SystemZCP::SystemZCPModifier Modifier;
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
index ce99ee5..16f9adc 100644
--- a/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -47,7 +47,7 @@ struct Reference {
return *this;
}
- LLVM_EXPLICIT operator bool() const { return Def || Use; }
+ explicit operator bool() const { return Def || Use; }
// True if the register is defined or used in some form, either directly or
// via a sub- or super-register.
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 5f84624..b8b0db9 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -127,8 +127,7 @@ struct RxSBGOperands {
};
class SystemZDAGToDAGISel : public SelectionDAGISel {
- const SystemZTargetLowering &Lowering;
- const SystemZSubtarget &Subtarget;
+ const SystemZSubtarget *Subtarget;
// Used by SystemZOperands.td to create integer constants.
inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
@@ -140,7 +139,7 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
}
const SystemZInstrInfo *getInstrInfo() const {
- return getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ return Subtarget->getInstrInfo();
}
// Try to fold more of the base or index of AM into AM, where IsBase
@@ -315,9 +314,12 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
public:
SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel),
- Lowering(*TM.getSubtargetImpl()->getTargetLowering()),
- Subtarget(*TM.getSubtargetImpl()) {}
+ : SelectionDAGISel(TM, OptLevel) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<SystemZSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
// Override MachineFunctionPass.
const char *getPassName() const override {
@@ -897,7 +899,7 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
unsigned Opcode = SystemZ::RISBG;
EVT OpcodeVT = MVT::i64;
- if (VT == MVT::i32 && Subtarget.hasHighWord()) {
+ if (VT == MVT::i32 && Subtarget->hasHighWord()) {
Opcode = SystemZ::RISBMux;
OpcodeVT = MVT::i32;
RISBG.Start &= 31;
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index b282fca..e96398d 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -80,9 +80,9 @@ static MachineOperand earlyUseOperand(MachineOperand Op) {
return Op;
}
-SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
- : TargetLowering(tm),
- Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
+SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm,
+ const SystemZSubtarget &STI)
+ : TargetLowering(tm), Subtarget(STI) {
MVT PtrVT = getPointerTy();
// Set up the register classes.
@@ -96,7 +96,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
// Compute derived properties from the register classes
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget.getRegisterInfo());
// Set up special registers.
setExceptionPointerRegister(SystemZ::R6D);
@@ -218,10 +218,12 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
// We have native instructions for i8, i16 and i32 extensions, but not i1.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ }
// Handle the various types of symbolic address.
setOperationAction(ISD::ConstantPool, PtrVT, Custom);
@@ -275,7 +277,8 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
// Needed so that we don't try to implement f128 constant loads using
// a load-and-extend of a f80 constant (in cases where the constant
// would fit in an f80).
- setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);
+ for (MVT VT : MVT::fp_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
// Floating-point truncation and stores need to be done separately.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -496,8 +499,10 @@ parseRegisterNumber(const std::string &Constraint,
return std::make_pair(0U, nullptr);
}
-std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
+std::pair<unsigned, const TargetRegisterClass *>
+SystemZTargetLowering::getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
@@ -554,7 +559,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
SystemZMC::FP64Regs);
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
void SystemZTargetLowering::
@@ -673,9 +678,9 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
SystemZMachineFunctionInfo *FuncInfo =
- MF.getInfo<SystemZMachineFunctionInfo>();
- auto *TFL = static_cast<const SystemZFrameLowering *>(
- DAG.getSubtarget().getFrameLowering());
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ auto *TFL =
+ static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -914,8 +919,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
RegsToPass[I].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -1778,12 +1782,8 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
}
}
- SmallVector<SDValue, 5> Ops;
- Ops.push_back(TrueOp);
- Ops.push_back(FalseOp);
- Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32));
- Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32));
- Ops.push_back(Glue);
+ SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, MVT::i32),
+ DAG.getConstant(C.CCMask, MVT::i32), Glue};
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
@@ -1828,6 +1828,52 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
return Result;
}
+SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG,
+ unsigned Opcode,
+ SDValue GOTOffset) const {
+ SDLoc DL(Node);
+ EVT PtrVT = getPointerTy();
+ SDValue Chain = DAG.getEntryNode();
+ SDValue Glue;
+
+ // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
+ SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
+ Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
+ Glue = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
+ Glue = Chain.getValue(1);
+
+ // The first call operand is the chain and the second is the TLS symbol.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
+ Node->getValueType(0),
+ 0, 0));
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
+ Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
+
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallingConv::C);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
+ // Glue the call to the argument copies.
+ Ops.push_back(Glue);
+
+ // Emit the call.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
+ Glue = Chain.getValue(1);
+
+ // Copy the return value from %r2.
+ return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
+}
+
SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const {
SDLoc DL(Node);
@@ -1835,9 +1881,6 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
EVT PtrVT = getPointerTy();
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
- if (model != TLSModel::LocalExec)
- llvm_unreachable("only local-exec TLS mode supported");
-
// The high part of the thread pointer is in access register 0.
SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
DAG.getConstant(0, MVT::i32));
@@ -1853,15 +1896,79 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
DAG.getConstant(32, PtrVT));
SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
- // Get the offset of GA from the thread pointer.
- SystemZConstantPoolValue *CPV =
- SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
+ // Get the offset of GA from the thread pointer, based on the TLS model.
+ SDValue Offset;
+ switch (model) {
+ case TLSModel::GeneralDynamic: {
+ // Load the GOT offset of the tls_index (module ID / per-symbol offset).
+ SystemZConstantPoolValue *CPV =
+ SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
+
+ Offset = DAG.getConstantPool(CPV, PtrVT, 8);
+ Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ Offset, MachinePointerInfo::getConstantPool(),
+ false, false, false, 0);
+
+ // Call __tls_get_offset to retrieve the offset.
+ Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
+ break;
+ }
+
+ case TLSModel::LocalDynamic: {
+ // Load the GOT offset of the module ID.
+ SystemZConstantPoolValue *CPV =
+ SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
+
+ Offset = DAG.getConstantPool(CPV, PtrVT, 8);
+ Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ Offset, MachinePointerInfo::getConstantPool(),
+ false, false, false, 0);
+
+ // Call __tls_get_offset to retrieve the module base offset.
+ Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
+
+ // Note: The SystemZLDCleanupPass will remove redundant computations
+ // of the module base offset. Count total number of local-dynamic
+ // accesses to trigger execution of that pass.
+ SystemZMachineFunctionInfo* MFI =
+ DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
+ MFI->incNumLocalDynamicTLSAccesses();
+
+ // Add the per-symbol offset.
+ CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
+
+ SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
+ DTPOffset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ DTPOffset, MachinePointerInfo::getConstantPool(),
+ false, false, false, 0);
- // Force the offset into the constant pool and load it from there.
- SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
- SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
- CPAddr, MachinePointerInfo::getConstantPool(),
- false, false, false, 0);
+ Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
+ break;
+ }
+
+ case TLSModel::InitialExec: {
+ // Load the offset from the GOT.
+ Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
+ SystemZII::MO_INDNTPOFF);
+ Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
+ Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ Offset, MachinePointerInfo::getGOT(),
+ false, false, false, 0);
+ break;
+ }
+
+ case TLSModel::LocalExec: {
+ // Force the offset into the constant pool and load it from there.
+ SystemZConstantPoolValue *CPV =
+ SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
+
+ Offset = DAG.getConstantPool(CPV, PtrVT, 8);
+ Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ Offset, MachinePointerInfo::getConstantPool(),
+ false, false, false, 0);
+ break;
+ }
+ }
// Add the base and offset together.
return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
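Summarizing the four cases above for a thread-local variable x (a sketch; the call pseudos are expanded by the AsmPrinter changes earlier in this patch):

    // GeneralDynamic: Offset = load of x@TLSGD from the constant pool,
    //                 then TLS_GDCALL returns the TP-relative offset in %r2.
    // LocalDynamic:   TLS_LDCALL (x@TLSLDM) fetches the module base once;
    //                 each symbol then adds its own x@DTPOFF constant.
    // InitialExec:    Offset = load of x@INDNTPOFF through the GOT.
    // LocalExec:      Offset = load of x@NTPOFF from the constant pool.
    // All four cases end with: address = thread pointer (TP) + Offset.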
@@ -2611,8 +2718,8 @@ static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
- MBB->getParent()->getSubtarget().getInstrInfo());
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
unsigned DestReg = MI->getOperand(0).getReg();
unsigned TrueReg = MI->getOperand(1).getReg();
@@ -2660,8 +2767,8 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const {
- const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
- MBB->getParent()->getSubtarget().getInstrInfo());
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
unsigned SrcReg = MI->getOperand(0).getReg();
MachineOperand Base = MI->getOperand(1);
@@ -2730,7 +2837,7 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
bool Invert) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2850,7 +2957,7 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
unsigned BitSize) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2962,7 +3069,7 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
MachineBasicBlock *MBB) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
@@ -3079,7 +3186,7 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI,
bool ClearEven, unsigned SubReg) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -3111,7 +3218,7 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -3281,7 +3388,7 @@ SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
- static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index 887c236..a2b10b0 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -34,6 +34,11 @@ enum {
CALL,
SIBCALL,
+ // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
+ // (The call target is implicitly __tls_get_offset.)
+ TLS_GDCALL,
+ TLS_LDCALL,
+
// Wraps a TargetGlobalAddress that should be loaded using PC-relative
// accesses (LARL). Operand 0 is the address.
PCREL_WRAPPER,
@@ -198,7 +203,8 @@ class SystemZTargetMachine;
class SystemZTargetLowering : public TargetLowering {
public:
- explicit SystemZTargetLowering(const TargetMachine &TM);
+ explicit SystemZTargetLowering(const TargetMachine &TM,
+ const SystemZSubtarget &STI);
// Override TargetLowering.
MVT getScalarShiftAmountTy(EVT LHSTy) const override {
@@ -215,8 +221,9 @@ public:
bool isTruncateFree(EVT, EVT) const override;
const char *getTargetNodeName(unsigned Opcode) const override;
std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const override;
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
TargetLowering::ConstraintType
getConstraintType(const std::string &Constraint) const override;
TargetLowering::ConstraintWeight
@@ -257,6 +264,9 @@ private:
SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const;
+ SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG, unsigned Opcode,
+ SDValue GOTOffset) const;
SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const;
SDValue lowerBlockAddress(BlockAddressSDNode *Node,
diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td
index e8841e1..4a5582f 100644
--- a/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/lib/Target/SystemZ/SystemZInstrFP.td
@@ -26,14 +26,14 @@ defm CondStoreF64 : CondStores<FP64, nonvolatile_store,
//===----------------------------------------------------------------------===//
// Load zero.
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
+let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LZER : InherentRRE<"lzer", 0xB374, FP32, (fpimm0)>;
def LZDR : InherentRRE<"lzdr", 0xB375, FP64, (fpimm0)>;
def LZXR : InherentRRE<"lzxr", 0xB376, FP128, (fpimm0)>;
}
// Moves between two floating-point registers.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>;
def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>;
def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>;
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 8ff9553..8488ec8 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -633,7 +633,7 @@ struct LogicOp {
LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
: RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
- LLVM_EXPLICIT operator bool() const { return RegSize; }
+ explicit operator bool() const { return RegSize; }
unsigned RegSize, ImmLSB, ImmSize;
};
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index d2e3f54..e711f89 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -56,10 +56,13 @@ static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
// SystemZ MachineOperand target flags.
enum {
// Masks out the bits for the access model.
- MO_SYMBOL_MODIFIER = (1 << 0),
+ MO_SYMBOL_MODIFIER = (3 << 0),
// @GOT (aka @GOTENT)
- MO_GOT = (1 << 0)
+ MO_GOT = (1 << 0),
+
+ // @INDNTPOFF
+ MO_INDNTPOFF = (2 << 0)
};
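With MO_SYMBOL_MODIFIER widened to a two-bit mask, a consumer extracts the access model by masking; a hypothetical helper (not part of the patch) mirroring what the getVariantKind switch in SystemZMCInstLower expects:

    // Hypothetical: yields 0 (none), MO_GOT, or MO_INDNTPOFF.
    static inline unsigned getAccessModel(unsigned Flags) {
      return Flags & SystemZII::MO_SYMBOL_MODIFIER;
    }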
// Classifies a branch.
enum BranchType {
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index f4951ad..a7f7747 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -16,7 +16,7 @@ def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
[(callseq_end timm:$amt1, timm:$amt2)]>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// Takes as input the value of the stack pointer after a dynamic allocation
// has been made. Sets the output to the address of the dynamically-
// allocated area itself, skipping the outgoing arguments.
@@ -249,11 +249,21 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
def CallBR : Alias<2, (outs), (ins), [(z_sibcall R1D)]>;
}
+// TLS calls. These will be lowered into a call to __tls_get_offset,
+// with an extra relocation specifying the TLS symbol.
+let isCall = 1, Defs = [R14D, CC] in {
+ def TLS_GDCALL : Alias<6, (outs), (ins tlssym:$I2, variable_ops),
+ [(z_tls_gdcall tglobaltlsaddr:$I2)]>;
+ def TLS_LDCALL : Alias<6, (outs), (ins tlssym:$I2, variable_ops),
+ [(z_tls_ldcall tglobaltlsaddr:$I2)]>;
+}
+
// Define the general form of the call instructions for the asm parser.
// These instructions don't hard-code %r14 as the return address register.
-def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16:$I2),
+// Allow an optional TLS marker symbol to generate TLS call relocations.
+def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16tls:$I2),
"bras\t$R1, $I2", []>;
-def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32:$I2),
+def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32tls:$I2),
"brasl\t$R1, $I2", []>;
def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
"basr\t$R1, $R2", []>;
@@ -263,7 +273,7 @@ def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
//===----------------------------------------------------------------------===//
// Register moves.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// Expands to LR, RISBHG or RISBLG, depending on the choice of registers.
def LRMux : UnaryRRPseudo<"l", null_frag, GRX32, GRX32>,
Requires<[FeatureHighWord]>;
@@ -286,7 +296,7 @@ let Uses = [CC] in {
}
// Immediate moves.
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1,
isReMaterializable = 1 in {
// 16-bit sign-extended immediates. LHIMux expands to LHI or IIHF,
// depending on the choice of register.
@@ -402,13 +412,13 @@ let mayLoad = 1, mayStore = 1, Defs = [CC], Uses = [R0L] in
//===----------------------------------------------------------------------===//
// 32-bit extensions from registers.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LBR : UnaryRRE<"lb", 0xB926, sext8, GR32, GR32>;
def LHR : UnaryRRE<"lh", 0xB927, sext16, GR32, GR32>;
}
// 64-bit extensions from registers.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LGBR : UnaryRRE<"lgb", 0xB906, sext8, GR64, GR64>;
def LGHR : UnaryRRE<"lgh", 0xB907, sext16, GR64, GR64>;
def LGFR : UnaryRRE<"lgf", 0xB914, sext32, GR64, GR32>;
@@ -452,7 +462,7 @@ let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
//===----------------------------------------------------------------------===//
// 32-bit extensions from registers.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// Expands to LLCR or RISB[LH]G, depending on the choice of registers.
def LLCRMux : UnaryRRPseudo<"llc", zext8, GRX32, GRX32>,
Requires<[FeatureHighWord]>;
@@ -464,7 +474,7 @@ let neverHasSideEffects = 1 in {
}
// 64-bit extensions from registers.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LLGCR : UnaryRRE<"llgc", 0xB984, zext8, GR64, GR64>;
def LLGHR : UnaryRRE<"llgh", 0xB985, zext16, GR64, GR64>;
def LLGFR : UnaryRRE<"llgf", 0xB916, zext32, GR64, GR32>;
@@ -546,7 +556,7 @@ def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
//===----------------------------------------------------------------------===//
// Byte-swapping register moves.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LRVR : UnaryRRE<"lrv", 0xB91F, bswap, GR32, GR32>;
def LRVGR : UnaryRRE<"lrvg", 0xB90F, bswap, GR64, GR64>;
}
@@ -566,7 +576,7 @@ def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>,
//===----------------------------------------------------------------------===//
// Load BDX-style addresses.
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isReMaterializable = 1,
+let hasSideEffects = 0, isAsCheapAsAMove = 1, isReMaterializable = 1,
DispKey = "la" in {
let DispSize = "12" in
def LA : InstRX<0x41, (outs GR64:$R1), (ins laaddr12pair:$XBD2),
@@ -580,13 +590,19 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isReMaterializable = 1,
// Load a PC-relative address. There's no version of this instruction
// with a 16-bit offset, so there's no relaxation.
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1,
isReMaterializable = 1 in {
def LARL : InstRIL<0xC00, (outs GR64:$R1), (ins pcrel32:$I2),
"larl\t$R1, $I2",
[(set GR64:$R1, pcrel32:$I2)]>;
}
+// Load the Global Offset Table address. This will be lowered into a
+// larl $R1, _GLOBAL_OFFSET_TABLE_
+// instruction.
+def GOT : Alias<6, (outs GR64:$R1), (ins),
+ [(set GR64:$R1, (global_offset_table))]>;
+
//===----------------------------------------------------------------------===//
// Absolute and Negation
//===----------------------------------------------------------------------===//
@@ -1012,13 +1028,13 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
//===----------------------------------------------------------------------===//
// Shift left.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
}
// Logical shift right.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>;
}
@@ -1030,7 +1046,7 @@ let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
}
// Rotate left.
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def RLL : BinaryRSY<"rll", 0xEB1D, rotl, GR32>;
def RLLG : BinaryRSY<"rllg", 0xEB1C, rotl, GR64>;
}
diff --git a/lib/Target/SystemZ/SystemZLDCleanup.cpp b/lib/Target/SystemZ/SystemZLDCleanup.cpp
new file mode 100644
index 0000000..24165be
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZLDCleanup.cpp
@@ -0,0 +1,143 @@
+//===-- SystemZLDCleanup.cpp - Clean up local-dynamic TLS accesses --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass combines multiple accesses to local-dynamic TLS variables so that
+// the TLS base address for the module is only fetched once per execution path
+// through the function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZTargetMachine.h"
+#include "SystemZMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+class SystemZLDCleanup : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZLDCleanup(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr), MF(nullptr) {}
+
+ const char *getPassName() const override {
+ return "SystemZ Local Dynamic TLS Access Clean-up";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+private:
+ bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg);
+ MachineInstr *ReplaceTLSCall(MachineInstr *I, unsigned TLSBaseAddrReg);
+ MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg);
+
+ const SystemZInstrInfo *TII;
+ MachineFunction *MF;
+};
+
+char SystemZLDCleanup::ID = 0;
+
+} // end anonymous namespace
+
+FunctionPass *llvm::createSystemZLDCleanupPass(SystemZTargetMachine &TM) {
+ return new SystemZLDCleanup(TM);
+}
+
+void SystemZLDCleanup::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool SystemZLDCleanup::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
+ MF = &F;
+
+ SystemZMachineFunctionInfo* MFI = F.getInfo<SystemZMachineFunctionInfo>();
+ if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
+    // No point folding accesses if there aren't at least two.
+ return false;
+ }
+
+ MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
+ return VisitNode(DT->getRootNode(), 0);
+}
+
+// Visit the dominator subtree rooted at Node in pre-order.
+// If TLSBaseAddrReg is nonzero, then use it to replace any
+// TLS_LDCALL instructions. Otherwise, create the register
+// when the first such instruction is seen, and then use it
+// as we encounter more instructions.
+bool SystemZLDCleanup::VisitNode(MachineDomTreeNode *Node,
+ unsigned TLSBaseAddrReg) {
+ MachineBasicBlock *BB = Node->getBlock();
+ bool Changed = false;
+
+ // Traverse the current block.
+ for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
+ switch (I->getOpcode()) {
+ case SystemZ::TLS_LDCALL:
+ if (TLSBaseAddrReg)
+ I = ReplaceTLSCall(I, TLSBaseAddrReg);
+ else
+ I = SetRegister(I, &TLSBaseAddrReg);
+ Changed = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Visit the children of this block in the dominator tree.
+ for (auto I = Node->begin(), E = Node->end(); I != E; ++I)
+ Changed |= VisitNode(*I, TLSBaseAddrReg);
+
+ return Changed;
+}
+
+// Replace the TLS_LDCALL instruction I with a copy from TLSBaseAddrReg,
+// returning the new instruction.
+MachineInstr *SystemZLDCleanup::ReplaceTLSCall(MachineInstr *I,
+ unsigned TLSBaseAddrReg) {
+ // Insert a Copy from TLSBaseAddrReg to R2.
+ MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), SystemZ::R2D)
+ .addReg(TLSBaseAddrReg);
+
+ // Erase the TLS_LDCALL instruction.
+ I->eraseFromParent();
+
+ return Copy;
+}
+
+// Create a virtual register in *TLSBaseAddrReg, and populate it by
+// inserting a copy instruction after I. Returns the new instruction.
+MachineInstr *SystemZLDCleanup::SetRegister(MachineInstr *I,
+ unsigned *TLSBaseAddrReg) {
+ // Create a virtual register for the TLS base address.
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ *TLSBaseAddrReg = RegInfo.createVirtualRegister(&SystemZ::GR64BitRegClass);
+
+ // Insert a copy from R2 to TLSBaseAddrReg.
+ MachineInstr *Next = I->getNextNode();
+ MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
+ .addReg(SystemZ::R2D);
+
+ return Copy;
+}
+
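To illustrate what the pass buys, consider a hypothetical function with two local-dynamic accesses; after the pass, only the first __tls_get_offset call remains, and the second TLS_LDCALL becomes a plain COPY from the virtual register that SetRegister created:

    // Hypothetical input; a and b both use the local-dynamic TLS model.
    static __thread int a;
    static __thread int b;
    int sum(void) {
      return a + b; // one TLS_LDCALL, one COPY after SystemZLDCleanup
    }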
diff --git a/lib/Target/SystemZ/SystemZMCInstLower.cpp b/lib/Target/SystemZ/SystemZMCInstLower.cpp
index df561e2..6bb96f1 100644
--- a/lib/Target/SystemZ/SystemZMCInstLower.cpp
+++ b/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -22,6 +22,8 @@ static MCSymbolRefExpr::VariantKind getVariantKind(unsigned Flags) {
return MCSymbolRefExpr::VK_None;
case SystemZII::MO_GOT:
return MCSymbolRefExpr::VK_GOT;
+ case SystemZII::MO_INDNTPOFF:
+ return MCSymbolRefExpr::VK_INDNTPOFF;
}
llvm_unreachable("Unrecognised MO_ACCESS_MODEL");
}
diff --git a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index 92c2ce7..34fc36d 100644
--- a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -23,11 +23,13 @@ class SystemZMachineFunctionInfo : public MachineFunctionInfo {
unsigned VarArgsFrameIndex;
unsigned RegSaveFrameIndex;
bool ManipulatesSP;
+ unsigned NumLocalDynamics;
public:
explicit SystemZMachineFunctionInfo(MachineFunction &MF)
: LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), VarArgsFirstFPR(0),
- VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false) {}
+ VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false),
+ NumLocalDynamics(0) {}
// Get and set the first call-saved GPR that should be saved and restored
// by this function. This is 0 if no GPRs need to be saved or restored.
@@ -61,6 +63,10 @@ public:
// e.g. through STACKSAVE or STACKRESTORE.
bool getManipulatesSP() const { return ManipulatesSP; }
void setManipulatesSP(bool MSP) { ManipulatesSP = MSP; }
+
+ // Count number of local-dynamic TLS symbols used.
+ unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
+ void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
};
} // end namespace llvm
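// A hedged sketch of how this counter is driven: the helper below is
// hypothetical (the real increment would sit in the local-dynamic TLS
// lowering path in SystemZISelLowering.cpp), while the consumer is the
// SystemZLDCleanup pass above, which bails out unless the count reaches two.
#include "SystemZMachineFunctionInfo.h"  // assumed local include
#include "llvm/CodeGen/MachineFunction.h"

static void recordLocalDynamicTLSAccess(llvm::MachineFunction &MF) {
  MF.getInfo<llvm::SystemZMachineFunctionInfo>()
      ->incNumLocalDynamicTLSAccesses();
}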
diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td
index 7be81dc..1b5b7d7 100644
--- a/lib/Target/SystemZ/SystemZOperands.td
+++ b/lib/Target/SystemZ/SystemZOperands.td
@@ -16,6 +16,11 @@ class ImmediateAsmOperand<string name>
let Name = name;
let RenderMethod = "addImmOperands";
}
+class ImmediateTLSAsmOperand<string name>
+ : AsmOperandClass {
+ let Name = name;
+ let RenderMethod = "addImmTLSOperands";
+}
// Constructs both a DAG pattern and instruction operand for an immediate
// of type VT. PRED returns true if a node is acceptable and XFORM returns
@@ -34,6 +39,11 @@ class PCRelAsmOperand<string size> : ImmediateAsmOperand<"PCRel"##size> {
let PredicateMethod = "isImm";
let ParserMethod = "parsePCRel"##size;
}
+class PCRelTLSAsmOperand<string size>
+ : ImmediateTLSAsmOperand<"PCRelTLS"##size> {
+ let PredicateMethod = "isImmTLS";
+ let ParserMethod = "parsePCRelTLS"##size;
+}
// Constructs an operand for a PC-relative address with address type VT.
// ASMOP is the associated asm operand.
@@ -41,6 +51,10 @@ class PCRelOperand<ValueType vt, AsmOperandClass asmop> : Operand<vt> {
let PrintMethod = "printPCRelOperand";
let ParserMatchClass = asmop;
}
+class PCRelTLSOperand<ValueType vt, AsmOperandClass asmop> : Operand<vt> {
+ let PrintMethod = "printPCRelTLSOperand";
+ let ParserMatchClass = asmop;
+}
// Constructs both a DAG pattern and instruction operand for a PC-relative
// address with address size VT. SELF is the name of the operand and
@@ -370,6 +384,8 @@ def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
// PC-relative asm operands.
def PCRel16 : PCRelAsmOperand<"16">;
def PCRel32 : PCRelAsmOperand<"32">;
+def PCRelTLS16 : PCRelTLSAsmOperand<"16">;
+def PCRelTLS32 : PCRelTLSAsmOperand<"32">;
// PC-relative offsets of a basic block. The offset is sign-extended
// and multiplied by 2.
@@ -382,6 +398,20 @@ def brtarget32 : PCRelOperand<OtherVT, PCRel32> {
let DecoderMethod = "decodePC32DBLOperand";
}
+// Variants of brtarget16/32 with an optional additional TLS symbol.
+// These are used to annotate calls to __tls_get_offset.
+def tlssym : Operand<i64> { }
+def brtarget16tls : PCRelTLSOperand<OtherVT, PCRelTLS16> {
+ let MIOperandInfo = (ops brtarget16:$func, tlssym:$sym);
+ let EncoderMethod = "getPC16DBLTLSEncoding";
+ let DecoderMethod = "decodePC16DBLOperand";
+}
+def brtarget32tls : PCRelTLSOperand<OtherVT, PCRelTLS32> {
+ let MIOperandInfo = (ops brtarget32:$func, tlssym:$sym);
+ let EncoderMethod = "getPC32DBLTLSEncoding";
+ let DecoderMethod = "decodePC32DBLOperand";
+}
+
// A PC-relative offset of a global value. The offset is sign-extended
// and multiplied by 2.
def pcrel32 : PCRelAddress<i64, "pcrel32", PCRel32> {
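// For reference, the extra tlssym operand surfaces in printed assembly as a
// marker suffix on the call (standard s390 GNU as syntax, shown here only as
// an illustration):
//
//   brasl %r14, __tls_get_offset@PLT:tls_ldcall:x
//
// The :tls_ldcall:x annotation records which TLS symbol the call computes,
// which is what makes linker TLS optimizations possible.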
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index c70e662..51ac5da 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -90,6 +90,7 @@ def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
[SDNPHasChain, SDNPSideEffect, SDNPOptInGlue,
SDNPOutGlue]>;
+def global_offset_table : SDNode<"ISD::GLOBAL_OFFSET_TABLE", SDTPtrLeaf>;
// Nodes for SystemZISD::*. See SystemZISelLowering.h for more details.
def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
@@ -100,6 +101,12 @@ def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
+def z_tls_gdcall : SDNode<"SystemZISD::TLS_GDCALL", SDT_ZCall,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def z_tls_ldcall : SDNode<"SystemZISD::TLS_LDCALL", SDT_ZCall,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
SDT_ZWrapOffset, []>;
diff --git a/lib/Target/SystemZ/SystemZProcessors.td b/lib/Target/SystemZ/SystemZProcessors.td
index e6b58f1..1594854 100644
--- a/lib/Target/SystemZ/SystemZProcessors.td
+++ b/lib/Target/SystemZ/SystemZProcessors.td
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
class SystemZFeature<string extname, string intname, string desc>
- : Predicate<"Subtarget.has"##intname##"()">,
+ : Predicate<"Subtarget->has"##intname##"()">,
AssemblerPredicate<"Feature"##intname, extname>,
SubtargetFeature<extname, "Has"##intname, "true", desc>;
class SystemZMissingFeature<string intname>
- : Predicate<"!Subtarget.has"##intname##"()">;
+ : Predicate<"!Subtarget->has"##intname##"()">;
def FeatureDistinctOps : SystemZFeature<
"distinct-ops", "DistinctOps",
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index a3cba64..12fc198 100644
--- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -103,7 +103,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// we can move at most 2 halfwords.
uint64_t ByteVal = CByte->getZExtValue();
if (ByteVal == 0 || ByteVal == 255 ?
- Bytes <= 16 && CountPopulation_64(Bytes) <= 2 :
+ Bytes <= 16 && countPopulation(Bytes) <= 2 :
Bytes <= 4) {
unsigned Size1 = Bytes == 16 ? 8 : 1 << findLastSet(Bytes);
unsigned Size2 = Bytes - Size1;
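// A minimal standalone sketch (plain C++, not LLVM code) of the length
// split used above: a byte count of at most 16 with at most two set bits
// decomposes into at most two power-of-two pieces, each coverable by a
// single store of the replicated byte.
#include <cassert>
#include <cstdint>

static void splitMemsetLength(uint64_t Bytes, uint64_t &Size1,
                              uint64_t &Size2) {
  assert(Bytes >= 1 && Bytes <= 16 && __builtin_popcountll(Bytes) <= 2);
  // Largest power of two <= Bytes, capped at 8 (the widest single store).
  Size1 = Bytes == 16 ? 8 : uint64_t(1) << (63 - __builtin_clzll(Bytes));
  Size2 = Bytes - Size1; // Zero, or a smaller power of two.
}
// e.g. splitMemsetLength(12, S1, S2) yields S1 == 8 and S2 == 4.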
@@ -222,12 +222,9 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// Now select between End and null, depending on whether the character
// was found.
- SmallVector<SDValue, 5> Ops;
- Ops.push_back(End);
- Ops.push_back(DAG.getConstant(0, PtrVT));
- Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST, MVT::i32));
- Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32));
- Ops.push_back(Glue);
+ SDValue Ops[] = {End, DAG.getConstant(0, PtrVT),
+ DAG.getConstant(SystemZ::CCMASK_SRST, MVT::i32),
+ DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32), Glue};
VTs = DAG.getVTList(PtrVT, MVT::Glue);
End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
return std::make_pair(End, Chain);
diff --git a/lib/Target/SystemZ/SystemZSubtarget.cpp b/lib/Target/SystemZ/SystemZSubtarget.cpp
index e160bc8..31a2bff 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -44,13 +44,8 @@ SystemZSubtarget::SystemZSubtarget(const std::string &TT,
: SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
HasFastSerialization(false), HasInterlockedAccess1(false),
- TargetTriple(TT),
- // Make sure that global data has at least 16 bits of alignment by
- // default, so that we can refer to it using LARL. We don't have any
- // special requirements for stack variables though.
- DL("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"),
- InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
- TSInfo(DL), FrameLowering() {}
+ TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
+ TLInfo(TM, *this), TSInfo(*TM.getDataLayout()), FrameLowering() {}
// Return true if GV binds locally under reloc model RM.
static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
diff --git a/lib/Target/SystemZ/SystemZSubtarget.h b/lib/Target/SystemZ/SystemZSubtarget.h
index f881552..99cb1ad 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/lib/Target/SystemZ/SystemZSubtarget.h
@@ -43,7 +43,6 @@ protected:
private:
Triple TargetTriple;
- const DataLayout DL;
SystemZInstrInfo InstrInfo;
SystemZTargetLowering TLInfo;
SystemZSelectionDAGInfo TSInfo;
@@ -59,7 +58,6 @@ public:
return &FrameLowering;
}
const SystemZInstrInfo *getInstrInfo() const override { return &InstrInfo; }
- const DataLayout *getDataLayout() const override { return &DL; }
const SystemZRegisterInfo *getRegisterInfo() const override {
return &InstrInfo.getRegisterInfo();
}
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp
index d7c432e..73198b1 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -27,6 +27,10 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
+ // Make sure that global data has at least 16 bits of alignment by
+ // default, so that we can refer to it using LARL. We don't have any
+ // special requirements for stack variables though.
+ DL("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
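// A hedged reading of the layout string now owned by the target machine:
// "E" selects big-endian; "m:e" ELF name mangling; "i1:8:16" and "i8:8:16"
// give small integers an 8-bit ABI alignment but a 16-bit preferred
// alignment (the LARL-friendly global alignment the comment refers to);
// "i64:64" aligns i64 to 64 bits; "f128:64" lets fp128 live at 64-bit
// alignment; "a:8:16" covers aggregates; "n32:64" lists the native integer
// widths. A quick sanity check:
//
//   llvm::DataLayout DL("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64");
//   assert(DL.isBigEndian());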
@@ -46,8 +50,8 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
- bool addPreSched2() override;
- bool addPreEmitPass() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
};
} // end anonymous namespace
@@ -57,17 +61,20 @@ void SystemZPassConfig::addIRPasses() {
bool SystemZPassConfig::addInstSelector() {
addPass(createSystemZISelDag(getSystemZTargetMachine(), getOptLevel()));
+
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZLDCleanupPass(getSystemZTargetMachine()));
+
return false;
}
-bool SystemZPassConfig::addPreSched2() {
+void SystemZPassConfig::addPreSched2() {
if (getOptLevel() != CodeGenOpt::None &&
getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
addPass(&IfConverterID);
- return true;
}
-bool SystemZPassConfig::addPreEmitPass() {
+void SystemZPassConfig::addPreEmitPass() {
// We eliminate comparisons here rather than earlier because some
// transformations can change the set of available CC values and we
// generally want those transformations to have priority. This is
@@ -92,11 +99,10 @@ bool SystemZPassConfig::addPreEmitPass() {
// between the comparison and the branch, but it isn't clear whether
// preventing that would be a win or not.
if (getOptLevel() != CodeGenOpt::None)
- addPass(createSystemZElimComparePass(getSystemZTargetMachine()));
+ addPass(createSystemZElimComparePass(getSystemZTargetMachine()), false);
if (getOptLevel() != CodeGenOpt::None)
- addPass(createSystemZShortenInstPass(getSystemZTargetMachine()));
+ addPass(createSystemZShortenInstPass(getSystemZTargetMachine()), false);
addPass(createSystemZLongBranchPass(getSystemZTargetMachine()));
- return true;
}
TargetPassConfig *SystemZTargetMachine::createPassConfig(PassManagerBase &PM) {
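// Note on the new addPass(..., false) calls above: the extra boolean is
// assumed to be the verifyAfter flag of the TargetPassConfig overload of
// this vintage,
//
//   void addPass(Pass *P, bool verifyAfter = true, bool printAfter = true);
//
// so the machine verifier is not scheduled after those two passes.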
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.h b/lib/Target/SystemZ/SystemZTargetMachine.h
index 9fae5e4..52ccc5a 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -24,6 +24,7 @@ class TargetFrameLowering;
class SystemZTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL;
SystemZSubtarget Subtarget;
public:
@@ -34,6 +35,7 @@ public:
~SystemZTargetMachine() override;
// Override TargetMachine.
+ const DataLayout *getDataLayout() const override { return &DL; }
const SystemZSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index 4b51b3f..5b7953d 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -18,24 +18,25 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
-#include "llvm/PassManager.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include <cstring>
using namespace llvm;
-inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
- return reinterpret_cast<TargetLibraryInfo*>(P);
+inline TargetLibraryInfoImpl *unwrap(LLVMTargetLibraryInfoRef P) {
+ return reinterpret_cast<TargetLibraryInfoImpl*>(P);
}
-inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
- TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P);
+inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfoImpl *P) {
+ TargetLibraryInfoImpl *X = const_cast<TargetLibraryInfoImpl*>(P);
return reinterpret_cast<LLVMTargetLibraryInfoRef>(X);
}
void llvm::initializeTarget(PassRegistry &Registry) {
initializeDataLayoutPassPass(Registry);
- initializeTargetLibraryInfoPass(Registry);
+ initializeTargetLibraryInfoWrapperPassPass(Registry);
+ initializeTargetTransformInfoWrapperPassPass(Registry);
}
void LLVMInitializeTarget(LLVMPassRegistryRef R) {
@@ -54,7 +55,7 @@ void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
LLVMPassManagerRef PM) {
- unwrap(PM)->add(new TargetLibraryInfo(*unwrap(TLI)));
+ unwrap(PM)->add(new TargetLibraryInfoWrapperPass(*unwrap(TLI)));
}
char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef TD) {
diff --git a/lib/Target/TargetLibraryInfo.cpp b/lib/Target/TargetLibraryInfo.cpp
deleted file mode 100644
index bca56b5..0000000
--- a/lib/Target/TargetLibraryInfo.cpp
+++ /dev/null
@@ -1,753 +0,0 @@
-//===-- TargetLibraryInfo.cpp - Runtime library information ----------------==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the TargetLibraryInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/ADT/Triple.h"
-using namespace llvm;
-
-// Register the default implementation.
-INITIALIZE_PASS(TargetLibraryInfo, "targetlibinfo",
- "Target Library Information", false, true)
-char TargetLibraryInfo::ID = 0;
-
-void TargetLibraryInfo::anchor() { }
-
-const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
- {
- "_IO_getc",
- "_IO_putc",
- "_ZdaPv",
- "_ZdaPvRKSt9nothrow_t",
- "_ZdaPvj",
- "_ZdaPvm",
- "_ZdlPv",
- "_ZdlPvRKSt9nothrow_t",
- "_ZdlPvj",
- "_ZdlPvm",
- "_Znaj",
- "_ZnajRKSt9nothrow_t",
- "_Znam",
- "_ZnamRKSt9nothrow_t",
- "_Znwj",
- "_ZnwjRKSt9nothrow_t",
- "_Znwm",
- "_ZnwmRKSt9nothrow_t",
- "__cospi",
- "__cospif",
- "__cxa_atexit",
- "__cxa_guard_abort",
- "__cxa_guard_acquire",
- "__cxa_guard_release",
- "__isoc99_scanf",
- "__isoc99_sscanf",
- "__memcpy_chk",
- "__memmove_chk",
- "__memset_chk",
- "__sincospi_stret",
- "__sincospif_stret",
- "__sinpi",
- "__sinpif",
- "__sqrt_finite",
- "__sqrtf_finite",
- "__sqrtl_finite",
- "__stpcpy_chk",
- "__stpncpy_chk",
- "__strcpy_chk",
- "__strdup",
- "__strncpy_chk",
- "__strndup",
- "__strtok_r",
- "abs",
- "access",
- "acos",
- "acosf",
- "acosh",
- "acoshf",
- "acoshl",
- "acosl",
- "asin",
- "asinf",
- "asinh",
- "asinhf",
- "asinhl",
- "asinl",
- "atan",
- "atan2",
- "atan2f",
- "atan2l",
- "atanf",
- "atanh",
- "atanhf",
- "atanhl",
- "atanl",
- "atof",
- "atoi",
- "atol",
- "atoll",
- "bcmp",
- "bcopy",
- "bzero",
- "calloc",
- "cbrt",
- "cbrtf",
- "cbrtl",
- "ceil",
- "ceilf",
- "ceill",
- "chmod",
- "chown",
- "clearerr",
- "closedir",
- "copysign",
- "copysignf",
- "copysignl",
- "cos",
- "cosf",
- "cosh",
- "coshf",
- "coshl",
- "cosl",
- "ctermid",
- "exp",
- "exp10",
- "exp10f",
- "exp10l",
- "exp2",
- "exp2f",
- "exp2l",
- "expf",
- "expl",
- "expm1",
- "expm1f",
- "expm1l",
- "fabs",
- "fabsf",
- "fabsl",
- "fclose",
- "fdopen",
- "feof",
- "ferror",
- "fflush",
- "ffs",
- "ffsl",
- "ffsll",
- "fgetc",
- "fgetpos",
- "fgets",
- "fileno",
- "fiprintf",
- "flockfile",
- "floor",
- "floorf",
- "floorl",
- "fmax",
- "fmaxf",
- "fmaxl",
- "fmin",
- "fminf",
- "fminl",
- "fmod",
- "fmodf",
- "fmodl",
- "fopen",
- "fopen64",
- "fprintf",
- "fputc",
- "fputs",
- "fread",
- "free",
- "frexp",
- "frexpf",
- "frexpl",
- "fscanf",
- "fseek",
- "fseeko",
- "fseeko64",
- "fsetpos",
- "fstat",
- "fstat64",
- "fstatvfs",
- "fstatvfs64",
- "ftell",
- "ftello",
- "ftello64",
- "ftrylockfile",
- "funlockfile",
- "fwrite",
- "getc",
- "getc_unlocked",
- "getchar",
- "getenv",
- "getitimer",
- "getlogin_r",
- "getpwnam",
- "gets",
- "gettimeofday",
- "htonl",
- "htons",
- "iprintf",
- "isascii",
- "isdigit",
- "labs",
- "lchown",
- "ldexp",
- "ldexpf",
- "ldexpl",
- "llabs",
- "log",
- "log10",
- "log10f",
- "log10l",
- "log1p",
- "log1pf",
- "log1pl",
- "log2",
- "log2f",
- "log2l",
- "logb",
- "logbf",
- "logbl",
- "logf",
- "logl",
- "lstat",
- "lstat64",
- "malloc",
- "memalign",
- "memccpy",
- "memchr",
- "memcmp",
- "memcpy",
- "memmove",
- "memrchr",
- "memset",
- "memset_pattern16",
- "mkdir",
- "mktime",
- "modf",
- "modff",
- "modfl",
- "nearbyint",
- "nearbyintf",
- "nearbyintl",
- "ntohl",
- "ntohs",
- "open",
- "open64",
- "opendir",
- "pclose",
- "perror",
- "popen",
- "posix_memalign",
- "pow",
- "powf",
- "powl",
- "pread",
- "printf",
- "putc",
- "putchar",
- "puts",
- "pwrite",
- "qsort",
- "read",
- "readlink",
- "realloc",
- "reallocf",
- "realpath",
- "remove",
- "rename",
- "rewind",
- "rint",
- "rintf",
- "rintl",
- "rmdir",
- "round",
- "roundf",
- "roundl",
- "scanf",
- "setbuf",
- "setitimer",
- "setvbuf",
- "sin",
- "sinf",
- "sinh",
- "sinhf",
- "sinhl",
- "sinl",
- "siprintf",
- "snprintf",
- "sprintf",
- "sqrt",
- "sqrtf",
- "sqrtl",
- "sscanf",
- "stat",
- "stat64",
- "statvfs",
- "statvfs64",
- "stpcpy",
- "stpncpy",
- "strcasecmp",
- "strcat",
- "strchr",
- "strcmp",
- "strcoll",
- "strcpy",
- "strcspn",
- "strdup",
- "strlen",
- "strncasecmp",
- "strncat",
- "strncmp",
- "strncpy",
- "strndup",
- "strnlen",
- "strpbrk",
- "strrchr",
- "strspn",
- "strstr",
- "strtod",
- "strtof",
- "strtok",
- "strtok_r",
- "strtol",
- "strtold",
- "strtoll",
- "strtoul",
- "strtoull",
- "strxfrm",
- "system",
- "tan",
- "tanf",
- "tanh",
- "tanhf",
- "tanhl",
- "tanl",
- "times",
- "tmpfile",
- "tmpfile64",
- "toascii",
- "trunc",
- "truncf",
- "truncl",
- "uname",
- "ungetc",
- "unlink",
- "unsetenv",
- "utime",
- "utimes",
- "valloc",
- "vfprintf",
- "vfscanf",
- "vprintf",
- "vscanf",
- "vsnprintf",
- "vsprintf",
- "vsscanf",
- "write"
- };
-
-static bool hasSinCosPiStret(const Triple &T) {
- // Only Darwin variants have _stret versions of combined trig functions.
- if (!T.isOSDarwin())
- return false;
-
- // The ABI is rather complicated on x86, so don't do anything special there.
- if (T.getArch() == Triple::x86)
- return false;
-
- if (T.isMacOSX() && T.isMacOSXVersionLT(10, 9))
- return false;
-
- if (T.isiOS() && T.isOSVersionLT(7, 0))
- return false;
-
- return true;
-}
-
-/// initialize - Initialize the set of available library functions based on the
-/// specified target triple. This should be carefully written so that a missing
-/// target triple gets a sane set of defaults.
-static void initialize(TargetLibraryInfo &TLI, const Triple &T,
- const char **StandardNames) {
- initializeTargetLibraryInfoPass(*PassRegistry::getPassRegistry());
-
-#ifndef NDEBUG
- // Verify that the StandardNames array is in alphabetical order.
- for (unsigned F = 1; F < LibFunc::NumLibFuncs; ++F) {
- if (strcmp(StandardNames[F-1], StandardNames[F]) >= 0)
- llvm_unreachable("TargetLibraryInfo function names must be sorted");
- }
-#endif // !NDEBUG
-
- // There are no library implementations of mempcy and memset for r600 and
- // these can be difficult to lower in the backend.
- if (T.getArch() == Triple::r600) {
- TLI.setUnavailable(LibFunc::memcpy);
- TLI.setUnavailable(LibFunc::memset);
- TLI.setUnavailable(LibFunc::memset_pattern16);
- return;
- }
-
- // memset_pattern16 is only available on iOS 3.0 and Mac OS X 10.5 and later.
- if (T.isMacOSX()) {
- if (T.isMacOSXVersionLT(10, 5))
- TLI.setUnavailable(LibFunc::memset_pattern16);
- } else if (T.isiOS()) {
- if (T.isOSVersionLT(3, 0))
- TLI.setUnavailable(LibFunc::memset_pattern16);
- } else {
- TLI.setUnavailable(LibFunc::memset_pattern16);
- }
-
- if (!hasSinCosPiStret(T)) {
- TLI.setUnavailable(LibFunc::sinpi);
- TLI.setUnavailable(LibFunc::sinpif);
- TLI.setUnavailable(LibFunc::cospi);
- TLI.setUnavailable(LibFunc::cospif);
- TLI.setUnavailable(LibFunc::sincospi_stret);
- TLI.setUnavailable(LibFunc::sincospif_stret);
- }
-
- if (T.isMacOSX() && T.getArch() == Triple::x86 &&
- !T.isMacOSXVersionLT(10, 7)) {
- // x86-32 OSX has a scheme where fwrite and fputs (and some other functions
- // we don't care about) have two versions; on recent OSX, the one we want
- // has a $UNIX2003 suffix. The two implementations are identical except
- // for the return value in some edge cases. However, we don't want to
- // generate code that depends on the old symbols.
- TLI.setAvailableWithName(LibFunc::fwrite, "fwrite$UNIX2003");
- TLI.setAvailableWithName(LibFunc::fputs, "fputs$UNIX2003");
- }
-
- // iprintf and friends are only available on XCore and TCE.
- if (T.getArch() != Triple::xcore && T.getArch() != Triple::tce) {
- TLI.setUnavailable(LibFunc::iprintf);
- TLI.setUnavailable(LibFunc::siprintf);
- TLI.setUnavailable(LibFunc::fiprintf);
- }
-
- if (T.isOSWindows() && !T.isOSCygMing()) {
- // Win32 does not support long double
- TLI.setUnavailable(LibFunc::acosl);
- TLI.setUnavailable(LibFunc::asinl);
- TLI.setUnavailable(LibFunc::atanl);
- TLI.setUnavailable(LibFunc::atan2l);
- TLI.setUnavailable(LibFunc::ceill);
- TLI.setUnavailable(LibFunc::copysignl);
- TLI.setUnavailable(LibFunc::cosl);
- TLI.setUnavailable(LibFunc::coshl);
- TLI.setUnavailable(LibFunc::expl);
- TLI.setUnavailable(LibFunc::fabsf); // Win32 and Win64 both lack fabsf
- TLI.setUnavailable(LibFunc::fabsl);
- TLI.setUnavailable(LibFunc::floorl);
- TLI.setUnavailable(LibFunc::fmaxl);
- TLI.setUnavailable(LibFunc::fminl);
- TLI.setUnavailable(LibFunc::fmodl);
- TLI.setUnavailable(LibFunc::frexpl);
- TLI.setUnavailable(LibFunc::ldexpf);
- TLI.setUnavailable(LibFunc::ldexpl);
- TLI.setUnavailable(LibFunc::logl);
- TLI.setUnavailable(LibFunc::modfl);
- TLI.setUnavailable(LibFunc::powl);
- TLI.setUnavailable(LibFunc::sinl);
- TLI.setUnavailable(LibFunc::sinhl);
- TLI.setUnavailable(LibFunc::sqrtl);
- TLI.setUnavailable(LibFunc::tanl);
- TLI.setUnavailable(LibFunc::tanhl);
-
- // Win32 only has C89 math
- TLI.setUnavailable(LibFunc::acosh);
- TLI.setUnavailable(LibFunc::acoshf);
- TLI.setUnavailable(LibFunc::acoshl);
- TLI.setUnavailable(LibFunc::asinh);
- TLI.setUnavailable(LibFunc::asinhf);
- TLI.setUnavailable(LibFunc::asinhl);
- TLI.setUnavailable(LibFunc::atanh);
- TLI.setUnavailable(LibFunc::atanhf);
- TLI.setUnavailable(LibFunc::atanhl);
- TLI.setUnavailable(LibFunc::cbrt);
- TLI.setUnavailable(LibFunc::cbrtf);
- TLI.setUnavailable(LibFunc::cbrtl);
- TLI.setUnavailable(LibFunc::exp2);
- TLI.setUnavailable(LibFunc::exp2f);
- TLI.setUnavailable(LibFunc::exp2l);
- TLI.setUnavailable(LibFunc::expm1);
- TLI.setUnavailable(LibFunc::expm1f);
- TLI.setUnavailable(LibFunc::expm1l);
- TLI.setUnavailable(LibFunc::log2);
- TLI.setUnavailable(LibFunc::log2f);
- TLI.setUnavailable(LibFunc::log2l);
- TLI.setUnavailable(LibFunc::log1p);
- TLI.setUnavailable(LibFunc::log1pf);
- TLI.setUnavailable(LibFunc::log1pl);
- TLI.setUnavailable(LibFunc::logb);
- TLI.setUnavailable(LibFunc::logbf);
- TLI.setUnavailable(LibFunc::logbl);
- TLI.setUnavailable(LibFunc::nearbyint);
- TLI.setUnavailable(LibFunc::nearbyintf);
- TLI.setUnavailable(LibFunc::nearbyintl);
- TLI.setUnavailable(LibFunc::rint);
- TLI.setUnavailable(LibFunc::rintf);
- TLI.setUnavailable(LibFunc::rintl);
- TLI.setUnavailable(LibFunc::round);
- TLI.setUnavailable(LibFunc::roundf);
- TLI.setUnavailable(LibFunc::roundl);
- TLI.setUnavailable(LibFunc::trunc);
- TLI.setUnavailable(LibFunc::truncf);
- TLI.setUnavailable(LibFunc::truncl);
-
- // Win32 provides some C99 math with mangled names
- TLI.setAvailableWithName(LibFunc::copysign, "_copysign");
-
- if (T.getArch() == Triple::x86) {
- // Win32 on x86 implements single-precision math functions as macros
- TLI.setUnavailable(LibFunc::acosf);
- TLI.setUnavailable(LibFunc::asinf);
- TLI.setUnavailable(LibFunc::atanf);
- TLI.setUnavailable(LibFunc::atan2f);
- TLI.setUnavailable(LibFunc::ceilf);
- TLI.setUnavailable(LibFunc::copysignf);
- TLI.setUnavailable(LibFunc::cosf);
- TLI.setUnavailable(LibFunc::coshf);
- TLI.setUnavailable(LibFunc::expf);
- TLI.setUnavailable(LibFunc::floorf);
- TLI.setUnavailable(LibFunc::fminf);
- TLI.setUnavailable(LibFunc::fmaxf);
- TLI.setUnavailable(LibFunc::fmodf);
- TLI.setUnavailable(LibFunc::logf);
- TLI.setUnavailable(LibFunc::powf);
- TLI.setUnavailable(LibFunc::sinf);
- TLI.setUnavailable(LibFunc::sinhf);
- TLI.setUnavailable(LibFunc::sqrtf);
- TLI.setUnavailable(LibFunc::tanf);
- TLI.setUnavailable(LibFunc::tanhf);
- }
-
- // Win32 does *not* provide provide these functions, but they are
- // generally available on POSIX-compliant systems:
- TLI.setUnavailable(LibFunc::access);
- TLI.setUnavailable(LibFunc::bcmp);
- TLI.setUnavailable(LibFunc::bcopy);
- TLI.setUnavailable(LibFunc::bzero);
- TLI.setUnavailable(LibFunc::chmod);
- TLI.setUnavailable(LibFunc::chown);
- TLI.setUnavailable(LibFunc::closedir);
- TLI.setUnavailable(LibFunc::ctermid);
- TLI.setUnavailable(LibFunc::fdopen);
- TLI.setUnavailable(LibFunc::ffs);
- TLI.setUnavailable(LibFunc::fileno);
- TLI.setUnavailable(LibFunc::flockfile);
- TLI.setUnavailable(LibFunc::fseeko);
- TLI.setUnavailable(LibFunc::fstat);
- TLI.setUnavailable(LibFunc::fstatvfs);
- TLI.setUnavailable(LibFunc::ftello);
- TLI.setUnavailable(LibFunc::ftrylockfile);
- TLI.setUnavailable(LibFunc::funlockfile);
- TLI.setUnavailable(LibFunc::getc_unlocked);
- TLI.setUnavailable(LibFunc::getitimer);
- TLI.setUnavailable(LibFunc::getlogin_r);
- TLI.setUnavailable(LibFunc::getpwnam);
- TLI.setUnavailable(LibFunc::gettimeofday);
- TLI.setUnavailable(LibFunc::htonl);
- TLI.setUnavailable(LibFunc::htons);
- TLI.setUnavailable(LibFunc::lchown);
- TLI.setUnavailable(LibFunc::lstat);
- TLI.setUnavailable(LibFunc::memccpy);
- TLI.setUnavailable(LibFunc::mkdir);
- TLI.setUnavailable(LibFunc::ntohl);
- TLI.setUnavailable(LibFunc::ntohs);
- TLI.setUnavailable(LibFunc::open);
- TLI.setUnavailable(LibFunc::opendir);
- TLI.setUnavailable(LibFunc::pclose);
- TLI.setUnavailable(LibFunc::popen);
- TLI.setUnavailable(LibFunc::pread);
- TLI.setUnavailable(LibFunc::pwrite);
- TLI.setUnavailable(LibFunc::read);
- TLI.setUnavailable(LibFunc::readlink);
- TLI.setUnavailable(LibFunc::realpath);
- TLI.setUnavailable(LibFunc::rmdir);
- TLI.setUnavailable(LibFunc::setitimer);
- TLI.setUnavailable(LibFunc::stat);
- TLI.setUnavailable(LibFunc::statvfs);
- TLI.setUnavailable(LibFunc::stpcpy);
- TLI.setUnavailable(LibFunc::stpncpy);
- TLI.setUnavailable(LibFunc::strcasecmp);
- TLI.setUnavailable(LibFunc::strncasecmp);
- TLI.setUnavailable(LibFunc::times);
- TLI.setUnavailable(LibFunc::uname);
- TLI.setUnavailable(LibFunc::unlink);
- TLI.setUnavailable(LibFunc::unsetenv);
- TLI.setUnavailable(LibFunc::utime);
- TLI.setUnavailable(LibFunc::utimes);
- TLI.setUnavailable(LibFunc::write);
-
- // Win32 does *not* provide provide these functions, but they are
- // specified by C99:
- TLI.setUnavailable(LibFunc::atoll);
- TLI.setUnavailable(LibFunc::frexpf);
- TLI.setUnavailable(LibFunc::llabs);
- }
-
- switch (T.getOS()) {
- case Triple::MacOSX:
- // exp10 and exp10f are not available on OS X until 10.9 and iOS until 7.0
- // and their names are __exp10 and __exp10f. exp10l is not available on
- // OS X or iOS.
- TLI.setUnavailable(LibFunc::exp10l);
- if (T.isMacOSXVersionLT(10, 9)) {
- TLI.setUnavailable(LibFunc::exp10);
- TLI.setUnavailable(LibFunc::exp10f);
- } else {
- TLI.setAvailableWithName(LibFunc::exp10, "__exp10");
- TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f");
- }
- break;
- case Triple::IOS:
- TLI.setUnavailable(LibFunc::exp10l);
- if (T.isOSVersionLT(7, 0)) {
- TLI.setUnavailable(LibFunc::exp10);
- TLI.setUnavailable(LibFunc::exp10f);
- } else {
- TLI.setAvailableWithName(LibFunc::exp10, "__exp10");
- TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f");
- }
- break;
- case Triple::Linux:
- // exp10, exp10f, exp10l is available on Linux (GLIBC) but are extremely
- // buggy prior to glibc version 2.18. Until this version is widely deployed
- // or we have a reasonable detection strategy, we cannot use exp10 reliably
- // on Linux.
- //
- // Fall through to disable all of them.
- default:
- TLI.setUnavailable(LibFunc::exp10);
- TLI.setUnavailable(LibFunc::exp10f);
- TLI.setUnavailable(LibFunc::exp10l);
- }
-
- // ffsl is available on at least Darwin, Mac OS X, iOS, FreeBSD, and
- // Linux (GLIBC):
- // http://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/ffsl.3.html
- // http://svn.freebsd.org/base/user/eri/pf45/head/lib/libc/string/ffsl.c
- // http://www.gnu.org/software/gnulib/manual/html_node/ffsl.html
- switch (T.getOS()) {
- case Triple::Darwin:
- case Triple::MacOSX:
- case Triple::IOS:
- case Triple::FreeBSD:
- case Triple::Linux:
- break;
- default:
- TLI.setUnavailable(LibFunc::ffsl);
- }
-
- // ffsll is available on at least FreeBSD and Linux (GLIBC):
- // http://svn.freebsd.org/base/user/eri/pf45/head/lib/libc/string/ffsll.c
- // http://www.gnu.org/software/gnulib/manual/html_node/ffsll.html
- switch (T.getOS()) {
- case Triple::FreeBSD:
- case Triple::Linux:
- break;
- default:
- TLI.setUnavailable(LibFunc::ffsll);
- }
-
- // The following functions are available on at least Linux:
- if (!T.isOSLinux()) {
- TLI.setUnavailable(LibFunc::dunder_strdup);
- TLI.setUnavailable(LibFunc::dunder_strtok_r);
- TLI.setUnavailable(LibFunc::dunder_isoc99_scanf);
- TLI.setUnavailable(LibFunc::dunder_isoc99_sscanf);
- TLI.setUnavailable(LibFunc::under_IO_getc);
- TLI.setUnavailable(LibFunc::under_IO_putc);
- TLI.setUnavailable(LibFunc::memalign);
- TLI.setUnavailable(LibFunc::fopen64);
- TLI.setUnavailable(LibFunc::fseeko64);
- TLI.setUnavailable(LibFunc::fstat64);
- TLI.setUnavailable(LibFunc::fstatvfs64);
- TLI.setUnavailable(LibFunc::ftello64);
- TLI.setUnavailable(LibFunc::lstat64);
- TLI.setUnavailable(LibFunc::open64);
- TLI.setUnavailable(LibFunc::stat64);
- TLI.setUnavailable(LibFunc::statvfs64);
- TLI.setUnavailable(LibFunc::tmpfile64);
- }
-}
-
-
-TargetLibraryInfo::TargetLibraryInfo() : ImmutablePass(ID) {
- // Default to everything being available.
- memset(AvailableArray, -1, sizeof(AvailableArray));
-
- initialize(*this, Triple(), StandardNames);
-}
-
-TargetLibraryInfo::TargetLibraryInfo(const Triple &T) : ImmutablePass(ID) {
- // Default to everything being available.
- memset(AvailableArray, -1, sizeof(AvailableArray));
-
- initialize(*this, T, StandardNames);
-}
-
-TargetLibraryInfo::TargetLibraryInfo(const TargetLibraryInfo &TLI)
- : ImmutablePass(ID) {
- memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
- CustomNames = TLI.CustomNames;
-}
-
-namespace {
-struct StringComparator {
- /// Compare two strings and return true if LHS is lexicographically less than
- /// RHS. Requires that RHS doesn't contain any zero bytes.
- bool operator()(const char *LHS, StringRef RHS) const {
- // Compare prefixes with strncmp. If prefixes match we know that LHS is
- // greater or equal to RHS as RHS can't contain any '\0'.
- return std::strncmp(LHS, RHS.data(), RHS.size()) < 0;
- }
-
- // Provided for compatibility with MSVC's debug mode.
- bool operator()(StringRef LHS, const char *RHS) const { return LHS < RHS; }
- bool operator()(StringRef LHS, StringRef RHS) const { return LHS < RHS; }
- bool operator()(const char *LHS, const char *RHS) const {
- return std::strcmp(LHS, RHS) < 0;
- }
-};
-}
-
-bool TargetLibraryInfo::getLibFunc(StringRef funcName,
- LibFunc::Func &F) const {
- const char **Start = &StandardNames[0];
- const char **End = &StandardNames[LibFunc::NumLibFuncs];
-
- // Filter out empty names and names containing null bytes, those can't be in
- // our table.
- if (funcName.empty() || funcName.find('\0') != StringRef::npos)
- return false;
-
- // Check for \01 prefix that is used to mangle __asm declarations and
- // strip it if present.
- if (funcName.front() == '\01')
- funcName = funcName.substr(1);
- const char **I = std::lower_bound(Start, End, funcName, StringComparator());
- if (I != End && *I == funcName) {
- F = (LibFunc::Func)(I - Start);
- return true;
- }
- return false;
-}
-
-/// disableAllFunctions - This disables all builtins, which is used for options
-/// like -fno-builtin.
-void TargetLibraryInfo::disableAllFunctions() {
- memset(AvailableArray, 0, sizeof(AvailableArray));
-}
diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp
index 01139fb..faa6fbe 100644
--- a/lib/Target/TargetLoweringObjectFile.cpp
+++ b/lib/Target/TargetLoweringObjectFile.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
void TargetLoweringObjectFile::Initialize(MCContext &ctx,
const TargetMachine &TM) {
Ctx = &ctx;
- DL = TM.getSubtargetImpl()->getDataLayout();
+ DL = TM.getDataLayout();
InitMCObjectFileInfo(TM.getTargetTriple(),
TM.getRelocationModel(), TM.getCodeModel(), *Ctx);
}
@@ -200,12 +200,12 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// Otherwise, just drop it into a mergable constant section. If we have
// a section for this size, use it, otherwise use the arbitrary sized
// mergable section.
- switch (TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(
- C->getType())) {
+ switch (TM.getDataLayout()->getTypeAllocSize(C->getType())) {
case 4: return SectionKind::getMergeableConst4();
case 8: return SectionKind::getMergeableConst8();
case 16: return SectionKind::getMergeableConst16();
- default: return SectionKind::getMergeableConst();
+ default:
+ return SectionKind::getReadOnly();
}
case Constant::LocalRelocation:
@@ -270,11 +270,28 @@ SectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
return SelectSectionForGlobal(GV, Kind, Mang, TM);
}
-bool TargetLoweringObjectFile::isSectionAtomizableBySymbols(
- const MCSection &Section) const {
- return false;
+const MCSection *TargetLoweringObjectFile::getSectionForJumpTable(
+ const Function &F, Mangler &Mang, const TargetMachine &TM) const {
+ return getSectionForConstant(SectionKind::getReadOnly(), /*C=*/nullptr);
}
+bool TargetLoweringObjectFile::shouldPutJumpTableInFunctionSection(
+ bool UsesLabelDifference, const Function &F) const {
+ // In PIC mode, we need to emit the jump table to the same section as the
+ // function body itself, otherwise the label differences won't make sense.
+ // FIXME: Need a better predicate for this: what about custom entries?
+ if (UsesLabelDifference)
+ return true;
+
+ // We should also do this if the section name is null or the function is
+ // declared in a discardable section.
+ // FIXME: this isn't the right predicate, should be based on the MCSection
+ // for the function.
+ if (F.isWeakForLinker())
+ return true;
+
+ return false;
+}
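// Illustrative example of the label-difference case handled above: a PIC
// jump table entry is emitted as something like
//
//   .long .LBB0_2-.LJTI0_0
//
// and such a difference only resolves to a constant when the table and the
// branch targets end up in the same section, hence the forced co-location
// with the function body.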
/// getSectionForConstant - Given a mergable constant with the
/// specified size and relocation information, return a section that it
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index 309e1bf..307e93c 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
@@ -21,8 +22,10 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/SectionKind.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -51,10 +54,8 @@ TargetMachine::~TargetMachine() {
void TargetMachine::resetTargetOptions(const Function &F) const {
#define RESET_OPTION(X, Y) \
do { \
- if (F.hasFnAttribute(Y)) \
- Options.X = (F.getAttributes() \
- .getAttribute(AttributeSet::FunctionIndex, Y) \
- .getValueAsString() == "true"); \
+ if (F.hasFnAttribute(Y)) \
+ Options.X = (F.getFnAttribute(Y).getValueAsString() == "true"); \
} while (0)
RESET_OPTION(NoFramePointerElim, "no-frame-pointer-elim");
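// For illustration, RESET_OPTION(NoFramePointerElim, "no-frame-pointer-elim")
// now expands to roughly:
//
//   if (F.hasFnAttribute("no-frame-pointer-elim"))
//     Options.NoFramePointerElim =
//         (F.getFnAttribute("no-frame-pointer-elim").getValueAsString() ==
//          "true");
//
// i.e. the new Function::getFnAttribute accessor replaces the verbose
// getAttributes().getAttribute(AttributeSet::FunctionIndex, ...) chain.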
@@ -145,28 +146,22 @@ void TargetMachine::setOptLevel(CodeGenOpt::Level Level) const {
CodeGenInfo->setOptLevel(Level);
}
-bool TargetMachine::getAsmVerbosityDefault() const {
- return Options.MCOptions.AsmVerbose;
+TargetIRAnalysis TargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &) { return TargetTransformInfo(getDataLayout()); });
}
-void TargetMachine::setAsmVerbosityDefault(bool V) {
- Options.MCOptions.AsmVerbose = V;
-}
-
-bool TargetMachine::getFunctionSections() const {
- return Options.FunctionSections;
-}
+static bool canUsePrivateLabel(const MCAsmInfo &AsmInfo,
+ const MCSection &Section) {
+ if (!AsmInfo.isSectionAtomizableBySymbols(Section))
+ return true;
-bool TargetMachine::getDataSections() const {
- return Options.DataSections;
-}
-
-void TargetMachine::setFunctionSections(bool V) {
- Options.FunctionSections = V;
-}
+ // If the section cannot be dead-stripped, it is safe to use private labels.
+ const MCSectionMachO &SMO = cast<MCSectionMachO>(Section);
+ if (SMO.hasAttribute(MachO::S_ATTR_NO_DEAD_STRIP))
+ return true;
-void TargetMachine::setDataSections(bool V) {
- Options.DataSections = V;
+ return false;
}
void TargetMachine::getNameWithPrefix(SmallVectorImpl<char> &Name,
@@ -179,17 +174,15 @@ void TargetMachine::getNameWithPrefix(SmallVectorImpl<char> &Name,
return;
}
SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, *this);
- const TargetLoweringObjectFile &TLOF =
- getSubtargetImpl()->getTargetLowering()->getObjFileLowering();
- const MCSection *TheSection = TLOF.SectionForGlobal(GV, GVKind, Mang, *this);
- bool CannotUsePrivateLabel = TLOF.isSectionAtomizableBySymbols(*TheSection);
+ const TargetLoweringObjectFile *TLOF = getObjFileLowering();
+ const MCSection *TheSection = TLOF->SectionForGlobal(GV, GVKind, Mang, *this);
+ bool CannotUsePrivateLabel = !canUsePrivateLabel(*AsmInfo, *TheSection);
Mang.getNameWithPrefix(Name, GV, CannotUsePrivateLabel);
}
MCSymbol *TargetMachine::getSymbol(const GlobalValue *GV, Mangler &Mang) const {
SmallString<60> NameStr;
getNameWithPrefix(NameStr, GV, Mang);
- const TargetLoweringObjectFile &TLOF =
- getSubtargetImpl()->getTargetLowering()->getObjFileLowering();
- return TLOF.getContext().GetOrCreateSymbol(NameStr.str());
+ const TargetLoweringObjectFile *TLOF = getObjFileLowering();
+ return TLOF->getContext().GetOrCreateSymbol(NameStr.str());
}
diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp
index b3e07df..c7838a9 100644
--- a/lib/Target/TargetMachineC.cpp
+++ b/lib/Target/TargetMachineC.cpp
@@ -14,9 +14,10 @@
#include "llvm-c/TargetMachine.h"
#include "llvm-c/Core.h"
#include "llvm-c/Target.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormattedStream.h"
@@ -173,12 +174,12 @@ char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) {
}
LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
- return wrap(unwrap(T)->getSubtargetImpl()->getDataLayout());
+ return wrap(unwrap(T)->getDataLayout());
}
void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T,
LLVMBool VerboseAsm) {
- unwrap(T)->setAsmVerbosityDefault(VerboseAsm);
+ unwrap(T)->Options.MCOptions.AsmVerbose = VerboseAsm;
}
static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
@@ -186,11 +187,11 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
TargetMachine* TM = unwrap(T);
Module* Mod = unwrap(M);
- PassManager pass;
+ legacy::PassManager pass;
std::string error;
- const DataLayout *td = TM->getSubtargetImpl()->getDataLayout();
+ const DataLayout *td = TM->getDataLayout();
if (!td) {
error = "No DataLayout in TargetMachine";
@@ -255,5 +256,6 @@ char *LLVMGetDefaultTargetTriple(void) {
}
void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM) {
- unwrap(T)->addAnalysisPasses(*unwrap(PM));
+ unwrap(PM)->add(
+ createTargetTransformInfoWrapperPass(unwrap(T)->getTargetIRAnalysis()));
}
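// Hedged usage sketch of the rewritten C API entry point above, assuming TM
// is an already-created LLVMTargetMachineRef:
//
//   LLVMPassManagerRef PM = LLVMCreatePassManager();
//   LLVMAddAnalysisPasses(TM, PM);
//   /* ... LLVMRunPassManager(PM, Mod); ... */
//   LLVMDisposePassManager(PM);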
diff --git a/lib/Target/X86/Android.mk b/lib/Target/X86/Android.mk
index 861a41d..08646d0 100644
--- a/lib/Target/X86/Android.mk
+++ b/lib/Target/X86/Android.mk
@@ -12,6 +12,7 @@ x86_codegen_TBLGEN_TABLES := \
x86_codegen_SRC_FILES := \
X86AsmPrinter.cpp \
+ X86CallFrameOptimization.cpp \
X86FastISel.cpp \
X86FixupLEAs.cpp \
X86FloatingPoint.cpp \
diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 9c49a11..543af8e 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -165,9 +165,9 @@ public:
}
unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
- static const unsigned Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
- X86::RCX, X86::RDX, X86::RDI,
- X86::RSI };
+ static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
+ X86::RCX, X86::RDX, X86::RDI,
+ X86::RSI };
for (unsigned Reg : Candidates) {
if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
return convReg(Reg, VT);
@@ -261,6 +261,23 @@ protected:
int64_t Displacement,
MCContext &Ctx, int64_t *Residue);
+ bool is64BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
+ }
+ bool is32BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
+ }
+ bool is16BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
+ }
+
+ unsigned getPointerWidth() {
+ if (is16BitMode()) return 16;
+ if (is32BitMode()) return 32;
+ if (is64BitMode()) return 64;
+ llvm_unreachable("invalid mode");
+ }
+
// True when previous instruction was actually REP prefix.
bool RepPrefix;
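// The value computed by getPointerWidth() above becomes the new leading
// mode-size argument of X86Operand::CreateMem throughout the hunks below
// (exposed later as Op.getMemModeSize()), so every synthesized memory
// operand records whether it addresses 16-, 32-, or 64-bit memory.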
@@ -301,7 +318,7 @@ void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
{
const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
Out);
}
@@ -310,7 +327,8 @@ void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
{
const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
+ SMLoc()));
InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
Out);
}
@@ -319,7 +337,7 @@ void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
{
const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
}
@@ -327,7 +345,8 @@ void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
{
const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, DstReg, CntReg, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
+ SMLoc()));
InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
}
@@ -445,7 +464,8 @@ void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
const MCConstantExpr *Disp =
MCConstantExpr::Create(ApplyDisplacementBounds(Residue), Ctx);
std::unique_ptr<X86Operand> DispOp =
- X86Operand::CreateMem(0, Disp, Reg, 0, 1, SMLoc(), SMLoc());
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
+ SMLoc());
EmitLEA(*DispOp, VT, Reg, Out);
Residue -= Disp->getValue();
}
@@ -459,9 +479,10 @@ X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
if (Displacement == 0 ||
(Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
*Residue = Displacement;
- return X86Operand::CreateMem(Op.getMemSegReg(), Op.getMemDisp(),
- Op.getMemBaseReg(), Op.getMemIndexReg(),
- Op.getMemScale(), SMLoc(), SMLoc());
+ return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
+ Op.getMemDisp(), Op.getMemBaseReg(),
+ Op.getMemIndexReg(), Op.getMemScale(),
+ SMLoc(), SMLoc());
}
int64_t OrigDisplacement =
@@ -474,9 +495,9 @@ X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
*Residue = Displacement - NewDisplacement;
const MCExpr *Disp = MCConstantExpr::Create(NewDisplacement, Ctx);
- return X86Operand::CreateMem(Op.getMemSegReg(), Disp, Op.getMemBaseReg(),
- Op.getMemIndexReg(), Op.getMemScale(), SMLoc(),
- SMLoc());
+ return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
+ Op.getMemBaseReg(), Op.getMemIndexReg(),
+ Op.getMemScale(), SMLoc(), SMLoc());
}
class X86AddressSanitizer32 : public X86AddressSanitizer {
@@ -625,7 +646,8 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
EmitInstruction(Out, Inst);
}
@@ -634,7 +656,7 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
MCSymbol *DoneSym = Ctx.CreateTempSymbol();
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
AddressRegI32));
@@ -644,12 +666,14 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
.addImm(7));
switch (AccessSize) {
+ default: llvm_unreachable("Incorrect access size");
case 1:
break;
case 2: {
const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
break;
}
@@ -659,9 +683,6 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
.addReg(ScratchRegI32)
.addImm(3));
break;
- default:
- assert(false && "Incorrect access size");
- break;
}
EmitInstruction(
@@ -669,7 +690,7 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
ShadowRegI32));
- EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
EmitLabel(Out, DoneSym);
@@ -692,26 +713,25 @@ void X86AddressSanitizer32::InstrumentMemOperandLarge(
{
MCInst Inst;
switch (AccessSize) {
+ default: llvm_unreachable("Incorrect access size");
case 8:
Inst.setOpcode(X86::CMP8mi);
break;
case 16:
Inst.setOpcode(X86::CMP16mi);
break;
- default:
- assert(false && "Incorrect access size");
- break;
}
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
Inst.addOperand(MCOperand::CreateImm(0));
EmitInstruction(Out, Inst);
}
MCSymbol *DoneSym = Ctx.CreateTempSymbol();
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
EmitLabel(Out, DoneSym);
@@ -727,7 +747,7 @@ void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
EmitInstruction(
Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
// Instrument first and last elements in src and dst range.
InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
@@ -843,7 +863,8 @@ private:
void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i64, X86::RSP, Out);
OrigSPOffset += Offset;
}
@@ -896,7 +917,8 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI64, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
EmitInstruction(Out, Inst);
}
@@ -905,7 +927,7 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
MCSymbol *DoneSym = Ctx.CreateTempSymbol();
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
AddressRegI32));
@@ -915,12 +937,14 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
.addImm(7));
switch (AccessSize) {
+ default: llvm_unreachable("Incorrect access size");
case 1:
break;
case 2: {
const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
break;
}
@@ -930,9 +954,6 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
.addReg(ScratchRegI32)
.addImm(3));
break;
- default:
- assert(false && "Incorrect access size");
- break;
}
EmitInstruction(
@@ -940,7 +961,7 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
ShadowRegI32));
- EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
EmitLabel(Out, DoneSym);
@@ -963,19 +984,18 @@ void X86AddressSanitizer64::InstrumentMemOperandLarge(
{
MCInst Inst;
switch (AccessSize) {
+ default: llvm_unreachable("Incorrect access size");
case 8:
Inst.setOpcode(X86::CMP8mi);
break;
case 16:
Inst.setOpcode(X86::CMP16mi);
break;
- default:
- assert(false && "Incorrect access size");
- break;
}
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI64, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
Inst.addOperand(MCOperand::CreateImm(0));
EmitInstruction(Out, Inst);
@@ -983,7 +1003,7 @@ void X86AddressSanitizer64::InstrumentMemOperandLarge(
MCSymbol *DoneSym = Ctx.CreateTempSymbol();
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
EmitLabel(Out, DoneSym);
@@ -999,7 +1019,7 @@ void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
EmitInstruction(
Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
- EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+ EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
// Instrument first and last elements in src and dst range.
InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 8ef2a55..0b6fb52 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -86,7 +86,7 @@ private:
typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
SmallVector<InfixCalculatorTok, 4> InfixOperatorStack;
SmallVector<ICToken, 4> PostfixStack;
-
+
public:
int64_t popOperand() {
assert (!PostfixStack.empty() && "Poped an empty stack!");
@@ -100,7 +100,7 @@ private:
"Unexpected operand!");
PostfixStack.push_back(std::make_pair(Op, Val));
}
-
+
void popOperator() { InfixOperatorStack.pop_back(); }
void pushOperator(InfixCalculatorTok Op) {
// Push the new operator if the stack is empty.
@@ -108,7 +108,7 @@ private:
InfixOperatorStack.push_back(Op);
return;
}
-
+
// Push the new operator if it has a higher precedence than the operator
// on the top of the stack or the operator on the top of the stack is a
// left parentheses.
@@ -118,7 +118,7 @@ private:
InfixOperatorStack.push_back(Op);
return;
}
-
+
// The operator on the top of the stack has higher precedence than the
// new operator.
unsigned ParenCount = 0;
@@ -126,17 +126,17 @@ private:
// Nothing to process.
if (InfixOperatorStack.empty())
break;
-
+
Idx = InfixOperatorStack.size() - 1;
StackOp = InfixOperatorStack[Idx];
if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
break;
-
+
// If we have an even parentheses count and we see a left parentheses,
// then stop processing.
if (!ParenCount && StackOp == IC_LPAREN)
break;
-
+
if (StackOp == IC_RPAREN) {
++ParenCount;
InfixOperatorStack.pop_back();
@@ -158,10 +158,10 @@ private:
if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
PostfixStack.push_back(std::make_pair(StackOp, 0));
}
-
+
if (PostfixStack.empty())
return 0;
-
+
SmallVector<ICToken, 16> OperandStack;
for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
ICToken Op = PostfixStack[i];
@@ -263,7 +263,7 @@ private:
State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0),
Scale(1), Imm(imm), Sym(nullptr), StopOnLBrac(stoponlbrac),
AddImmPrefix(addimmprefix) { Info.clear(); }
-
+
unsigned getBaseReg() { return BaseReg; }
unsigned getIndexReg() { return IndexReg; }
unsigned getScale() { return Scale; }
@@ -684,6 +684,7 @@ private:
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
+ bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops);
/// Wrapper around MCStreamer::EmitInstruction(). Possibly adds
@@ -711,13 +712,6 @@ private:
uint64_t &ErrorInfo,
bool MatchingInlineAsm);
- unsigned getPointerSize() {
- if (is16BitMode()) return 16;
- if (is32BitMode()) return 32;
- if (is64BitMode()) return 64;
- llvm_unreachable("invalid mode");
- }
-
bool OmitRegisterFromClobberLists(unsigned RegNo) override;
/// doSrcDstMatch - Returns true if operands are matching in their
@@ -977,16 +971,18 @@ std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
unsigned basereg =
is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI);
const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
- /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/basereg, /*IndexReg=*/0, /*Scale=*/1,
+ Loc, Loc, 0);
}
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
unsigned basereg =
is64BitMode() ? X86::RDI : (is32BitMode() ? X86::EDI : X86::DI);
const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
- /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/basereg, /*IndexReg=*/0, /*Scale=*/1,
+ Loc, Loc, 0);
}
std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
@@ -1027,8 +1023,8 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
// Create an absolute memory reference in order to match against
// instructions taking a PC relative operand.
- return X86Operand::CreateMem(Disp, Start, End, Size, Identifier,
- Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size,
+ Identifier, Info.OpDecl);
}
// We either have a direct symbol reference, or an offset from a symbol. The
@@ -1050,8 +1046,9 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
// if we don't know the actual value at this time. This is necessary to
// get the matching correct in some cases.
BaseReg = BaseReg ? BaseReg : 1;
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size, Identifier, Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, Start, End, Size, Identifier,
+ Info.OpDecl);
}
static void
@@ -1103,7 +1100,7 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
(*I).Kind = AOK_Delete;
}
const char *SymLocPtr = SymName.data();
- // Skip everything before the symbol.
+ // Skip everything before the symbol.
if (unsigned Len = SymLocPtr - StartInBrac.getPointer()) {
      assert(Len > 0 && "Expected a positive length.");
AsmRewrites->push_back(AsmRewrite(AOK_Skip, StartInBrac, Len));
@@ -1128,7 +1125,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
      // identifier. Don't try to parse it as a register.
if (Tok.getString().startswith("."))
break;
-
+
// If we're parsing an immediate expression, we don't expect a '['.
if (SM.getStopOnLBrac() && getLexer().getKind() == AsmToken::LBrac)
break;
@@ -1194,7 +1191,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
MCSymbol *Sym =
getContext().GetDirectionalLocalSymbol(IntVal, IDVal == "b");
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- const MCExpr *Val =
+ const MCExpr *Val =
MCSymbolRefExpr::Create(Sym, Variant, getContext());
if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "invalid reference to undefined symbol");
@@ -1279,7 +1276,7 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
const MCExpr *NewDisp;
if (ParseIntelDotOperator(Disp, NewDisp))
return nullptr;
-
+
End = Tok.getEndLoc();
Parser.Lex(); // Eat the field.
Disp = NewDisp;
@@ -1292,17 +1289,17 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// handle [-42]
if (!BaseReg && !IndexReg) {
if (!SegReg)
- return X86Operand::CreateMem(Disp, Start, End, Size);
- else
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ Start, End, Size);
}
StringRef ErrMsg;
if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) {
Error(StartInBrac, ErrMsg);
return nullptr;
}
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, Start, End, Size);
}
InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
@@ -1383,9 +1380,9 @@ X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start,
// be followed by a bracketed expression. If it isn't we know we have our
// final segment override.
const MCExpr *Disp = MCConstantExpr::Create(ImmDisp, getContext());
- return X86Operand::CreateMem(SegReg, Disp, /*BaseReg=*/0, /*IndexReg=*/0,
- /*Scale=*/1, Start, ImmDispToken.getEndLoc(),
- Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
+ /*BaseReg=*/0, /*IndexReg=*/0, /*Scale=*/1,
+ Start, ImmDispToken.getEndLoc(), Size);
}
}
@@ -1398,7 +1395,7 @@ X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start,
if (getParser().parsePrimaryExpr(Val, End))
return ErrorOperand(Tok.getLoc(), "unknown token in expression");
- return X86Operand::CreateMem(Val, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Val, Start, End, Size);
}
InlineAsmIdentifierInfo Info;
@@ -1428,7 +1425,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp,
if (getParser().parsePrimaryExpr(Val, End))
return ErrorOperand(Tok.getLoc(), "unknown token in expression");
- return X86Operand::CreateMem(Val, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Val, Start, End, Size);
}
InlineAsmIdentifierInfo Info;
@@ -1466,9 +1463,9 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp,
// BaseReg is non-zero to avoid assertions. In the context of inline asm,
// we're pointing to a local variable in memory, so the base register is
// really the frame or stack pointer.
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/1, /*IndexReg=*/0,
- /*Scale=*/1, Start, End, Size, Identifier,
- Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/1, /*IndexReg=*/0, /*Scale=*/1,
+ Start, End, Size, Identifier, Info.OpDecl);
}
/// Parse the '.' operator.
@@ -1643,7 +1640,8 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
// to the MCExpr with the directional local symbol and this is a
// memory operand not an immediate operand.
if (SM.getSym())
- return X86Operand::CreateMem(SM.getSym(), Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SM.getSym(), Start, End,
+ Size);
const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext());
return X86Operand::CreateImm(ImmExpr, Start, End);
@@ -1802,8 +1800,9 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
if (getLexer().isNot(AsmToken::LParen)) {
// Unless we have a segment register, treat this as an immediate.
if (SegReg == 0)
- return X86Operand::CreateMem(Disp, MemStart, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ MemStart, ExprEnd);
}
// Eat the '('.
@@ -1829,8 +1828,10 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
if (getLexer().isNot(AsmToken::LParen)) {
// Unless we have a segment register, treat this as an immediate.
if (SegReg == 0)
- return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, LParenLoc,
+ ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ MemStart, ExprEnd);
}
// Eat the '('.
@@ -1946,9 +1947,9 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
}
if (SegReg || BaseReg || IndexReg)
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
- MemStart, MemEnd);
- return X86Operand::CreateMem(Disp, MemStart, MemEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, MemStart, MemEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, MemEnd);
}
bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
@@ -1963,14 +1964,13 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
PatchedName = PatchedName.substr(0, Name.size()-1);
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
- const MCExpr *ExtraImmOp = nullptr;
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
bool IsVCMP = PatchedName[0] == 'v';
- unsigned SSECCIdx = IsVCMP ? 4 : 3;
- unsigned SSEComparisonCode = StringSwitch<unsigned>(
- PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
+ unsigned CCIdx = IsVCMP ? 4 : 3;
+ unsigned ComparisonCode = StringSwitch<unsigned>(
+ PatchedName.slice(CCIdx, PatchedName.size() - 2))
.Case("eq", 0x00)
.Case("lt", 0x01)
.Case("le", 0x02)
@@ -2005,27 +2005,75 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
.Case("gt_oq", 0x1E)
.Case("true_us", 0x1F)
.Default(~0U);
- if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
- ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
- getParser().getContext());
- if (PatchedName.endswith("ss")) {
- PatchedName = IsVCMP ? "vcmpss" : "cmpss";
- } else if (PatchedName.endswith("sd")) {
- PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
- } else if (PatchedName.endswith("ps")) {
- PatchedName = IsVCMP ? "vcmpps" : "cmpps";
- } else {
- assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
- PatchedName = IsVCMP ? "vcmppd" : "cmppd";
- }
+ if (ComparisonCode != ~0U && (IsVCMP || ComparisonCode < 8)) {
+
+ Operands.push_back(X86Operand::CreateToken(PatchedName.slice(0, CCIdx),
+ NameLoc));
+
+ const MCExpr *ImmOp = MCConstantExpr::Create(ComparisonCode,
+ getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+
+ PatchedName = PatchedName.substr(PatchedName.size() - 2);
+ }
+ }
+
+ // FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
+ if (PatchedName.startswith("vpcmp") &&
+ (PatchedName.endswith("b") || PatchedName.endswith("w") ||
+ PatchedName.endswith("d") || PatchedName.endswith("q"))) {
+ unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
+ unsigned ComparisonCode = StringSwitch<unsigned>(
+ PatchedName.slice(5, PatchedName.size() - CCIdx))
+ .Case("eq", 0x0) // Only allowed on unsigned. Checked below.
+ .Case("lt", 0x1)
+ .Case("le", 0x2)
+ //.Case("false", 0x3) // Not a documented alias.
+ .Case("neq", 0x4)
+ .Case("nlt", 0x5)
+ .Case("nle", 0x6)
+ //.Case("true", 0x7) // Not a documented alias.
+ .Default(~0U);
+ if (ComparisonCode != ~0U && (ComparisonCode != 0 || CCIdx == 2)) {
+ Operands.push_back(X86Operand::CreateToken("vpcmp", NameLoc));
+
+ const MCExpr *ImmOp = MCConstantExpr::Create(ComparisonCode,
+ getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+
+ PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
+ }
+ }
+
+ // FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
+ if (PatchedName.startswith("vpcom") &&
+ (PatchedName.endswith("b") || PatchedName.endswith("w") ||
+ PatchedName.endswith("d") || PatchedName.endswith("q"))) {
+ unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
+ unsigned ComparisonCode = StringSwitch<unsigned>(
+ PatchedName.slice(5, PatchedName.size() - CCIdx))
+ .Case("lt", 0x0)
+ .Case("le", 0x1)
+ .Case("gt", 0x2)
+ .Case("ge", 0x3)
+ .Case("eq", 0x4)
+ .Case("neq", 0x5)
+ .Case("false", 0x6)
+ .Case("true", 0x7)
+ .Default(~0U);
+ if (ComparisonCode != ~0U) {
+ Operands.push_back(X86Operand::CreateToken("vpcom", NameLoc));
+
+ const MCExpr *ImmOp = MCConstantExpr::Create(ComparisonCode,
+ getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+
+ PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
}
}
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
- if (ExtraImmOp && !isParsingIntelSyntax())
- Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
-
// Determine whether this is an instruction prefix.
bool isPrefix =
Name == "lock" || Name == "rep" ||
@@ -2071,9 +2119,6 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
(isPrefix && getLexer().is(AsmToken::Slash)))
Parser.Lex();
- if (ExtraImmOp && isParsingIntelSyntax())
- Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
-
// This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
@@ -2272,6 +2317,22 @@ static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
}
+bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
+ switch (Inst.getOpcode()) {
+ default: return true;
+ case X86::INT:
+ X86Operand &Op = static_cast<X86Operand &>(*Ops[1]);
+ assert(Op.isImm() && "expected immediate");
+ int64_t Res;
+ if (!Op.getImm()->EvaluateAsAbsolute(Res) || Res > 255) {
+ Error(Op.getStartLoc(), "interrupt vector must be in range [0-255]");
+ return false;
+ }
+ return true;
+ }
+ llvm_unreachable("handle the instruction appropriately");
+}
+
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
switch (Inst.getOpcode()) {
default: return false;
@@ -2432,8 +2493,11 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
switch (MatchInstructionImpl(Operands, Inst,
ErrorInfo, MatchingInlineAsm,
isParsingIntelSyntax())) {
- default: break;
+ default: llvm_unreachable("Unexpected match result!");
case Match_Success:
+ if (!validateInstruction(Inst, Operands))
+ return true;
+
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other.
@@ -2614,7 +2678,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
for (const char *Instr : PtrSizedInstrs) {
if (Mnemonic == Instr) {
- UnsizedMemOp->Mem.Size = getPointerSize();
+ UnsizedMemOp->Mem.Size = getPointerWidth();
break;
}
}
@@ -2626,7 +2690,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVector<unsigned, 8> Match;
uint64_t ErrorInfoMissingFeature = 0;
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
- static const unsigned MopSizes[] = {8, 16, 32, 64, 80};
+ static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
UnsizedMemOp->Mem.Size = Size;
uint64_t ErrorInfoIgnore;
@@ -2648,7 +2712,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
}
// If we haven't matched anything yet, this is not a basic integer or FPU
- // operation. There shouldn't be any ambiguity in our mneumonic table, so try
+ // operation. There shouldn't be any ambiguity in our mnemonic table, so try
// matching with the unsized operand.
if (Match.empty()) {
Match.push_back(MatchInstructionImpl(Operands, Inst, ErrorInfo,
@@ -2677,6 +2741,9 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
unsigned NumSuccessfulMatches =
std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
+ if (!validateInstruction(Inst, Operands))
+ return true;
+
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
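
All three mnemonic hacks above share one shape: peel the condition-code substring out of the pseudo-mnemonic, push the base mnemonic as a token, push the decoded code as an immediate operand, and let the remaining type suffix match normally. A minimal sketch of that decomposition for the vpcom family, using a plain table instead of LLVM's StringSwitch; splitVPCom and its signature are illustrative, not part of the parser:

    #include <cstdio>
    #include <string>

    // vpcom condition-code table from the parser above (XOP encoding).
    static const char *const VPComCC[8] = {"lt", "le",  "gt",    "ge",
                                           "eq", "neq", "false", "true"};

    // Split e.g. "vpcomltub" into imm 0 and suffix "ub".
    // Returns false if the middle part is not a known condition code.
    static bool splitVPCom(const std::string &Name, unsigned &Imm,
                           std::string &Suffix) {
      if (Name.compare(0, 5, "vpcom") != 0)
        return false;
      // Suffix is "b"/"w"/"d"/"q", optionally preceded by 'u'.
      size_t SufLen = (Name.size() >= 2 && Name[Name.size() - 2] == 'u') ? 2 : 1;
      std::string CC = Name.substr(5, Name.size() - 5 - SufLen);
      for (unsigned I = 0; I != 8; ++I)
        if (CC == VPComCC[I]) {
          Imm = I;
          Suffix = Name.substr(Name.size() - SufLen);
          return true;
        }
      return false;
    }

    int main() {
      unsigned Imm; std::string Suf;
      if (splitVPCom("vpcomltub", Imm, Suf))
        std::printf("vpcom imm=%u suffix=%s\n", Imm, Suf.c_str()); // imm=0 ub
    }
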
diff --git a/lib/Target/X86/AsmParser/X86AsmParserCommon.h b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
index 72aeeaa..7610806 100644
--- a/lib/Target/X86/AsmParser/X86AsmParserCommon.h
+++ b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
@@ -34,6 +34,11 @@ inline bool isImmSExti64i32Value(uint64_t Value) {
(0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}
+inline bool isImmUnsignedi8Value(uint64_t Value) {
+ return (( Value <= 0x00000000000000FFULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
} // End of namespace llvm
#endif
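
The new predicate deliberately accepts an 8-bit immediate written either way: zero-extended ([0, 255]) or sign-extended ([-128, -1], i.e. 0xFFFFFFFFFFFFFF80 and up). A quick check of the boundary cases — equivalent to the inline above, with the vacuous <= 0xFFFFFFFFFFFFFFFF upper bound dropped:

    #include <cassert>
    #include <cstdint>

    static bool isImmUnsignedi8Value(uint64_t Value) {
      return (Value <= 0x00000000000000FFULL) ||
             (0xFFFFFFFFFFFFFF80ULL <= Value); // sign-extended [-128, -1]
    }

    int main() {
      assert(isImmUnsignedi8Value(0));
      assert(isImmUnsignedi8Value(255));             // max unsigned i8
      assert(isImmUnsignedi8Value((uint64_t)-1));    // sign-extended -1
      assert(isImmUnsignedi8Value((uint64_t)-128));  // sign-extended -128
      assert(!isImmUnsignedi8Value(256));
      assert(!isImmUnsignedi8Value((uint64_t)-129)); // outside both ranges
    }
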
diff --git a/lib/Target/X86/AsmParser/X86Operand.h b/lib/Target/X86/AsmParser/X86Operand.h
index e0fab8d..d67e119 100644
--- a/lib/Target/X86/AsmParser/X86Operand.h
+++ b/lib/Target/X86/AsmParser/X86Operand.h
@@ -53,6 +53,7 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned IndexReg;
unsigned Scale;
unsigned Size;
+ unsigned ModeSize;
};
union {
@@ -120,6 +121,10 @@ struct X86Operand : public MCParsedAsmOperand {
assert(Kind == Memory && "Invalid access!");
return Mem.Scale;
}
+ unsigned getMemModeSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.ModeSize;
+ }
bool isToken() const override {return Kind == Token; }
@@ -182,6 +187,13 @@ struct X86Operand : public MCParsedAsmOperand {
return isImmSExti64i32Value(CE->getValue());
}
+ bool isImmUnsignedi8() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ return isImmUnsignedi8Value(CE->getValue());
+ }
+
bool isOffsetOf() const override {
return OffsetOfLoc.getPointer();
}
@@ -249,6 +261,10 @@ struct X86Operand : public MCParsedAsmOperand {
!getMemIndexReg() && getMemScale() == 1;
}
+ bool isAbsMem16() const {
+ return isAbsMem() && Mem.ModeSize == 16;
+ }
+
bool isSrcIdx() const {
return !getMemIndexReg() && getMemScale() == 1 &&
(getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
@@ -288,21 +304,43 @@ struct X86Operand : public MCParsedAsmOperand {
return isMem64() && isDstIdx();
}
- bool isMemOffs8() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8);
+ bool isMemOffs() const {
+ return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
+ getMemScale() == 1;
+ }
+
+ bool isMemOffs16_8() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs16_16() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
}
- bool isMemOffs16() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16);
+ bool isMemOffs16_32() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
}
- bool isMemOffs32() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32);
+ bool isMemOffs32_8() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
}
- bool isMemOffs64() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64);
+ bool isMemOffs32_16() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs32_32() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs32_64() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
+ }
+ bool isMemOffs64_8() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs64_16() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs64_32() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs64_64() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
}
bool isReg() const override { return Kind == Register; }
@@ -430,8 +468,9 @@ struct X86Operand : public MCParsedAsmOperand {
/// Create an absolute memory operand.
static std::unique_ptr<X86Operand>
- CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
- StringRef SymName = StringRef(), void *OpDecl = nullptr) {
+ CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
+ unsigned Size = 0, StringRef SymName = StringRef(),
+ void *OpDecl = nullptr) {
auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -439,6 +478,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.IndexReg = 0;
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
+ Res->Mem.ModeSize = ModeSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
@@ -447,9 +487,9 @@ struct X86Operand : public MCParsedAsmOperand {
/// Create a generalized memory operand.
static std::unique_ptr<X86Operand>
- CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
- unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
- unsigned Size = 0, StringRef SymName = StringRef(),
+ CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
+ unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
+ SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
void *OpDecl = nullptr) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
@@ -465,6 +505,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.IndexReg = IndexReg;
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
+ Res->Mem.ModeSize = ModeSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
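
ModeSize records the CPU mode (16, 32 or 64) that was active when the operand was parsed, independent of the operand's own Size; the isMemOffs<mode>_<size> matrix above keys on both, so a moffs operand can only match forms legal in the current mode. A compact sketch of the same predicate family, with one parameterized function standing in for the ten members (struct and names are illustrative):

    #include <cstdio>

    struct MemOp {
      unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
      unsigned Size = 0;      // operand size in bits, 0 = unsized
      unsigned ModeSize = 0;  // CPU mode at parse time: 16, 32 or 64
    };

    // One predicate instead of ten: moffs form, in the given mode,
    // with a matching (or unspecified) operand size.
    static bool isMemOffs(const MemOp &Op, unsigned Mode, unsigned Bits) {
      return !Op.BaseReg && !Op.IndexReg && Op.Scale == 1 &&
             Op.ModeSize == Mode && (!Op.Size || Op.Size == Bits);
    }

    int main() {
      MemOp Op;
      Op.ModeSize = 64;
      std::printf("%d\n", isMemOffs(Op, 64, 32)); // 1: unsized matches any width
      Op.Size = 16;
      std::printf("%d\n", isMemOffs(Op, 64, 32)); // 0: sized operand must agree
    }
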
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 1083fad..be61b47 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -14,6 +14,7 @@ add_public_tablegen_target(X86CommonTableGen)
set(sources
X86AsmPrinter.cpp
+ X86CallFrameOptimization.cpp
X86FastISel.cpp
X86FloatingPoint.cpp
X86FrameLowering.cpp
@@ -38,7 +39,7 @@ if( CMAKE_CL_64 )
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj
MAIN_DEPENDENCY X86CompilationCallback_Win64.asm
- COMMAND ${CMAKE_ASM_MASM_COMPILER} /Fo ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj /c ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm
+ COMMAND ${CMAKE_ASM_MASM_COMPILER} /nologo /Fo ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj /c ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm
)
set(sources ${sources} ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj)
endif()
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 5e8c2d6..99fb1ab 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -51,8 +51,8 @@ const char *llvm::X86Disassembler::GetInstrName(unsigned Opcode,
#define debug(s) DEBUG(Debug(__FILE__, __LINE__, s));
-namespace llvm {
-
+namespace llvm {
+
// Fill-ins to make the compiler happy. These constants are never actually
// assigned; they are just filler to make an automatically-generated switch
// statement work.
@@ -127,11 +127,11 @@ static int regionReader(const void *Arg, uint8_t *Byte, uint64_t Address) {
static void logger(void* arg, const char* log) {
if (!arg)
return;
-
+
raw_ostream &vStream = *(static_cast<raw_ostream*>(arg));
vStream << log << "\n";
-}
-
+}
+
//
// Public interface for the disassembler
//
@@ -184,7 +184,7 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
}
/// tryAddingSymbolicOperand - tries to add a symbolic operand in place of the
-/// immediate Value in the MCInst.
+/// immediate Value in the MCInst.
///
/// @param Value - The immediate Value, has had any PC adjustment made by
/// the caller.
@@ -196,7 +196,7 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
/// If the getOpInfo() function was set when setupForSymbolicDisassembly() was
/// called then that function is called to get any symbolic information for the
/// immediate in the instruction using the Address, Offset and Width. If that
-/// returns non-zero then the symbolic information it returns is used to create
+/// returns non-zero then the symbolic information it returns is used to create
/// an MCExpr and that is added as an operand to the MCInst. If getOpInfo()
/// returns zero and isBranch is true then a symbol look up for immediate Value
/// is done and if a symbol is found an MCExpr is created with that, else
@@ -204,8 +204,8 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
/// if it adds an operand to the MCInst and false otherwise.
static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
uint64_t Address, uint64_t Offset,
- uint64_t Width, MCInst &MI,
- const MCDisassembler *Dis) {
+ uint64_t Width, MCInst &MI,
+ const MCDisassembler *Dis) {
return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch,
Offset, Width);
}
@@ -215,7 +215,7 @@ static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
/// These can often be addresses in a literal pool. The Address of the
/// instruction and its immediate Value are used to determine the address
/// being referenced in the literal pool entry. The SymbolLookUp call back will
-/// return a pointer to a literal 'C' string if the referenced address is an
+/// return a pointer to a literal 'C' string if the referenced address is an
/// address into a section with 'C' string literals.
static void tryAddingPcLoadReferenceComment(uint64_t Address, uint64_t Value,
const void *Decoder) {
@@ -287,7 +287,7 @@ static bool translateDstIndex(MCInst &mcInst, InternalInstruction &insn) {
static void translateImmediate(MCInst &mcInst, uint64_t immediate,
const OperandSpecifier &operand,
InternalInstruction &insn,
- const MCDisassembler *Dis) {
+ const MCDisassembler *Dis) {
// Sign-extend the immediate if necessary.
OperandType type = (OperandType)operand.type;
@@ -320,24 +320,12 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
// By default sign-extend all X86 immediates based on their encoding.
else if (type == TYPE_IMM8 || type == TYPE_IMM16 || type == TYPE_IMM32 ||
type == TYPE_IMM64 || type == TYPE_IMMv) {
- uint32_t Opcode = mcInst.getOpcode();
switch (operand.encoding) {
default:
break;
case ENCODING_IB:
- // Special case those X86 instructions that use the imm8 as a set of
- // bits, bit count, etc. and are not sign-extend.
- if (Opcode != X86::BLENDPSrri && Opcode != X86::BLENDPDrri &&
- Opcode != X86::PBLENDWrri && Opcode != X86::MPSADBWrri &&
- Opcode != X86::DPPSrri && Opcode != X86::DPPDrri &&
- Opcode != X86::INSERTPSrr && Opcode != X86::VBLENDPSYrri &&
- Opcode != X86::VBLENDPSYrmi && Opcode != X86::VBLENDPDYrri &&
- Opcode != X86::VBLENDPDYrmi && Opcode != X86::VPBLENDWrri &&
- Opcode != X86::VMPSADBWrri && Opcode != X86::VDPPSYrri &&
- Opcode != X86::VDPPSYrmi && Opcode != X86::VDPPDrri &&
- Opcode != X86::VINSERTPSrr)
- if(immediate & 0x80)
- immediate |= ~(0xffull);
+ if(immediate & 0x80)
+ immediate |= ~(0xffull);
break;
case ENCODING_IW:
if(immediate & 0x8000)
@@ -350,6 +338,199 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
case ENCODING_IO:
break;
}
+ } else if (type == TYPE_IMM3) {
+ // Check for immediates that printSSECC can't handle.
+ if (immediate >= 8) {
+ unsigned NewOpc;
+ switch (mcInst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case X86::CMPPDrmi: NewOpc = X86::CMPPDrmi_alt; break;
+ case X86::CMPPDrri: NewOpc = X86::CMPPDrri_alt; break;
+ case X86::CMPPSrmi: NewOpc = X86::CMPPSrmi_alt; break;
+ case X86::CMPPSrri: NewOpc = X86::CMPPSrri_alt; break;
+ case X86::CMPSDrm: NewOpc = X86::CMPSDrm_alt; break;
+ case X86::CMPSDrr: NewOpc = X86::CMPSDrr_alt; break;
+ case X86::CMPSSrm: NewOpc = X86::CMPSSrm_alt; break;
+ case X86::CMPSSrr: NewOpc = X86::CMPSSrr_alt; break;
+ case X86::VPCOMBri: NewOpc = X86::VPCOMBri_alt; break;
+ case X86::VPCOMBmi: NewOpc = X86::VPCOMBmi_alt; break;
+ case X86::VPCOMWri: NewOpc = X86::VPCOMWri_alt; break;
+ case X86::VPCOMWmi: NewOpc = X86::VPCOMWmi_alt; break;
+ case X86::VPCOMDri: NewOpc = X86::VPCOMDri_alt; break;
+ case X86::VPCOMDmi: NewOpc = X86::VPCOMDmi_alt; break;
+ case X86::VPCOMQri: NewOpc = X86::VPCOMQri_alt; break;
+ case X86::VPCOMQmi: NewOpc = X86::VPCOMQmi_alt; break;
+ case X86::VPCOMUBri: NewOpc = X86::VPCOMUBri_alt; break;
+ case X86::VPCOMUBmi: NewOpc = X86::VPCOMUBmi_alt; break;
+ case X86::VPCOMUWri: NewOpc = X86::VPCOMUWri_alt; break;
+ case X86::VPCOMUWmi: NewOpc = X86::VPCOMUWmi_alt; break;
+ case X86::VPCOMUDri: NewOpc = X86::VPCOMUDri_alt; break;
+ case X86::VPCOMUDmi: NewOpc = X86::VPCOMUDmi_alt; break;
+ case X86::VPCOMUQri: NewOpc = X86::VPCOMUQri_alt; break;
+ case X86::VPCOMUQmi: NewOpc = X86::VPCOMUQmi_alt; break;
+ }
+ // Switch opcode to the one that doesn't get special printing.
+ mcInst.setOpcode(NewOpc);
+ }
+ } else if (type == TYPE_IMM5) {
+ // Check for immediates that printAVXCC can't handle.
+ if (immediate >= 32) {
+ unsigned NewOpc;
+ switch (mcInst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case X86::VCMPPDrmi: NewOpc = X86::VCMPPDrmi_alt; break;
+ case X86::VCMPPDrri: NewOpc = X86::VCMPPDrri_alt; break;
+ case X86::VCMPPSrmi: NewOpc = X86::VCMPPSrmi_alt; break;
+ case X86::VCMPPSrri: NewOpc = X86::VCMPPSrri_alt; break;
+ case X86::VCMPSDrm: NewOpc = X86::VCMPSDrm_alt; break;
+ case X86::VCMPSDrr: NewOpc = X86::VCMPSDrr_alt; break;
+ case X86::VCMPSSrm: NewOpc = X86::VCMPSSrm_alt; break;
+ case X86::VCMPSSrr: NewOpc = X86::VCMPSSrr_alt; break;
+ case X86::VCMPPDYrmi: NewOpc = X86::VCMPPDYrmi_alt; break;
+ case X86::VCMPPDYrri: NewOpc = X86::VCMPPDYrri_alt; break;
+ case X86::VCMPPSYrmi: NewOpc = X86::VCMPPSYrmi_alt; break;
+ case X86::VCMPPSYrri: NewOpc = X86::VCMPPSYrri_alt; break;
+ case X86::VCMPPDZrmi: NewOpc = X86::VCMPPDZrmi_alt; break;
+ case X86::VCMPPDZrri: NewOpc = X86::VCMPPDZrri_alt; break;
+ case X86::VCMPPSZrmi: NewOpc = X86::VCMPPSZrmi_alt; break;
+ case X86::VCMPPSZrri: NewOpc = X86::VCMPPSZrri_alt; break;
+ case X86::VCMPSDZrm: NewOpc = X86::VCMPSDZrmi_alt; break;
+ case X86::VCMPSDZrr: NewOpc = X86::VCMPSDZrri_alt; break;
+ case X86::VCMPSSZrm: NewOpc = X86::VCMPSSZrmi_alt; break;
+ case X86::VCMPSSZrr: NewOpc = X86::VCMPSSZrri_alt; break;
+ }
+ // Switch opcode to the one that doesn't get special printing.
+ mcInst.setOpcode(NewOpc);
+ }
+ } else if (type == TYPE_AVX512ICC) {
+ if (immediate >= 8 || ((immediate & 0x3) == 3)) {
+ unsigned NewOpc;
+ switch (mcInst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case X86::VPCMPBZ128rmi: NewOpc = X86::VPCMPBZ128rmi_alt; break;
+ case X86::VPCMPBZ128rmik: NewOpc = X86::VPCMPBZ128rmik_alt; break;
+ case X86::VPCMPBZ128rri: NewOpc = X86::VPCMPBZ128rri_alt; break;
+ case X86::VPCMPBZ128rrik: NewOpc = X86::VPCMPBZ128rrik_alt; break;
+ case X86::VPCMPBZ256rmi: NewOpc = X86::VPCMPBZ256rmi_alt; break;
+ case X86::VPCMPBZ256rmik: NewOpc = X86::VPCMPBZ256rmik_alt; break;
+ case X86::VPCMPBZ256rri: NewOpc = X86::VPCMPBZ256rri_alt; break;
+ case X86::VPCMPBZ256rrik: NewOpc = X86::VPCMPBZ256rrik_alt; break;
+ case X86::VPCMPBZrmi: NewOpc = X86::VPCMPBZrmi_alt; break;
+ case X86::VPCMPBZrmik: NewOpc = X86::VPCMPBZrmik_alt; break;
+ case X86::VPCMPBZrri: NewOpc = X86::VPCMPBZrri_alt; break;
+ case X86::VPCMPBZrrik: NewOpc = X86::VPCMPBZrrik_alt; break;
+ case X86::VPCMPDZ128rmi: NewOpc = X86::VPCMPDZ128rmi_alt; break;
+ case X86::VPCMPDZ128rmib: NewOpc = X86::VPCMPDZ128rmib_alt; break;
+ case X86::VPCMPDZ128rmibk: NewOpc = X86::VPCMPDZ128rmibk_alt; break;
+ case X86::VPCMPDZ128rmik: NewOpc = X86::VPCMPDZ128rmik_alt; break;
+ case X86::VPCMPDZ128rri: NewOpc = X86::VPCMPDZ128rri_alt; break;
+ case X86::VPCMPDZ128rrik: NewOpc = X86::VPCMPDZ128rrik_alt; break;
+ case X86::VPCMPDZ256rmi: NewOpc = X86::VPCMPDZ256rmi_alt; break;
+ case X86::VPCMPDZ256rmib: NewOpc = X86::VPCMPDZ256rmib_alt; break;
+ case X86::VPCMPDZ256rmibk: NewOpc = X86::VPCMPDZ256rmibk_alt; break;
+ case X86::VPCMPDZ256rmik: NewOpc = X86::VPCMPDZ256rmik_alt; break;
+ case X86::VPCMPDZ256rri: NewOpc = X86::VPCMPDZ256rri_alt; break;
+ case X86::VPCMPDZ256rrik: NewOpc = X86::VPCMPDZ256rrik_alt; break;
+ case X86::VPCMPDZrmi: NewOpc = X86::VPCMPDZrmi_alt; break;
+ case X86::VPCMPDZrmib: NewOpc = X86::VPCMPDZrmib_alt; break;
+ case X86::VPCMPDZrmibk: NewOpc = X86::VPCMPDZrmibk_alt; break;
+ case X86::VPCMPDZrmik: NewOpc = X86::VPCMPDZrmik_alt; break;
+ case X86::VPCMPDZrri: NewOpc = X86::VPCMPDZrri_alt; break;
+ case X86::VPCMPDZrrik: NewOpc = X86::VPCMPDZrrik_alt; break;
+ case X86::VPCMPQZ128rmi: NewOpc = X86::VPCMPQZ128rmi_alt; break;
+ case X86::VPCMPQZ128rmib: NewOpc = X86::VPCMPQZ128rmib_alt; break;
+ case X86::VPCMPQZ128rmibk: NewOpc = X86::VPCMPQZ128rmibk_alt; break;
+ case X86::VPCMPQZ128rmik: NewOpc = X86::VPCMPQZ128rmik_alt; break;
+ case X86::VPCMPQZ128rri: NewOpc = X86::VPCMPQZ128rri_alt; break;
+ case X86::VPCMPQZ128rrik: NewOpc = X86::VPCMPQZ128rrik_alt; break;
+ case X86::VPCMPQZ256rmi: NewOpc = X86::VPCMPQZ256rmi_alt; break;
+ case X86::VPCMPQZ256rmib: NewOpc = X86::VPCMPQZ256rmib_alt; break;
+ case X86::VPCMPQZ256rmibk: NewOpc = X86::VPCMPQZ256rmibk_alt; break;
+ case X86::VPCMPQZ256rmik: NewOpc = X86::VPCMPQZ256rmik_alt; break;
+ case X86::VPCMPQZ256rri: NewOpc = X86::VPCMPQZ256rri_alt; break;
+ case X86::VPCMPQZ256rrik: NewOpc = X86::VPCMPQZ256rrik_alt; break;
+ case X86::VPCMPQZrmi: NewOpc = X86::VPCMPQZrmi_alt; break;
+ case X86::VPCMPQZrmib: NewOpc = X86::VPCMPQZrmib_alt; break;
+ case X86::VPCMPQZrmibk: NewOpc = X86::VPCMPQZrmibk_alt; break;
+ case X86::VPCMPQZrmik: NewOpc = X86::VPCMPQZrmik_alt; break;
+ case X86::VPCMPQZrri: NewOpc = X86::VPCMPQZrri_alt; break;
+ case X86::VPCMPQZrrik: NewOpc = X86::VPCMPQZrrik_alt; break;
+ case X86::VPCMPUBZ128rmi: NewOpc = X86::VPCMPUBZ128rmi_alt; break;
+ case X86::VPCMPUBZ128rmik: NewOpc = X86::VPCMPUBZ128rmik_alt; break;
+ case X86::VPCMPUBZ128rri: NewOpc = X86::VPCMPUBZ128rri_alt; break;
+ case X86::VPCMPUBZ128rrik: NewOpc = X86::VPCMPUBZ128rrik_alt; break;
+ case X86::VPCMPUBZ256rmi: NewOpc = X86::VPCMPUBZ256rmi_alt; break;
+ case X86::VPCMPUBZ256rmik: NewOpc = X86::VPCMPUBZ256rmik_alt; break;
+ case X86::VPCMPUBZ256rri: NewOpc = X86::VPCMPUBZ256rri_alt; break;
+ case X86::VPCMPUBZ256rrik: NewOpc = X86::VPCMPUBZ256rrik_alt; break;
+ case X86::VPCMPUBZrmi: NewOpc = X86::VPCMPUBZrmi_alt; break;
+ case X86::VPCMPUBZrmik: NewOpc = X86::VPCMPUBZrmik_alt; break;
+ case X86::VPCMPUBZrri: NewOpc = X86::VPCMPUBZrri_alt; break;
+ case X86::VPCMPUBZrrik: NewOpc = X86::VPCMPUBZrrik_alt; break;
+ case X86::VPCMPUDZ128rmi: NewOpc = X86::VPCMPUDZ128rmi_alt; break;
+ case X86::VPCMPUDZ128rmib: NewOpc = X86::VPCMPUDZ128rmib_alt; break;
+ case X86::VPCMPUDZ128rmibk: NewOpc = X86::VPCMPUDZ128rmibk_alt; break;
+ case X86::VPCMPUDZ128rmik: NewOpc = X86::VPCMPUDZ128rmik_alt; break;
+ case X86::VPCMPUDZ128rri: NewOpc = X86::VPCMPUDZ128rri_alt; break;
+ case X86::VPCMPUDZ128rrik: NewOpc = X86::VPCMPUDZ128rrik_alt; break;
+ case X86::VPCMPUDZ256rmi: NewOpc = X86::VPCMPUDZ256rmi_alt; break;
+ case X86::VPCMPUDZ256rmib: NewOpc = X86::VPCMPUDZ256rmib_alt; break;
+ case X86::VPCMPUDZ256rmibk: NewOpc = X86::VPCMPUDZ256rmibk_alt; break;
+ case X86::VPCMPUDZ256rmik: NewOpc = X86::VPCMPUDZ256rmik_alt; break;
+ case X86::VPCMPUDZ256rri: NewOpc = X86::VPCMPUDZ256rri_alt; break;
+ case X86::VPCMPUDZ256rrik: NewOpc = X86::VPCMPUDZ256rrik_alt; break;
+ case X86::VPCMPUDZrmi: NewOpc = X86::VPCMPUDZrmi_alt; break;
+ case X86::VPCMPUDZrmib: NewOpc = X86::VPCMPUDZrmib_alt; break;
+ case X86::VPCMPUDZrmibk: NewOpc = X86::VPCMPUDZrmibk_alt; break;
+ case X86::VPCMPUDZrmik: NewOpc = X86::VPCMPUDZrmik_alt; break;
+ case X86::VPCMPUDZrri: NewOpc = X86::VPCMPUDZrri_alt; break;
+ case X86::VPCMPUDZrrik: NewOpc = X86::VPCMPUDZrrik_alt; break;
+ case X86::VPCMPUQZ128rmi: NewOpc = X86::VPCMPUQZ128rmi_alt; break;
+ case X86::VPCMPUQZ128rmib: NewOpc = X86::VPCMPUQZ128rmib_alt; break;
+ case X86::VPCMPUQZ128rmibk: NewOpc = X86::VPCMPUQZ128rmibk_alt; break;
+ case X86::VPCMPUQZ128rmik: NewOpc = X86::VPCMPUQZ128rmik_alt; break;
+ case X86::VPCMPUQZ128rri: NewOpc = X86::VPCMPUQZ128rri_alt; break;
+ case X86::VPCMPUQZ128rrik: NewOpc = X86::VPCMPUQZ128rrik_alt; break;
+ case X86::VPCMPUQZ256rmi: NewOpc = X86::VPCMPUQZ256rmi_alt; break;
+ case X86::VPCMPUQZ256rmib: NewOpc = X86::VPCMPUQZ256rmib_alt; break;
+ case X86::VPCMPUQZ256rmibk: NewOpc = X86::VPCMPUQZ256rmibk_alt; break;
+ case X86::VPCMPUQZ256rmik: NewOpc = X86::VPCMPUQZ256rmik_alt; break;
+ case X86::VPCMPUQZ256rri: NewOpc = X86::VPCMPUQZ256rri_alt; break;
+ case X86::VPCMPUQZ256rrik: NewOpc = X86::VPCMPUQZ256rrik_alt; break;
+ case X86::VPCMPUQZrmi: NewOpc = X86::VPCMPUQZrmi_alt; break;
+ case X86::VPCMPUQZrmib: NewOpc = X86::VPCMPUQZrmib_alt; break;
+ case X86::VPCMPUQZrmibk: NewOpc = X86::VPCMPUQZrmibk_alt; break;
+ case X86::VPCMPUQZrmik: NewOpc = X86::VPCMPUQZrmik_alt; break;
+ case X86::VPCMPUQZrri: NewOpc = X86::VPCMPUQZrri_alt; break;
+ case X86::VPCMPUQZrrik: NewOpc = X86::VPCMPUQZrrik_alt; break;
+ case X86::VPCMPUWZ128rmi: NewOpc = X86::VPCMPUWZ128rmi_alt; break;
+ case X86::VPCMPUWZ128rmik: NewOpc = X86::VPCMPUWZ128rmik_alt; break;
+ case X86::VPCMPUWZ128rri: NewOpc = X86::VPCMPUWZ128rri_alt; break;
+ case X86::VPCMPUWZ128rrik: NewOpc = X86::VPCMPUWZ128rrik_alt; break;
+ case X86::VPCMPUWZ256rmi: NewOpc = X86::VPCMPUWZ256rmi_alt; break;
+ case X86::VPCMPUWZ256rmik: NewOpc = X86::VPCMPUWZ256rmik_alt; break;
+ case X86::VPCMPUWZ256rri: NewOpc = X86::VPCMPUWZ256rri_alt; break;
+ case X86::VPCMPUWZ256rrik: NewOpc = X86::VPCMPUWZ256rrik_alt; break;
+ case X86::VPCMPUWZrmi: NewOpc = X86::VPCMPUWZrmi_alt; break;
+ case X86::VPCMPUWZrmik: NewOpc = X86::VPCMPUWZrmik_alt; break;
+ case X86::VPCMPUWZrri: NewOpc = X86::VPCMPUWZrri_alt; break;
+ case X86::VPCMPUWZrrik: NewOpc = X86::VPCMPUWZrrik_alt; break;
+ case X86::VPCMPWZ128rmi: NewOpc = X86::VPCMPWZ128rmi_alt; break;
+ case X86::VPCMPWZ128rmik: NewOpc = X86::VPCMPWZ128rmik_alt; break;
+ case X86::VPCMPWZ128rri: NewOpc = X86::VPCMPWZ128rri_alt; break;
+ case X86::VPCMPWZ128rrik: NewOpc = X86::VPCMPWZ128rrik_alt; break;
+ case X86::VPCMPWZ256rmi: NewOpc = X86::VPCMPWZ256rmi_alt; break;
+ case X86::VPCMPWZ256rmik: NewOpc = X86::VPCMPWZ256rmik_alt; break;
+ case X86::VPCMPWZ256rri: NewOpc = X86::VPCMPWZ256rri_alt; break;
+ case X86::VPCMPWZ256rrik: NewOpc = X86::VPCMPWZ256rrik_alt; break;
+ case X86::VPCMPWZrmi: NewOpc = X86::VPCMPWZrmi_alt; break;
+ case X86::VPCMPWZrmik: NewOpc = X86::VPCMPWZrmik_alt; break;
+ case X86::VPCMPWZrri: NewOpc = X86::VPCMPWZrri_alt; break;
+ case X86::VPCMPWZrrik: NewOpc = X86::VPCMPWZrrik_alt; break;
+ }
+ // Switch opcode to the one that doesn't get special printing.
+ mcInst.setOpcode(NewOpc);
+ }
}
switch (type) {
@@ -407,7 +588,7 @@ static bool translateRMRegister(MCInst &mcInst,
debug("A R/M register operand may not have a SIB byte");
return true;
}
-
+
switch (insn.eaBase) {
default:
debug("Unexpected EA base register");
@@ -427,7 +608,7 @@ static bool translateRMRegister(MCInst &mcInst,
ALL_REGS
#undef ENTRY
}
-
+
return false;
}
@@ -440,26 +621,26 @@ static bool translateRMRegister(MCInst &mcInst,
/// from.
/// @return - 0 on success; nonzero otherwise
static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
- const MCDisassembler *Dis) {
+ const MCDisassembler *Dis) {
// Addresses in an MCInst are represented as five operands:
- // 1. basereg (register) The R/M base, or (if there is a SIB) the
+ // 1. basereg (register) The R/M base, or (if there is a SIB) the
// SIB base
- // 2. scaleamount (immediate) 1, or (if there is a SIB) the specified
+ // 2. scaleamount (immediate) 1, or (if there is a SIB) the specified
// scale amount
// 3. indexreg (register) x86_registerNONE, or (if there is a SIB)
- // the index (which is multiplied by the
+ // the index (which is multiplied by the
// scale amount)
// 4. displacement (immediate) 0, or the displacement if there is one
// 5. segmentreg (register) x86_registerNONE for now, but could be set
// if we have segment overrides
-
+
MCOperand baseReg;
MCOperand scaleAmount;
MCOperand indexReg;
MCOperand displacement;
MCOperand segmentReg;
uint64_t pcrel = 0;
-
+
if (insn.eaBase == EA_BASE_sib || insn.eaBase == EA_BASE_sib64) {
if (insn.sibBase != SIB_BASE_NONE) {
switch (insn.sibBase) {
@@ -512,7 +693,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
(insn.addressSize == 8 ? SIB_INDEX_RAX:SIB_INDEX_EAX);
SIBIndex IndexBase = IndexIs512 ? SIB_INDEX_ZMM0 :
IndexIs256 ? SIB_INDEX_YMM0 : SIB_INDEX_XMM0;
- insn.sibIndex = (SIBIndex)(IndexBase +
+ insn.sibIndex = (SIBIndex)(IndexBase +
(insn.sibIndex == SIB_INDEX_NONE ? 4 : IndexOffset));
}
@@ -534,7 +715,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
} else {
indexReg = MCOperand::CreateReg(0);
}
-
+
scaleAmount = MCOperand::CreateImm(insn.sibScale);
} else {
switch (insn.eaBase) {
@@ -553,7 +734,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
}
else
baseReg = MCOperand::CreateReg(0);
-
+
indexReg = MCOperand::CreateReg(0);
break;
case EA_BASE_BX_SI:
@@ -584,7 +765,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
// placeholders to keep the compiler happy.
#define ENTRY(x) \
case EA_BASE_##x: \
- baseReg = MCOperand::CreateReg(X86::x); break;
+ baseReg = MCOperand::CreateReg(X86::x); break;
ALL_EA_BASES
#undef ENTRY
#define ENTRY(x) case EA_REG_##x:
@@ -595,14 +776,14 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
return true;
}
}
-
+
scaleAmount = MCOperand::CreateImm(1);
}
-
+
displacement = MCOperand::CreateImm(insn.displacement);
segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]);
-
+
mcInst.addOperand(baseReg);
mcInst.addOperand(scaleAmount);
mcInst.addOperand(indexReg);
@@ -623,7 +804,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
/// from.
/// @return - 0 on success; nonzero otherwise
static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
- InternalInstruction &insn, const MCDisassembler *Dis) {
+ InternalInstruction &insn, const MCDisassembler *Dis) {
switch (operand.type) {
default:
debug("Unexpected type for a R/M operand");
@@ -633,8 +814,6 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_R32:
case TYPE_R64:
case TYPE_Rv:
- case TYPE_MM:
- case TYPE_MM32:
case TYPE_MM64:
case TYPE_XMM:
case TYPE_XMM32:
@@ -660,9 +839,6 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_M32FP:
case TYPE_M64FP:
case TYPE_M80FP:
- case TYPE_M16INT:
- case TYPE_M32INT:
- case TYPE_M64INT:
case TYPE_M1616:
case TYPE_M1632:
case TYPE_M1664:
@@ -670,7 +846,7 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
return translateRMMemory(mcInst, insn, Dis);
}
}
-
+
/// translateFPRegister - Translates a stack position on the FPU stack to its
/// LLVM form, and appends it to an MCInst.
///
@@ -698,7 +874,7 @@ static bool translateMaskRegister(MCInst &mcInst,
return false;
}
-/// translateOperand - Translates an operand stored in an internal instruction
+/// translateOperand - Translates an operand stored in an internal instruction
/// to LLVM's format and appends it to an MCInst.
///
/// @param mcInst - The MCInst to append to.
@@ -707,7 +883,7 @@ static bool translateMaskRegister(MCInst &mcInst,
/// @return - false on success; true otherwise.
static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
InternalInstruction &insn,
- const MCDisassembler *Dis) {
+ const MCDisassembler *Dis) {
switch (operand.encoding) {
default:
debug("Unhandled operand encoding during translation");
@@ -761,7 +937,7 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
insn, Dis);
}
}
-
+
/// translateInstruction - Translates an internal instruction and all its
/// operands to an MCInst.
///
@@ -770,12 +946,12 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
/// @return - false on success; true otherwise.
static bool translateInstruction(MCInst &mcInst,
InternalInstruction &insn,
- const MCDisassembler *Dis) {
+ const MCDisassembler *Dis) {
if (!insn.spec) {
debug("Instruction has no specification");
return true;
}
-
+
mcInst.setOpcode(insn.instructionID);
// If when reading the prefix bytes we determined the overlapping 0xf2 or 0xf3
// prefix bytes should be disassembled as xrelease and xacquire then set the
@@ -786,9 +962,9 @@ static bool translateInstruction(MCInst &mcInst,
else if(mcInst.getOpcode() == X86::REPNE_PREFIX)
mcInst.setOpcode(X86::XACQUIRE_PREFIX);
}
-
+
insn.numImmediatesTranslated = 0;
-
+
for (const auto &Op : insn.operands) {
if (Op.encoding != ENCODING_NONE) {
if (translateOperand(mcInst, Op, insn, Dis)) {
@@ -796,7 +972,7 @@ static bool translateInstruction(MCInst &mcInst,
}
}
}
-
+
return false;
}
@@ -807,9 +983,9 @@ static MCDisassembler *createX86Disassembler(const Target &T,
return new X86Disassembler::X86GenericDisassembler(STI, Ctx, std::move(MII));
}
-extern "C" void LLVMInitializeX86Disassembler() {
+extern "C" void LLVMInitializeX86Disassembler() {
// Register the disassembler.
- TargetRegistry::RegisterMCDisassembler(TheX86_32Target,
+ TargetRegistry::RegisterMCDisassembler(TheX86_32Target,
createX86Disassembler);
TargetRegistry::RegisterMCDisassembler(TheX86_64Target,
createX86Disassembler);
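
With the imm8 range checks gone from the decoder (see the readOperands change below), out-of-range condition codes now reach the printer, so the disassembler reroutes them to *_alt twin opcodes whose printers emit a raw $imm instead of a mnemonic alias. A sketch of the dispatch for the 3-bit TYPE_IMM3 case; the enum here is an illustrative stand-in for the generated opcode list:

    #include <cstdio>

    // Illustrative opcode pairs: each printable form has a raw-immediate twin.
    enum Opcode { CMPPSrri, CMPPSrri_alt, CMPPDrri, CMPPDrri_alt };

    // For a 3-bit SSE condition code, immediates 0-7 have mnemonic aliases
    // (cmpeqps, cmpltps, ...); anything else must print as cmpps $imm.
    static Opcode selectOpcode(Opcode Opc, unsigned Imm) {
      if (Imm < 8)
        return Opc; // a printable alias exists
      switch (Opc) {
      case CMPPSrri: return CMPPSrri_alt;
      case CMPPDrri: return CMPPDrri_alt;
      default:       return Opc; // already an _alt form
      }
    }

    int main() {
      std::printf("%d\n", (int)selectOpcode(CMPPSrri, 2));  // stays CMPPSrri
      std::printf("%d\n", (int)selectOpcode(CMPPSrri, 11)); // switches to _alt
    }
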
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
index 98b3440..619a0d4 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
@@ -975,27 +975,16 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
if (insn->rexPrefix & 0x08)
attrMask |= ATTR_REXW;
- if (getIDWithAttrMask(&instructionID, insn, attrMask))
- return -1;
-
/*
* JCXZ/JECXZ need special handling for 16-bit mode because the meaning
* of the AdSize prefix is inverted w.r.t. 32-bit mode.
*/
- if (insn->mode == MODE_16BIT && insn->opcode == 0xE3) {
- const struct InstructionSpecifier *spec;
- spec = specifierForUID(instructionID);
+ if (insn->mode == MODE_16BIT && insn->opcodeType == ONEBYTE &&
+ insn->opcode == 0xE3)
+ attrMask ^= ATTR_ADSIZE;
- /*
- * Check for Ii8PCRel instructions. We could alternatively do a
- * string-compare on the names, but this is probably cheaper.
- */
- if (x86OperandSets[spec->operands][0].type == TYPE_REL8) {
- attrMask ^= ATTR_ADSIZE;
- if (getIDWithAttrMask(&instructionID, insn, attrMask))
- return -1;
- }
- }
+ if (getIDWithAttrMask(&instructionID, insn, attrMask))
+ return -1;
/* The following clauses compensate for limitations of the tables. */
@@ -1030,6 +1019,32 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
}
}
+ /*
+ * Absolute moves need special handling.
+ * -For 16-bit mode, because the meaning of the AdSize and OpSize prefixes
+ *  is inverted w.r.t. 32-bit mode.
+ * -For 32-bit mode we need to ensure the ADSIZE prefix is observed in
+ * any position.
+ */
+ if (insn->opcodeType == ONEBYTE && ((insn->opcode & 0xFC) == 0xA0)) {
+ /* Make sure we observed the prefixes in any position. */
+ if (insn->prefixPresent[0x67])
+ attrMask |= ATTR_ADSIZE;
+ if (insn->prefixPresent[0x66])
+ attrMask |= ATTR_OPSIZE;
+
+ /* In 16-bit, invert the attributes. */
+ if (insn->mode == MODE_16BIT)
+ attrMask ^= ATTR_ADSIZE | ATTR_OPSIZE;
+
+ if (getIDWithAttrMask(&instructionID, insn, attrMask))
+ return -1;
+
+ insn->instructionID = instructionID;
+ insn->spec = specifierForUID(instructionID);
+ return 0;
+ }
+
if ((insn->mode == MODE_16BIT || insn->prefixPresent[0x66]) &&
!(attrMask & ATTR_OPSIZE)) {
/*
@@ -1445,22 +1460,14 @@ static int readModRM(struct InternalInstruction* insn) {
case TYPE_VK16: \
return prefix##_K0 + index; \
case TYPE_MM64: \
- case TYPE_MM32: \
- case TYPE_MM: \
- if (index > 7) \
- *valid = 0; \
- return prefix##_MM0 + index; \
+ return prefix##_MM0 + (index & 0x7); \
case TYPE_SEGMENTREG: \
if (index > 5) \
*valid = 0; \
return prefix##_ES + index; \
case TYPE_DEBUGREG: \
- if (index > 7) \
- *valid = 0; \
return prefix##_DR0 + index; \
case TYPE_CONTROLREG: \
- if (index > 8) \
- *valid = 0; \
return prefix##_CR0 + index; \
} \
}
@@ -1737,12 +1744,6 @@ static int readOperands(struct InternalInstruction* insn) {
}
if (readImmediate(insn, 1))
return -1;
- if (Op.type == TYPE_IMM3 &&
- insn->immediates[insn->numImmediatesConsumed - 1] > 7)
- return -1;
- if (Op.type == TYPE_IMM5 &&
- insn->immediates[insn->numImmediatesConsumed - 1] > 31)
- return -1;
if (Op.type == TYPE_XMM128 ||
Op.type == TYPE_XMM256)
sawRegImm = 1;
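
For the absolute moves (one-byte opcodes 0xA0-0xA3) the decoder now folds any 0x66/0x67 prefixes into the attribute mask directly, then flips both bits in 16-bit mode, where their meaning is inverted. A sketch of just that mask computation; the ATTR_* names mirror the decoder's, but the bit values here are illustrative:

    #include <cstdio>

    enum { ATTR_OPSIZE = 1 << 0, ATTR_ADSIZE = 1 << 1 };
    enum Mode { MODE_16BIT, MODE_32BIT, MODE_64BIT };

    // Attribute mask for opcodes 0xA0-0xA3 (mov moffs forms).
    static unsigned absMoveAttrMask(Mode M, bool Has66, bool Has67) {
      unsigned Mask = 0;
      if (Has67) Mask |= ATTR_ADSIZE;  // observe 0x67 in any position
      if (Has66) Mask |= ATTR_OPSIZE;  // observe 0x66 in any position
      if (M == MODE_16BIT)             // 16-bit mode inverts both prefixes
        Mask ^= ATTR_ADSIZE | ATTR_OPSIZE;
      return Mask;
    }

    int main() {
      // In 16-bit mode a bare absolute move decodes as if both prefixes
      // were present; adding 0x67 cancels the ADSIZE bit again.
      std::printf("%u\n", absMoveAttrMask(MODE_16BIT, false, false)); // 3
      std::printf("%u\n", absMoveAttrMask(MODE_16BIT, false, true));  // 1
      std::printf("%u\n", absMoveAttrMask(MODE_64BIT, true,  false)); // 1
    }
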
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 457b382..a79a923 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -341,7 +341,15 @@ namespace X86Disassembler {
ENTRY(DR4) \
ENTRY(DR5) \
ENTRY(DR6) \
- ENTRY(DR7)
+ ENTRY(DR7) \
+ ENTRY(DR8) \
+ ENTRY(DR9) \
+ ENTRY(DR10) \
+ ENTRY(DR11) \
+ ENTRY(DR12) \
+ ENTRY(DR13) \
+ ENTRY(DR14) \
+ ENTRY(DR15)
#define REGS_CONTROL \
ENTRY(CR0) \
@@ -352,7 +360,14 @@ namespace X86Disassembler {
ENTRY(CR5) \
ENTRY(CR6) \
ENTRY(CR7) \
- ENTRY(CR8)
+ ENTRY(CR8) \
+ ENTRY(CR9) \
+ ENTRY(CR10) \
+ ENTRY(CR11) \
+ ENTRY(CR12) \
+ ENTRY(CR13) \
+ ENTRY(CR14) \
+ ENTRY(CR15)
#define ALL_EA_BASES \
EA_BASES_16BIT \
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index bec4f0e..70c6042 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -82,6 +82,7 @@ enum attributeBits {
"operands change width") \
ENUM_ENTRY(IC_ADSIZE, 3, "requires an ADSIZE prefix, so " \
"operands change width") \
+ ENUM_ENTRY(IC_OPSIZE_ADSIZE, 4, "requires ADSIZE and OPSIZE prefixes") \
ENUM_ENTRY(IC_XD, 2, "may say something about the opcode " \
"but not the operands") \
ENUM_ENTRY(IC_XS, 2, "may say something about the opcode " \
@@ -90,20 +91,24 @@ enum attributeBits {
"operands change width") \
ENUM_ENTRY(IC_XS_OPSIZE, 3, "requires an OPSIZE prefix, so " \
"operands change width") \
- ENUM_ENTRY(IC_64BIT_REXW, 4, "requires a REX.W prefix, so operands "\
+ ENUM_ENTRY(IC_64BIT_REXW, 5, "requires a REX.W prefix, so operands "\
"change width; overrides IC_OPSIZE") \
+ ENUM_ENTRY(IC_64BIT_REXW_ADSIZE, 6, "requires a REX.W prefix and 0x67 " \
+ "prefix") \
ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE") \
ENUM_ENTRY(IC_64BIT_ADSIZE, 3, "Just as meaningful as IC_ADSIZE") \
- ENUM_ENTRY(IC_64BIT_XD, 5, "XD instructions are SSE; REX.W is " \
+ ENUM_ENTRY(IC_64BIT_OPSIZE_ADSIZE, 4, "Just as meaningful as IC_OPSIZE/" \
+ "IC_ADSIZE") \
+ ENUM_ENTRY(IC_64BIT_XD, 6, "XD instructions are SSE; REX.W is " \
"secondary") \
- ENUM_ENTRY(IC_64BIT_XS, 5, "Just as meaningful as IC_64BIT_XD") \
+ ENUM_ENTRY(IC_64BIT_XS, 6, "Just as meaningful as IC_64BIT_XD") \
ENUM_ENTRY(IC_64BIT_XD_OPSIZE, 3, "Just as meaningful as IC_XD_OPSIZE") \
ENUM_ENTRY(IC_64BIT_XS_OPSIZE, 3, "Just as meaningful as IC_XS_OPSIZE") \
- ENUM_ENTRY(IC_64BIT_REXW_XS, 6, "OPSIZE could mean a different " \
+ ENUM_ENTRY(IC_64BIT_REXW_XS, 7, "OPSIZE could mean a different " \
"opcode") \
- ENUM_ENTRY(IC_64BIT_REXW_XD, 6, "Just as meaningful as " \
+ ENUM_ENTRY(IC_64BIT_REXW_XD, 7, "Just as meaningful as " \
"IC_64BIT_REXW_XS") \
- ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 7, "The Dynamic Duo! Prefer over all " \
+ ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 8, "The Dynamic Duo! Prefer over all " \
"else because this changes most " \
"operands' meaning") \
ENUM_ENTRY(IC_VEX, 1, "requires a VEX prefix") \
@@ -401,6 +406,8 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_IMM64, "8-byte") \
ENUM_ENTRY(TYPE_IMM3, "1-byte immediate operand between 0 and 7") \
ENUM_ENTRY(TYPE_IMM5, "1-byte immediate operand between 0 and 31") \
+ ENUM_ENTRY(TYPE_AVX512ICC, "1-byte immediate operand for AVX512 icmp") \
+ ENUM_ENTRY(TYPE_UIMM8, "1-byte unsigned immediate operand") \
ENUM_ENTRY(TYPE_RM8, "1-byte register or memory operand") \
ENUM_ENTRY(TYPE_RM16, "2-byte") \
ENUM_ENTRY(TYPE_RM32, "4-byte") \
@@ -416,10 +423,6 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_M1616, "2+2-byte segment+offset address") \
ENUM_ENTRY(TYPE_M1632, "2+4-byte") \
ENUM_ENTRY(TYPE_M1664, "2+8-byte") \
- ENUM_ENTRY(TYPE_M16_32, "2+4-byte two-part memory operand (LIDT, LGDT)") \
- ENUM_ENTRY(TYPE_M16_16, "2+2-byte (BOUND)") \
- ENUM_ENTRY(TYPE_M32_32, "4+4-byte (BOUND)") \
- ENUM_ENTRY(TYPE_M16_64, "2+8-byte (LIDT, LGDT)") \
ENUM_ENTRY(TYPE_SRCIDX8, "1-byte memory at source index") \
ENUM_ENTRY(TYPE_SRCIDX16, "2-byte memory at source index") \
ENUM_ENTRY(TYPE_SRCIDX32, "4-byte memory at source index") \
@@ -438,14 +441,8 @@ enum OperandEncoding {
  ENUM_ENTRY(TYPE_M32FP, "32-bit IEEE 754 memory floating-point operand") \
ENUM_ENTRY(TYPE_M64FP, "64-bit") \
ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \
- ENUM_ENTRY(TYPE_M16INT, "2-byte memory integer operand for use in " \
- "floating-point instructions") \
- ENUM_ENTRY(TYPE_M32INT, "4-byte") \
- ENUM_ENTRY(TYPE_M64INT, "8-byte") \
ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \
- ENUM_ENTRY(TYPE_MM, "MMX register operand") \
- ENUM_ENTRY(TYPE_MM32, "4-byte MMX register or memory operand") \
- ENUM_ENTRY(TYPE_MM64, "8-byte") \
+ ENUM_ENTRY(TYPE_MM64, "8-byte MMX register") \
ENUM_ENTRY(TYPE_XMM, "XMM register operand") \
ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \
ENUM_ENTRY(TYPE_XMM64, "8-byte") \
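
The integer in each ENUM_ENTRY is a priority rank, not an identifier: when more than one instruction context applies, the table generator prefers the higher number, which is why inserting IC_OPSIZE_ADSIZE at 4 pushes the REX.W contexts and everything above them up by one. A sketch of that selection rule, under the stated assumption that higher outranks lower (the real comparison lives in the disassembler table emitter):

    #include <cstdio>

    // Illustrative subset of the context priorities from the enum above.
    struct Context { const char *Name; int Priority; };

    // Pick the candidate the decoder should prefer: highest priority wins.
    static const Context &preferred(const Context &A, const Context &B) {
      return A.Priority >= B.Priority ? A : B;
    }

    int main() {
      Context OpSize       = {"IC_OPSIZE", 3};
      Context OpSizeAdSize = {"IC_OPSIZE_ADSIZE", 4};
      // With both 0x66 and 0x67 present, the combined context outranks
      // the plain OPSIZE one.
      std::printf("%s\n", preferred(OpSize, OpSizeAdSize).Name);
    }
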
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index b72730c..65461af 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -72,35 +72,11 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
printAnnotation(OS, Annot);
}
-void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- int64_t Imm = MI->getOperand(Op).getImm() & 0xf;
- switch (Imm) {
- default: llvm_unreachable("Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- case 8: O << "eq_uq"; break;
- case 9: O << "nge"; break;
- case 0xa: O << "ngt"; break;
- case 0xb: O << "false"; break;
- case 0xc: O << "neq_oq"; break;
- case 0xd: O << "ge"; break;
- case 0xe: O << "gt"; break;
- case 0xf: O << "true"; break;
- }
-}
-
-void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- int64_t Imm = MI->getOperand(Op).getImm() & 0x1f;
+void X86ATTInstPrinter::printSSEAVXCC(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm();
switch (Imm) {
- default: llvm_unreachable("Invalid avxcc argument!");
+ default: llvm_unreachable("Invalid ssecc/avxcc argument!");
case 0: O << "eq"; break;
case 1: O << "lt"; break;
case 2: O << "le"; break;
@@ -136,8 +112,24 @@ void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
}
}
-void X86ATTInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op,
+void X86ATTInstPrinter::printXOPCC(const MCInst *MI, unsigned Op,
raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm();
+ switch (Imm) {
+ default: llvm_unreachable("Invalid xopcc argument!");
+ case 0: O << "lt"; break;
+ case 1: O << "le"; break;
+ case 2: O << "gt"; break;
+ case 3: O << "ge"; break;
+ case 4: O << "eq"; break;
+ case 5: O << "neq"; break;
+ case 6: O << "false"; break;
+ case 7: O << "true"; break;
+ }
+}
+
+void X86ATTInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
int64_t Imm = MI->getOperand(Op).getImm() & 0x3;
switch (Imm) {
case 0: O << "{rn-sae}"; break;
@@ -163,8 +155,7 @@ void X86ATTInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
int64_t Address;
if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
O << formatHex((uint64_t)Address);
- }
- else {
+ } else {
// Otherwise, just print the expression.
O << *Op.getExpr();
}
@@ -295,3 +286,10 @@ void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
O << markup(">");
}
+
+void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ O << markup("<imm:")
+ << '$' << formatImm(MI->getOperand(Op).getImm() & 0xff)
+ << markup(">");
+}
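
printXOPCC covers the XOP vpcom encoding, whose 3-bit code space is laid out differently from the SSE/AVX one (lt is 0, eq is 4). A table-driven sketch of the same mapping, matching the switch above; xopCCName is an illustrative helper, not an LLVM function:

    #include <cstdio>

    // XOP vpcom condition codes, in encoding order (see printXOPCC above).
    static const char *const XOPCC[8] = {"lt", "le",  "gt",    "ge",
                                         "eq", "neq", "false", "true"};

    static const char *xopCCName(int Imm) {
      return (Imm >= 0 && Imm < 8) ? XOPCC[Imm] : nullptr; // invalid otherwise
    }

    int main() {
      std::printf("vpcom%sb\n", xopCCName(4)); // prints "vpcomeqb"
    }
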
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index 41be14b..f71cb81 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -45,18 +45,23 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
- void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
- void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printSSEAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printXOPCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &OS);
+
+ void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemReference(MI, OpNo, O);
+ }
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
-
+
void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
@@ -137,7 +142,7 @@ public:
private:
bool HasCustomInstComment;
};
-
+
}
#endif
diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp
index a8f15e6..10a1482 100644
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -1,724 +1,982 @@
-//===-- X86InstComments.cpp - Generate verbose-asm comments for instrs ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This defines functionality used to emit comments about X86 instructions to
-// an output stream for -fverbose-asm.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86InstComments.h"
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "Utils/X86ShuffleDecode.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/CodeGen/MachineValueType.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Top Level Entrypoint
-//===----------------------------------------------------------------------===//
-
-/// EmitAnyX86InstComments - This function decodes x86 instructions and prints
-/// newline terminated strings to the specified string if desired. This
-/// information is shown in disassembly dumps when verbose assembly is enabled.
-bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
- const char *(*getRegName)(unsigned)) {
- // If this is a shuffle operation, the switch should fill in this state.
- SmallVector<int, 8> ShuffleMask;
- const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
-
- switch (MI->getOpcode()) {
- default:
- // Not an instruction for which we can decode comments.
- return false;
-
- case X86::BLENDPDrri:
- case X86::VBLENDPDrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::BLENDPDrmi:
- case X86::VBLENDPDrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v2f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VBLENDPDYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VBLENDPDYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v4f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::BLENDPSrri:
- case X86::VBLENDPSrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::BLENDPSrmi:
- case X86::VBLENDPSrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v4f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VBLENDPSYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VBLENDPSYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v8f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::PBLENDWrri:
- case X86::VPBLENDWrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PBLENDWrmi:
- case X86::VPBLENDWrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v8i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPBLENDWYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPBLENDWYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v16i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::VPBLENDDrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPBLENDDrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v4i32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::VPBLENDDYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPBLENDDYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeBLENDMask(MVT::v8i32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::INSERTPSrr:
- case X86::VINSERTPSrr:
- DestName = getRegName(MI->getOperand(0).getReg());
- Src1Name = getRegName(MI->getOperand(1).getReg());
- Src2Name = getRegName(MI->getOperand(2).getReg());
- if(MI->getOperand(3).isImm())
- DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
- break;
-
- case X86::MOVLHPSrr:
- case X86::VMOVLHPSrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVLHPSMask(2, ShuffleMask);
- break;
-
- case X86::MOVHLPSrr:
- case X86::VMOVHLPSrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVHLPSMask(2, ShuffleMask);
- break;
-
- case X86::MOVSLDUPrr:
- case X86::VMOVSLDUPrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::MOVSLDUPrm:
- case X86::VMOVSLDUPrm:
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVSLDUPMask(MVT::v4f32, ShuffleMask);
- break;
-
- case X86::VMOVSHDUPYrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VMOVSHDUPYrm:
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVSHDUPMask(MVT::v8f32, ShuffleMask);
- break;
-
- case X86::VMOVSLDUPYrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VMOVSLDUPYrm:
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVSLDUPMask(MVT::v8f32, ShuffleMask);
- break;
-
- case X86::MOVSHDUPrr:
- case X86::VMOVSHDUPrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::MOVSHDUPrm:
- case X86::VMOVSHDUPrm:
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeMOVSHDUPMask(MVT::v4f32, ShuffleMask);
- break;
-
- case X86::PSLLDQri:
- case X86::VPSLLDQri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSLLDQMask(MVT::v16i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::VPSLLDQYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSLLDQMask(MVT::v32i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::PSRLDQri:
- case X86::VPSRLDQri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSRLDQMask(MVT::v16i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::VPSRLDQYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSRLDQMask(MVT::v32i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::PALIGNR128rr:
- case X86::VPALIGNR128rr:
- Src1Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PALIGNR128rm:
- case X86::VPALIGNR128rm:
- Src2Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePALIGNRMask(MVT::v16i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
- case X86::VPALIGNR256rr:
- Src1Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPALIGNR256rm:
- Src2Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePALIGNRMask(MVT::v32i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::PSHUFDri:
- case X86::VPSHUFDri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::PSHUFDmi:
- case X86::VPSHUFDmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v4i32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
- case X86::VPSHUFDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPSHUFDYmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v8i32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
-
- case X86::PSHUFHWri:
- case X86::VPSHUFHWri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::PSHUFHWmi:
- case X86::VPSHUFHWmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFHWMask(MVT::v8i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
- case X86::VPSHUFHWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPSHUFHWYmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFHWMask(MVT::v16i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
- case X86::PSHUFLWri:
- case X86::VPSHUFLWri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::PSHUFLWmi:
- case X86::VPSHUFLWmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFLWMask(MVT::v8i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
- case X86::VPSHUFLWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPSHUFLWYmi:
- DestName = getRegName(MI->getOperand(0).getReg());
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFLWMask(MVT::v16i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- break;
-
- case X86::PUNPCKHBWrr:
- case X86::VPUNPCKHBWrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKHBWrm:
- case X86::VPUNPCKHBWrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v16i8, ShuffleMask);
- break;
- case X86::VPUNPCKHBWYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKHBWYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v32i8, ShuffleMask);
- break;
- case X86::PUNPCKHWDrr:
- case X86::VPUNPCKHWDrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKHWDrm:
- case X86::VPUNPCKHWDrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v8i16, ShuffleMask);
- break;
- case X86::VPUNPCKHWDYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKHWDYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v16i16, ShuffleMask);
- break;
- case X86::PUNPCKHDQrr:
- case X86::VPUNPCKHDQrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKHDQrm:
- case X86::VPUNPCKHDQrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v4i32, ShuffleMask);
- break;
- case X86::VPUNPCKHDQYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKHDQYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v8i32, ShuffleMask);
- break;
- case X86::PUNPCKHQDQrr:
- case X86::VPUNPCKHQDQrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKHQDQrm:
- case X86::VPUNPCKHQDQrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v2i64, ShuffleMask);
- break;
- case X86::VPUNPCKHQDQYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKHQDQYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKHMask(MVT::v4i64, ShuffleMask);
- break;
-
- case X86::PUNPCKLBWrr:
- case X86::VPUNPCKLBWrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKLBWrm:
- case X86::VPUNPCKLBWrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v16i8, ShuffleMask);
- break;
- case X86::VPUNPCKLBWYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKLBWYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v32i8, ShuffleMask);
- break;
- case X86::PUNPCKLWDrr:
- case X86::VPUNPCKLWDrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKLWDrm:
- case X86::VPUNPCKLWDrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v8i16, ShuffleMask);
- break;
- case X86::VPUNPCKLWDYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKLWDYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v16i16, ShuffleMask);
- break;
- case X86::PUNPCKLDQrr:
- case X86::VPUNPCKLDQrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKLDQrm:
- case X86::VPUNPCKLDQrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v4i32, ShuffleMask);
- break;
- case X86::VPUNPCKLDQYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKLDQYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v8i32, ShuffleMask);
- break;
- case X86::PUNPCKLQDQrr:
- case X86::VPUNPCKLQDQrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::PUNPCKLQDQrm:
- case X86::VPUNPCKLQDQrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v2i64, ShuffleMask);
- break;
- case X86::VPUNPCKLQDQYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPUNPCKLQDQYrm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- DecodeUNPCKLMask(MVT::v4i64, ShuffleMask);
- break;
-
- case X86::SHUFPDrri:
- case X86::VSHUFPDrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::SHUFPDrmi:
- case X86::VSHUFPDrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeSHUFPMask(MVT::v2f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VSHUFPDYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VSHUFPDYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeSHUFPMask(MVT::v4f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::SHUFPSrri:
- case X86::VSHUFPSrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::SHUFPSrmi:
- case X86::VSHUFPSrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeSHUFPMask(MVT::v4f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VSHUFPSYrri:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VSHUFPSYrmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeSHUFPMask(MVT::v8f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
- case X86::UNPCKLPDrr:
- case X86::VUNPCKLPDrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::UNPCKLPDrm:
- case X86::VUNPCKLPDrm:
- DecodeUNPCKLMask(MVT::v2f64, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VUNPCKLPDYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VUNPCKLPDYrm:
- DecodeUNPCKLMask(MVT::v4f64, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::UNPCKLPSrr:
- case X86::VUNPCKLPSrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::UNPCKLPSrm:
- case X86::VUNPCKLPSrm:
- DecodeUNPCKLMask(MVT::v4f32, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VUNPCKLPSYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VUNPCKLPSYrm:
- DecodeUNPCKLMask(MVT::v8f32, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::UNPCKHPDrr:
- case X86::VUNPCKHPDrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::UNPCKHPDrm:
- case X86::VUNPCKHPDrm:
- DecodeUNPCKHMask(MVT::v2f64, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VUNPCKHPDYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VUNPCKHPDYrm:
- DecodeUNPCKHMask(MVT::v4f64, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::UNPCKHPSrr:
- case X86::VUNPCKHPSrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::UNPCKHPSrm:
- case X86::VUNPCKHPSrm:
- DecodeUNPCKHMask(MVT::v4f32, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VUNPCKHPSYrr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VUNPCKHPSYrm:
- DecodeUNPCKHMask(MVT::v8f32, ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERMILPSri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPERMILPSmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v4f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERMILPSYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPERMILPSYmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v8f32,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERMILPDri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPERMILPDmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v2f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERMILPDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPERMILPDYmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodePSHUFMask(MVT::v4f64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERM2F128rr:
- case X86::VPERM2I128rr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
- // FALL THROUGH.
- case X86::VPERM2F128rm:
- case X86::VPERM2I128rm:
- // For instruction comments purpose, assume the 256-bit vector is v4i64.
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeVPERM2X128Mask(MVT::v4i64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- case X86::VPERMQYri:
- case X86::VPERMPDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
- // FALL THROUGH.
- case X86::VPERMQYmi:
- case X86::VPERMPDYmi:
- if(MI->getOperand(MI->getNumOperands()-1).isImm())
- DecodeVPERMMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
- }
-
- // The only comments we decode are shuffles, so give up if we were unable to
- // decode a shuffle mask.
- if (ShuffleMask.empty())
- return false;
-
- if (!DestName) DestName = Src1Name;
- OS << (DestName ? DestName : "mem") << " = ";
-
- // If the two sources are the same, canonicalize the input elements to be
- // from the first src so that we get larger element spans.
- if (Src1Name == Src2Name) {
- for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
- if ((int)ShuffleMask[i] >= 0 && // Not sentinel.
- ShuffleMask[i] >= (int)e) // From second mask.
- ShuffleMask[i] -= e;
- }
- }
-
- // The shuffle mask specifies which elements of the src1/src2 fill in the
- // destination, with a few sentinel values. Loop through and print them
- // out.
- for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
- if (i != 0)
- OS << ',';
- if (ShuffleMask[i] == SM_SentinelZero) {
- OS << "zero";
- continue;
- }
-
- // Otherwise, it must come from src1 or src2. Print the span of elements
- // that comes from this src.
- bool isSrc1 = ShuffleMask[i] < (int)ShuffleMask.size();
- const char *SrcName = isSrc1 ? Src1Name : Src2Name;
- OS << (SrcName ? SrcName : "mem") << '[';
- bool IsFirst = true;
- while (i != e && (int)ShuffleMask[i] != SM_SentinelZero &&
- (ShuffleMask[i] < (int)ShuffleMask.size()) == isSrc1) {
- if (!IsFirst)
- OS << ',';
- else
- IsFirst = false;
- if (ShuffleMask[i] == SM_SentinelUndef)
- OS << "u";
- else
- OS << ShuffleMask[i] % ShuffleMask.size();
- ++i;
- }
- OS << ']';
- --i; // For loop increments element #.
- }
- //MI->print(OS, 0);
- OS << "\n";
-
- // We successfully added a comment to this instruction.
- return true;
-}
+//===-- X86InstComments.cpp - Generate verbose-asm comments for instrs ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines functionality used to emit comments about X86 instructions to
+// an output stream for -fverbose-asm.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86InstComments.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "Utils/X86ShuffleDecode.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+/// \brief Extracts the src/dst types for a given zero extension instruction.
+/// \note While the number of elements in the DstVT type is correct, the
+/// number in the SrcVT type is expanded to fill the src xmm register, and
+/// the upper elements may not be included in the dst xmm/ymm register.
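+/// For example, PMOVZXBW zero-extends the low 8 bytes of its source to 8
+/// words, so it is reported below as SrcVT = v16i8 and DstVT = v8i16.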
+static void getZeroExtensionTypes(const MCInst *MI, MVT &SrcVT, MVT &DstVT) {
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("Unknown zero extension instruction");
+ // i8 zero extension
+ case X86::PMOVZXBWrm:
+ case X86::PMOVZXBWrr:
+ case X86::VPMOVZXBWrm:
+ case X86::VPMOVZXBWrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v8i16;
+ break;
+ case X86::VPMOVZXBWYrm:
+ case X86::VPMOVZXBWYrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v16i16;
+ break;
+ case X86::PMOVZXBDrm:
+ case X86::PMOVZXBDrr:
+ case X86::VPMOVZXBDrm:
+ case X86::VPMOVZXBDrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v4i32;
+ break;
+ case X86::VPMOVZXBDYrm:
+ case X86::VPMOVZXBDYrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v8i32;
+ break;
+ case X86::PMOVZXBQrm:
+ case X86::PMOVZXBQrr:
+ case X86::VPMOVZXBQrm:
+ case X86::VPMOVZXBQrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v2i64;
+ break;
+ case X86::VPMOVZXBQYrm:
+ case X86::VPMOVZXBQYrr:
+ SrcVT = MVT::v16i8;
+ DstVT = MVT::v4i64;
+ break;
+ // i16 zero extension
+ case X86::PMOVZXWDrm:
+ case X86::PMOVZXWDrr:
+ case X86::VPMOVZXWDrm:
+ case X86::VPMOVZXWDrr:
+ SrcVT = MVT::v8i16;
+ DstVT = MVT::v4i32;
+ break;
+ case X86::VPMOVZXWDYrm:
+ case X86::VPMOVZXWDYrr:
+ SrcVT = MVT::v8i16;
+ DstVT = MVT::v8i32;
+ break;
+ case X86::PMOVZXWQrm:
+ case X86::PMOVZXWQrr:
+ case X86::VPMOVZXWQrm:
+ case X86::VPMOVZXWQrr:
+ SrcVT = MVT::v8i16;
+ DstVT = MVT::v2i64;
+ break;
+ case X86::VPMOVZXWQYrm:
+ case X86::VPMOVZXWQYrr:
+ SrcVT = MVT::v8i16;
+ DstVT = MVT::v4i64;
+ break;
+ // i32 zero extension
+ case X86::PMOVZXDQrm:
+ case X86::PMOVZXDQrr:
+ case X86::VPMOVZXDQrm:
+ case X86::VPMOVZXDQrr:
+ SrcVT = MVT::v4i32;
+ DstVT = MVT::v2i64;
+ break;
+ case X86::VPMOVZXDQYrm:
+ case X86::VPMOVZXDQYrr:
+ SrcVT = MVT::v4i32;
+ DstVT = MVT::v4i64;
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Entrypoint
+//===----------------------------------------------------------------------===//
+
+/// EmitAnyX86InstComments - This function decodes x86 instructions and prints
+/// newline-terminated comment strings to the specified output stream. This
+/// information is shown in disassembly dumps when verbose assembly is enabled.
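+/// For example, "vunpcklps %xmm1, %xmm0, %xmm2" is annotated with the comment
+/// "xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]".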
+bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
+ const char *(*getRegName)(unsigned)) {
+ // If this is a shuffle operation, the switch should fill in this state.
+ SmallVector<int, 8> ShuffleMask;
+ const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
+
+ switch (MI->getOpcode()) {
+ default:
+ // Not an instruction for which we can decode comments.
+ return false;
+
+ case X86::BLENDPDrri:
+ case X86::VBLENDPDrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::BLENDPDrmi:
+ case X86::VBLENDPDrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v2f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VBLENDPDYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VBLENDPDYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v4f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::BLENDPSrri:
+ case X86::VBLENDPSrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::BLENDPSrmi:
+ case X86::VBLENDPSrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v4f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VBLENDPSYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VBLENDPSYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v8f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::PBLENDWrri:
+ case X86::VPBLENDWrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PBLENDWrmi:
+ case X86::VPBLENDWrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPBLENDWYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPBLENDWYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::VPBLENDDrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPBLENDDrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v4i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::VPBLENDDYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPBLENDDYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeBLENDMask(MVT::v8i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::INSERTPSrr:
+ case X86::VINSERTPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::INSERTPSrm:
+ case X86::VINSERTPSrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeINSERTPSMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::MOVLHPSrr:
+ case X86::VMOVLHPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVLHPSMask(2, ShuffleMask);
+ break;
+
+ case X86::MOVHLPSrr:
+ case X86::VMOVHLPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVHLPSMask(2, ShuffleMask);
+ break;
+
+ case X86::MOVSLDUPrr:
+ case X86::VMOVSLDUPrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVSLDUPrm:
+ case X86::VMOVSLDUPrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVSLDUPMask(MVT::v4f32, ShuffleMask);
+ break;
+
+ case X86::VMOVSHDUPYrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VMOVSHDUPYrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVSHDUPMask(MVT::v8f32, ShuffleMask);
+ break;
+
+ case X86::VMOVSLDUPYrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VMOVSLDUPYrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVSLDUPMask(MVT::v8f32, ShuffleMask);
+ break;
+
+ case X86::MOVSHDUPrr:
+ case X86::VMOVSHDUPrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVSHDUPrm:
+ case X86::VMOVSHDUPrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVSHDUPMask(MVT::v4f32, ShuffleMask);
+ break;
+
+ case X86::VMOVDDUPYrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VMOVDDUPYrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVDDUPMask(MVT::v4f64, ShuffleMask);
+ break;
+
+ case X86::MOVDDUPrr:
+ case X86::VMOVDDUPrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVDDUPrm:
+ case X86::VMOVDDUPrm:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVDDUPMask(MVT::v2f64, ShuffleMask);
+ break;
+
+ case X86::PSLLDQri:
+ case X86::VPSLLDQri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSLLDQMask(MVT::v16i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::VPSLLDQYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSLLDQMask(MVT::v32i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::PSRLDQri:
+ case X86::VPSRLDQri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSRLDQMask(MVT::v16i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::VPSRLDQYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSRLDQMask(MVT::v32i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::PALIGNR128rr:
+ case X86::VPALIGNR128rr:
+ Src1Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PALIGNR128rm:
+ case X86::VPALIGNR128rm:
+ Src2Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePALIGNRMask(MVT::v16i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPALIGNR256rr:
+ Src1Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPALIGNR256rm:
+ Src2Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePALIGNRMask(MVT::v32i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::PSHUFDri:
+ case X86::VPSHUFDri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::PSHUFDmi:
+ case X86::VPSHUFDmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFDYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFDYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v8i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+
+ case X86::PSHUFHWri:
+ case X86::VPSHUFHWri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::PSHUFHWmi:
+ case X86::VPSHUFHWmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFHWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFHWYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFHWYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFHWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::PSHUFLWri:
+ case X86::VPSHUFLWri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::PSHUFLWmi:
+ case X86::VPSHUFLWmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFLWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFLWYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFLWYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFLWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+
+ case X86::PUNPCKHBWrr:
+ case X86::VPUNPCKHBWrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKHBWrm:
+ case X86::VPUNPCKHBWrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKHBWYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHBWYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v32i8, ShuffleMask);
+ break;
+ case X86::PUNPCKHWDrr:
+ case X86::VPUNPCKHWDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKHWDrm:
+ case X86::VPUNPCKHWDrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKHWDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHWDYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v16i16, ShuffleMask);
+ break;
+ case X86::PUNPCKHDQrr:
+ case X86::VPUNPCKHDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKHDQrm:
+ case X86::VPUNPCKHDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKHDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v8i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKHDQZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHDQZrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v16i32, ShuffleMask);
+ break;
+ case X86::PUNPCKHQDQrr:
+ case X86::VPUNPCKHQDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKHQDQrm:
+ case X86::VPUNPCKHQDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKHQDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHQDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v4i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKHQDQZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHQDQZrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v8i64, ShuffleMask);
+ break;
+
+ case X86::PUNPCKLBWrr:
+ case X86::VPUNPCKLBWrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKLBWrm:
+ case X86::VPUNPCKLBWrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKLBWYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLBWYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v32i8, ShuffleMask);
+ break;
+ case X86::PUNPCKLWDrr:
+ case X86::VPUNPCKLWDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKLWDrm:
+ case X86::VPUNPCKLWDrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKLWDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLWDYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v16i16, ShuffleMask);
+ break;
+ case X86::PUNPCKLDQrr:
+ case X86::VPUNPCKLDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKLDQrm:
+ case X86::VPUNPCKLDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKLDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v8i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKLDQZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLDQZrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v16i32, ShuffleMask);
+ break;
+ case X86::PUNPCKLQDQrr:
+ case X86::VPUNPCKLQDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::PUNPCKLQDQrm:
+ case X86::VPUNPCKLQDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKLQDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLQDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v4i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKLQDQZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLQDQZrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v8i64, ShuffleMask);
+ break;
+
+ case X86::SHUFPDrri:
+ case X86::VSHUFPDrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::SHUFPDrmi:
+ case X86::VSHUFPDrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v2f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VSHUFPDYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPDYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v4f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::SHUFPSrri:
+ case X86::VSHUFPSrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::SHUFPSrmi:
+ case X86::VSHUFPSrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v4f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VSHUFPSYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPSYrmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v8f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::UNPCKLPDrr:
+ case X86::VUNPCKLPDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::UNPCKLPDrm:
+ case X86::VUNPCKLPDrm:
+ DecodeUNPCKLMask(MVT::v2f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKLPDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKLPDYrm:
+ DecodeUNPCKLMask(MVT::v4f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKLPDZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKLPDZrm:
+ DecodeUNPCKLMask(MVT::v8f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::UNPCKLPSrr:
+ case X86::VUNPCKLPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::UNPCKLPSrm:
+ case X86::VUNPCKLPSrm:
+ DecodeUNPCKLMask(MVT::v4f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKLPSYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKLPSYrm:
+ DecodeUNPCKLMask(MVT::v8f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKLPSZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKLPSZrm:
+ DecodeUNPCKLMask(MVT::v16f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::UNPCKHPDrr:
+ case X86::VUNPCKHPDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::UNPCKHPDrm:
+ case X86::VUNPCKHPDrm:
+ DecodeUNPCKHMask(MVT::v2f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPDYrm:
+ DecodeUNPCKHMask(MVT::v4f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPDZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPDZrm:
+ DecodeUNPCKHMask(MVT::v8f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::UNPCKHPSrr:
+ case X86::VUNPCKHPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::UNPCKHPSrm:
+ case X86::VUNPCKHPSrm:
+ DecodeUNPCKHMask(MVT::v4f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPSYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPSYrm:
+ DecodeUNPCKHMask(MVT::v8f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPSZrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPSZrm:
+ DecodeUNPCKHMask(MVT::v16f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERMILPSri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPSmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERMILPSYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPSYmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v8f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERMILPDri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPDmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v2f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERMILPDYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPDYmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERM2F128rr:
+ case X86::VPERM2I128rr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPERM2F128rm:
+ case X86::VPERM2I128rm:
+    // For instruction comment purposes, assume the 256-bit vector is v4i64.
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeVPERM2X128Mask(MVT::v4i64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VPERMQYri:
+ case X86::VPERMPDYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMQYmi:
+ case X86::VPERMPDYmi:
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeVPERMMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
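+  // Scalar moves: the register forms merge the upper elements from the first
+  // source, while the load forms zero them; Src2Name is still null for loads,
+  // which is what the second DecodeScalarMoveMask argument encodes.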
+ case X86::MOVSDrr:
+ case X86::VMOVSDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVSDrm:
+ case X86::VMOVSDrm:
+ DecodeScalarMoveMask(MVT::v2f64, nullptr == Src2Name, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::MOVSSrr:
+ case X86::VMOVSSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVSSrm:
+ case X86::VMOVSSrm:
+ DecodeScalarMoveMask(MVT::v4f32, nullptr == Src2Name, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::MOVPQI2QIrr:
+ case X86::MOVZPQILo2PQIrr:
+ case X86::VMOVPQI2QIrr:
+ case X86::VMOVZPQILo2PQIrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::MOVQI2PQIrm:
+ case X86::MOVZQI2PQIrm:
+ case X86::MOVZPQILo2PQIrm:
+ case X86::VMOVQI2PQIrm:
+ case X86::VMOVZQI2PQIrm:
+ case X86::VMOVZPQILo2PQIrm:
+ DecodeZeroMoveLowMask(MVT::v2i64, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::MOVDI2PDIrm:
+ case X86::VMOVDI2PDIrm:
+ DecodeZeroMoveLowMask(MVT::v4i32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ case X86::PMOVZXBWrr:
+ case X86::PMOVZXBDrr:
+ case X86::PMOVZXBQrr:
+ case X86::PMOVZXWDrr:
+ case X86::PMOVZXWQrr:
+ case X86::PMOVZXDQrr:
+ case X86::VPMOVZXBWrr:
+ case X86::VPMOVZXBDrr:
+ case X86::VPMOVZXBQrr:
+ case X86::VPMOVZXWDrr:
+ case X86::VPMOVZXWQrr:
+ case X86::VPMOVZXDQrr:
+ case X86::VPMOVZXBWYrr:
+ case X86::VPMOVZXBDYrr:
+ case X86::VPMOVZXBQYrr:
+ case X86::VPMOVZXWDYrr:
+ case X86::VPMOVZXWQYrr:
+ case X86::VPMOVZXDQYrr:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::PMOVZXBWrm:
+ case X86::PMOVZXBDrm:
+ case X86::PMOVZXBQrm:
+ case X86::PMOVZXWDrm:
+ case X86::PMOVZXWQrm:
+ case X86::PMOVZXDQrm:
+ case X86::VPMOVZXBWrm:
+ case X86::VPMOVZXBDrm:
+ case X86::VPMOVZXBQrm:
+ case X86::VPMOVZXWDrm:
+ case X86::VPMOVZXWQrm:
+ case X86::VPMOVZXDQrm:
+ case X86::VPMOVZXBWYrm:
+ case X86::VPMOVZXBDYrm:
+ case X86::VPMOVZXBQYrm:
+ case X86::VPMOVZXWDYrm:
+ case X86::VPMOVZXWQYrm:
+ case X86::VPMOVZXDQYrm: {
+ MVT SrcVT, DstVT;
+ getZeroExtensionTypes(MI, SrcVT, DstVT);
+ DecodeZeroExtendMask(SrcVT, DstVT, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ } break;
+ }
+
+ // The only comments we decode are shuffles, so give up if we were unable to
+ // decode a shuffle mask.
+ if (ShuffleMask.empty())
+ return false;
+
+ if (!DestName) DestName = Src1Name;
+ OS << (DestName ? DestName : "mem") << " = ";
+
+ // If the two sources are the same, canonicalize the input elements to be
+ // from the first src so that we get larger element spans.
+ if (Src1Name == Src2Name) {
+ for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
+ if ((int)ShuffleMask[i] >= 0 && // Not sentinel.
+ ShuffleMask[i] >= (int)e) // From second mask.
+ ShuffleMask[i] -= e;
+ }
+ }
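+  // For example, "unpcklps %xmm0, %xmm0" canonicalizes <0,4,1,5> to
+  // <0,0,1,1>, which prints as the single span xmm0[0,0,1,1] rather than
+  // four one-element spans.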
+
+ // The shuffle mask specifies which elements of the src1/src2 fill in the
+ // destination, with a few sentinel values. Loop through and print them
+ // out.
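+  // For example, a zero-extending "movq" load (MOVQI2PQIrm above) decodes to
+  // <0, SM_SentinelZero> and is printed as "xmm0 = mem[0],zero".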
+ for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
+ if (i != 0)
+ OS << ',';
+ if (ShuffleMask[i] == SM_SentinelZero) {
+ OS << "zero";
+ continue;
+ }
+
+ // Otherwise, it must come from src1 or src2. Print the span of elements
+ // that comes from this src.
+ bool isSrc1 = ShuffleMask[i] < (int)ShuffleMask.size();
+ const char *SrcName = isSrc1 ? Src1Name : Src2Name;
+ OS << (SrcName ? SrcName : "mem") << '[';
+ bool IsFirst = true;
+ while (i != e && (int)ShuffleMask[i] != SM_SentinelZero &&
+ (ShuffleMask[i] < (int)ShuffleMask.size()) == isSrc1) {
+ if (!IsFirst)
+ OS << ',';
+ else
+ IsFirst = false;
+ if (ShuffleMask[i] == SM_SentinelUndef)
+ OS << "u";
+ else
+ OS << ShuffleMask[i] % ShuffleMask.size();
+ ++i;
+ }
+ OS << ']';
+ --i; // For loop increments element #.
+ }
+ //MI->print(OS, 0);
+ OS << "\n";
+
+ // We successfully added a comment to this instruction.
+ return true;
+}
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index 1c8466b..91d1828 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -50,33 +50,9 @@ void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
EmitAnyX86InstComments(MI, *CommentStream, getRegisterName);
}
-void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- int64_t Imm = MI->getOperand(Op).getImm() & 0xf;
- switch (Imm) {
- default: llvm_unreachable("Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- case 8: O << "eq_uq"; break;
- case 9: O << "nge"; break;
- case 0xa: O << "ngt"; break;
- case 0xb: O << "false"; break;
- case 0xc: O << "neq_oq"; break;
- case 0xd: O << "ge"; break;
- case 0xe: O << "gt"; break;
- case 0xf: O << "true"; break;
- }
-}
-
-void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- int64_t Imm = MI->getOperand(Op).getImm() & 0x1f;
+void X86IntelInstPrinter::printSSEAVXCC(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm();
switch (Imm) {
default: llvm_unreachable("Invalid avxcc argument!");
case 0: O << "eq"; break;
@@ -114,8 +90,24 @@ void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
}
}
+void X86IntelInstPrinter::printXOPCC(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm();
+ switch (Imm) {
+ default: llvm_unreachable("Invalid xopcc argument!");
+ case 0: O << "lt"; break;
+ case 1: O << "le"; break;
+ case 2: O << "gt"; break;
+ case 3: O << "ge"; break;
+ case 4: O << "eq"; break;
+ case 5: O << "neq"; break;
+ case 6: O << "false"; break;
+ case 7: O << "true"; break;
+ }
+}
+
void X86IntelInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
+ raw_ostream &O) {
int64_t Imm = MI->getOperand(Op).getImm() & 0x3;
switch (Imm) {
case 0: O << "{rn-sae}"; break;
@@ -168,21 +160,21 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
const MCOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
const MCOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
const MCOperand &SegReg = MI->getOperand(Op+X86::AddrSegmentReg);
-
+
// If this has a segment register, print it.
if (SegReg.getReg()) {
printOperand(MI, Op+X86::AddrSegmentReg, O);
O << ':';
}
-
+
O << '[';
-
+
bool NeedPlus = false;
if (BaseReg.getReg()) {
printOperand(MI, Op+X86::AddrBaseReg, O);
NeedPlus = true;
}
-
+
if (IndexReg.getReg()) {
if (NeedPlus) O << " + ";
if (ScaleVal != 1)
@@ -209,7 +201,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
O << formatImm(DispVal);
}
}
-
+
O << ']';
}
@@ -257,3 +249,8 @@ void X86IntelInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
O << ']';
}
+
+void X86IntelInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ O << formatImm(MI->getOperand(Op).getImm() & 0xff);
+}
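
For reference, the merged printSSEAVXCC above is a pure table lookup on the
immediate; a hedged sketch covering only the first eight (SSE-era) predicates,
with names taken from the switch above:

  #include <cassert>
  #include <cstdint>

  const char *CCName(int64_t Imm) {
    static const char *const Names[] = {"eq",  "lt",  "le",  "unord",
                                        "neq", "nlt", "nle", "ord"};
    assert(Imm >= 0 && Imm < 8 && "only the SSE subset is shown here");
    return Names[Imm];
  }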
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index d082f0b..2150144 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -36,19 +36,24 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
- void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
- void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O);
+ void printSSEAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O);
+ void printXOPCC(const MCInst *MI, unsigned Op, raw_ostream &O);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &O);
+
+ void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemReference(MI, OpNo, O);
+ }
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "opaque ptr ";
printMemReference(MI, OpNo, O);
}
-
+
void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printMemReference(MI, OpNo, O);
@@ -152,7 +157,7 @@ public:
printMemOffset(MI, OpNo, O);
}
};
-
+
}
#endif
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index befa6c2..719b761 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -512,7 +512,7 @@ protected:
// Defines a new offset for the CFA. E.g.
//
// With frame:
- //
+ //
// pushq %rbp
// L0:
// .cfi_def_cfa_offset 16
@@ -682,7 +682,7 @@ private:
// 4 3
// 5 3
//
- for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
+ for (unsigned i = 0; i < RegCount; ++i) {
int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
if (CUReg == -1) return ~0U;
SavedRegs[i] = CUReg;
@@ -777,39 +777,6 @@ public:
MachO::CPU_TYPE_X86_64, Subtype);
}
- bool doesSectionRequireSymbols(const MCSection &Section) const override {
- // Temporary labels in the string literals sections require symbols. The
- // issue is that the x86_64 relocation format does not allow symbol +
- // offset, and so the linker does not have enough information to resolve the
- // access to the appropriate atom unless an external relocation is used. For
- // non-cstring sections, we expect the compiler to use a non-temporary label
- // for anything that could have an addend pointing outside the symbol.
- //
- // See <rdar://problem/4765733>.
- const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
- return SMO.getType() == MachO::S_CSTRING_LITERALS;
- }
-
- bool isSectionAtomizable(const MCSection &Section) const override {
- const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
- // Fixed sized data sections are uniqued, they cannot be diced into atoms.
- switch (SMO.getType()) {
- default:
- return true;
-
- case MachO::S_4BYTE_LITERALS:
- case MachO::S_8BYTE_LITERALS:
- case MachO::S_16BYTE_LITERALS:
- case MachO::S_LITERAL_POINTERS:
- case MachO::S_NON_LAZY_SYMBOL_POINTERS:
- case MachO::S_LAZY_SYMBOL_POINTERS:
- case MachO::S_MOD_INIT_FUNC_POINTERS:
- case MachO::S_MOD_TERM_FUNC_POINTERS:
- case MachO::S_INTERPOSING:
- return false;
- }
- }
-
/// \brief Generate the compact unwind encoding for the CFI instructions.
uint32_t generateCompactUnwindEncoding(
ArrayRef<MCCFIInstruction> Instrs) const override {
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 365cf0c..d4698bf 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -302,19 +302,21 @@ namespace X86II {
//// MRM_XX - A mod/rm byte of exactly 0xXX.
MRM_C0 = 32, MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35,
- MRM_C4 = 36, MRM_C8 = 37, MRM_C9 = 38, MRM_CA = 39,
- MRM_CB = 40, MRM_CF = 41, MRM_D0 = 42, MRM_D1 = 43,
- MRM_D4 = 44, MRM_D5 = 45, MRM_D6 = 46, MRM_D7 = 47,
- MRM_D8 = 48, MRM_D9 = 49, MRM_DA = 50, MRM_DB = 51,
- MRM_DC = 52, MRM_DD = 53, MRM_DE = 54, MRM_DF = 55,
- MRM_E0 = 56, MRM_E1 = 57, MRM_E2 = 58, MRM_E3 = 59,
- MRM_E4 = 60, MRM_E5 = 61, MRM_E8 = 62, MRM_E9 = 63,
- MRM_EA = 64, MRM_EB = 65, MRM_EC = 66, MRM_ED = 67,
- MRM_EE = 68, MRM_F0 = 69, MRM_F1 = 70, MRM_F2 = 71,
- MRM_F3 = 72, MRM_F4 = 73, MRM_F5 = 74, MRM_F6 = 75,
- MRM_F7 = 76, MRM_F8 = 77, MRM_F9 = 78, MRM_FA = 79,
- MRM_FB = 80, MRM_FC = 81, MRM_FD = 82, MRM_FE = 83,
- MRM_FF = 84,
+ MRM_C4 = 36, MRM_C5 = 37, MRM_C6 = 38, MRM_C7 = 39,
+ MRM_C8 = 40, MRM_C9 = 41, MRM_CA = 42, MRM_CB = 43,
+ MRM_CC = 44, MRM_CD = 45, MRM_CE = 46, MRM_CF = 47,
+ MRM_D0 = 48, MRM_D1 = 49, MRM_D2 = 50, MRM_D3 = 51,
+ MRM_D4 = 52, MRM_D5 = 53, MRM_D6 = 54, MRM_D7 = 55,
+ MRM_D8 = 56, MRM_D9 = 57, MRM_DA = 58, MRM_DB = 59,
+ MRM_DC = 60, MRM_DD = 61, MRM_DE = 62, MRM_DF = 63,
+ MRM_E0 = 64, MRM_E1 = 65, MRM_E2 = 66, MRM_E3 = 67,
+ MRM_E4 = 68, MRM_E5 = 69, MRM_E6 = 70, MRM_E7 = 71,
+ MRM_E8 = 72, MRM_E9 = 73, MRM_EA = 74, MRM_EB = 75,
+ MRM_EC = 76, MRM_ED = 77, MRM_EE = 78, MRM_EF = 79,
+ MRM_F0 = 80, MRM_F1 = 81, MRM_F2 = 82, MRM_F3 = 83,
+ MRM_F4 = 84, MRM_F5 = 85, MRM_F6 = 86, MRM_F7 = 87,
+ MRM_F8 = 88, MRM_F9 = 89, MRM_FA = 90, MRM_FB = 91,
+ MRM_FC = 92, MRM_FD = 93, MRM_FE = 94, MRM_FF = 95,
FormMask = 127,
@@ -328,21 +330,28 @@ namespace X86II {
OpSizeShift = 7,
OpSizeMask = 0x3 << OpSizeShift,
- OpSize16 = 1 << OpSizeShift,
- OpSize32 = 2 << OpSizeShift,
+ OpSizeFixed = 0 << OpSizeShift,
+ OpSize16 = 1 << OpSizeShift,
+ OpSize32 = 2 << OpSizeShift,
- // AsSize - Set if this instruction requires an operand size prefix (0x67),
- // which most often indicates that the instruction address 16 bit address
- // instead of 32 bit address (or 32 bit address in 64 bit mode).
+ // AdSize - AdSizeX implies this instruction determines its need for the 0x67
+ // prefix from a normal ModRM memory operand. The other types indicate that
+ // an operand is encoded with a specific width and a prefix is needed if
+ // it differs from the current mode.
AdSizeShift = OpSizeShift + 2,
- AdSize = 1 << AdSizeShift,
+ AdSizeMask = 0x3 << AdSizeShift,
+
+ AdSizeX = 1 << AdSizeShift,
+ AdSize16 = 1 << AdSizeShift,
+ AdSize32 = 2 << AdSizeShift,
+ AdSize64 = 3 << AdSizeShift,
//===------------------------------------------------------------------===//
// OpPrefix - There are several prefix bytes that are used as opcode
// extensions. These are 0x66, 0xF3, and 0xF2. If this field is 0 there is
// no prefix.
//
- OpPrefixShift = AdSizeShift + 1,
+ OpPrefixShift = AdSizeShift + 2,
OpPrefixMask = 0x7 << OpPrefixShift,
// PS, PD - Prefix code for packed single and double precision vector
@@ -669,19 +678,10 @@ namespace X86II {
return -1;
case X86II::MRMDestMem:
return 0;
- case X86II::MRMSrcMem: {
- unsigned FirstMemOp = 1;
- if (HasVEX_4V)
- ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
- if (HasMemOp4)
- ++FirstMemOp;// Skip the register source (which is encoded in I8IMM).
- if (HasEVEX_K)
- ++FirstMemOp;// Skip the mask register
- // FIXME: Maybe lea should have its own form? This is a horrible hack.
- //if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- // Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- return FirstMemOp;
- }
+ case X86II::MRMSrcMem:
+ // Start from 1, skip any registers encoded in VEX_VVVV or I8IMM, or a
+ // mask register.
+ return 1 + HasVEX_4V + HasMemOp4 + HasEVEX_K;
case X86II::MRMXr:
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
@@ -692,15 +692,9 @@ namespace X86II {
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m: {
- bool HasVEX_4V = TSFlags & X86II::VEX_4V;
- unsigned FirstMemOp = 0;
- if (HasVEX_4V)
- ++FirstMemOp;// Skip the register dest (which is encoded in VEX_VVVV).
- if (HasEVEX_K)
- ++FirstMemOp;// Skip the mask register
- return FirstMemOp;
- }
+ case X86II::MRM6m: case X86II::MRM7m:
+ // Start from 0, skip registers encoded in VEX_VVVV or a mask register.
+ return 0 + HasVEX_4V + HasEVEX_K;
case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
@@ -759,7 +753,7 @@ namespace X86II {
(RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
}
-
+
inline bool isX86_64NonExtLowByteReg(unsigned reg) {
return (reg == X86::SPL || reg == X86::BPL ||
reg == X86::SIL || reg == X86::DIL);
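
The getMemoryOperandNo simplification above leans on bool-to-unsigned
promotion: each prefix operand that is present shifts the memory reference
right by one slot. A small illustrative sketch (the flag values are made up
for the example):

  #include <cstdio>

  int main() {
    // MRMSrcMem: operand 0 is the destination register, then an optional
    // VEX_VVVV source, an optional I8IMM source, and an optional EVEX mask
    // register before the memory reference begins.
    bool HasVEX_4V = true, HasMemOp4 = false, HasEVEX_K = false;
    unsigned FirstMemOp = 1 + HasVEX_4V + HasMemOp4 + HasEVEX_K;
    std::printf("memory operand starts at index %u\n", FirstMemOp); // 2
  }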
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index be6a8e4..e8b0b4c 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -222,6 +222,9 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
case MCSymbolRefExpr::VK_GOT:
Type = ELF::R_386_GOT32;
break;
+ case MCSymbolRefExpr::VK_PLT:
+ Type = ELF::R_386_PLT32;
+ break;
case MCSymbolRefExpr::VK_GOTOFF:
Type = ELF::R_386_GOTOFF;
break;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 5679d63..e64b963 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -108,12 +108,6 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
- // OpenBSD and Bitrig have buggy support for .quad in 32-bit mode, just split
- // into two .words.
- if ((T.getOS() == Triple::OpenBSD || T.getOS() == Triple::Bitrig) &&
- T.getArch() == Triple::x86)
- Data64bitsDirective = nullptr;
-
// Always enable the integrated assembler by default.
// Clang also enabled it when the OS is Solaris but that is redundant here.
UseIntegratedAssembler = true;
@@ -135,9 +129,10 @@ void X86MCAsmInfoMicrosoft::anchor() { }
X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
if (Triple.getArch() == Triple::x86_64) {
PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
PointerSize = 8;
WinEHEncodingType = WinEH::EncodingType::Itanium;
- ExceptionsType = ExceptionHandling::ItaniumWinEH;
+ ExceptionsType = ExceptionHandling::WinEH;
}
AssemblerDialect = AsmWriterFlavor;
@@ -155,9 +150,10 @@ X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
assert(Triple.isOSWindows() && "Windows is the only supported COFF target");
if (Triple.getArch() == Triple::x86_64) {
PrivateGlobalPrefix = ".L";
+ PrivateLabelPrefix = ".L";
PointerSize = 8;
WinEHEncodingType = WinEH::EncodingType::Itanium;
- ExceptionsType = ExceptionHandling::ItaniumWinEH;
+ ExceptionsType = ExceptionHandling::WinEH;
} else {
ExceptionsType = ExceptionHandling::DwarfCFI;
}
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
index f2f06c3..deaad2a 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
@@ -23,7 +23,8 @@ namespace llvm {
class Triple;
class X86MCAsmInfoDarwin : public MCAsmInfoDarwin {
- void anchor() override;
+ virtual void anchor();
+
public:
explicit X86MCAsmInfoDarwin(const Triple &Triple);
};
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 31b8e2d..3ad8ab1 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -30,8 +30,8 @@ using namespace llvm;
namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
- X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
+ void operator=(const X86MCCodeEmitter &) = delete;
const MCInstrInfo &MCII;
MCContext &Ctx;
public:
@@ -590,6 +590,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
+ assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
+
uint64_t Encoding = TSFlags & X86II::EncodingMask;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
@@ -721,7 +723,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr, src1(VEX_4V), src2(ModR/M)
// MemAddr, src1(ModR/M), imm8
//
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
X86::AddrBaseReg).getReg()))
VEX_B = 0x0;
if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
@@ -863,7 +865,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
}
EncodeRC = true;
- }
+ }
break;
case X86II::MRMDestReg:
// MRMDestReg instructions forms:
@@ -1109,6 +1111,10 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
: X86II::OpSize16))
EmitByte(0x66, CurByte, OS);
+ // Emit the LOCK opcode prefix.
+ if (TSFlags & X86II::LOCK)
+ EmitByte(0xF0, CurByte, OS);
+
switch (TSFlags & X86II::OpPrefixMask) {
case X86II::PD: // 66
EmitByte(0x66, CurByte, OS);
@@ -1182,10 +1188,6 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
if (MemoryOperand != -1) MemoryOperand += CurOp;
- // Emit the lock opcode prefix as needed.
- if (TSFlags & X86II::LOCK)
- EmitByte(0xF0, CurByte, OS);
-
// Emit segment override opcode prefix as needed.
if (MemoryOperand >= 0)
EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
@@ -1197,16 +1199,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// Emit the address size opcode prefix as needed.
bool need_address_override;
- // The AdSize prefix is only for 32-bit and 64-bit modes. Hm, perhaps we
- // should introduce an AdSize16 bit instead of having seven special cases?
- if ((!is16BitMode(STI) && TSFlags & X86II::AdSize) ||
- (is16BitMode(STI) && (MI.getOpcode() == X86::JECXZ_32 ||
- MI.getOpcode() == X86::MOV8o8a ||
- MI.getOpcode() == X86::MOV16o16a ||
- MI.getOpcode() == X86::MOV32o32a ||
- MI.getOpcode() == X86::MOV8ao8 ||
- MI.getOpcode() == X86::MOV16ao16 ||
- MI.getOpcode() == X86::MOV32ao32))) {
+ uint64_t AdSize = TSFlags & X86II::AdSizeMask;
+ if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
+ (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
+ (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
need_address_override = true;
} else if (MemoryOperand < 0) {
need_address_override = false;
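
The rewritten check collapses the old per-opcode special cases into a
comparison between the instruction's declared address width and the current
mode. A sketch of the same decision (the enum names here are stand-ins; the
real flags live in X86BaseInfo.h, and AdSizeX instructions instead inspect
their ModRM memory operand):

  enum AdSizeKind { AdSizeOther, AdSize16, AdSize32, AdSize64 };

  // True when the 0x67 address-size prefix is required purely from the
  // declared address width vs. the current 16/32/64-bit mode.
  bool needsAdSizePrefix(AdSizeKind AdSize, unsigned ModeBits) {
    return (ModeBits == 16 && AdSize == AdSize32) ||
           (ModeBits == 32 && AdSize == AdSize16) ||
           (ModeBits == 64 && AdSize == AdSize32);
  }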
@@ -1430,83 +1426,31 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
break;
}
case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
- case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
+ case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
+ case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
+ case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
- case X86II::MRM_D4: case X86II::MRM_D5: case X86II::MRM_D6:
- case X86II::MRM_D7: case X86II::MRM_D8: case X86II::MRM_D9:
- case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
- case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
- case X86II::MRM_E0: case X86II::MRM_E1: case X86II::MRM_E2:
- case X86II::MRM_E3: case X86II::MRM_E4: case X86II::MRM_E5:
- case X86II::MRM_E8: case X86II::MRM_E9: case X86II::MRM_EA:
- case X86II::MRM_EB: case X86II::MRM_EC: case X86II::MRM_ED:
- case X86II::MRM_EE: case X86II::MRM_F0: case X86II::MRM_F1:
- case X86II::MRM_F2: case X86II::MRM_F3: case X86II::MRM_F4:
- case X86II::MRM_F5: case X86II::MRM_F6: case X86II::MRM_F7:
- case X86II::MRM_F8: case X86II::MRM_F9: case X86II::MRM_FA:
- case X86II::MRM_FB: case X86II::MRM_FC: case X86II::MRM_FD:
- case X86II::MRM_FE: case X86II::MRM_FF:
+ case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
+ case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
+ case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
+ case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
+ case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
+ case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
+ case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
+ case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
+ case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
+ case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
+ case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
+ case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
+ case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
+ case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
+ case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
+ case X86II::MRM_FF:
EmitByte(BaseOpcode, CurByte, OS);
- unsigned char MRM;
- switch (TSFlags & X86II::FormMask) {
- default: llvm_unreachable("Invalid Form");
- case X86II::MRM_C0: MRM = 0xC0; break;
- case X86II::MRM_C1: MRM = 0xC1; break;
- case X86II::MRM_C2: MRM = 0xC2; break;
- case X86II::MRM_C3: MRM = 0xC3; break;
- case X86II::MRM_C4: MRM = 0xC4; break;
- case X86II::MRM_C8: MRM = 0xC8; break;
- case X86II::MRM_C9: MRM = 0xC9; break;
- case X86II::MRM_CA: MRM = 0xCA; break;
- case X86II::MRM_CB: MRM = 0xCB; break;
- case X86II::MRM_CF: MRM = 0xCF; break;
- case X86II::MRM_D0: MRM = 0xD0; break;
- case X86II::MRM_D1: MRM = 0xD1; break;
- case X86II::MRM_D4: MRM = 0xD4; break;
- case X86II::MRM_D5: MRM = 0xD5; break;
- case X86II::MRM_D6: MRM = 0xD6; break;
- case X86II::MRM_D7: MRM = 0xD7; break;
- case X86II::MRM_D8: MRM = 0xD8; break;
- case X86II::MRM_D9: MRM = 0xD9; break;
- case X86II::MRM_DA: MRM = 0xDA; break;
- case X86II::MRM_DB: MRM = 0xDB; break;
- case X86II::MRM_DC: MRM = 0xDC; break;
- case X86II::MRM_DD: MRM = 0xDD; break;
- case X86II::MRM_DE: MRM = 0xDE; break;
- case X86II::MRM_DF: MRM = 0xDF; break;
- case X86II::MRM_E0: MRM = 0xE0; break;
- case X86II::MRM_E1: MRM = 0xE1; break;
- case X86II::MRM_E2: MRM = 0xE2; break;
- case X86II::MRM_E3: MRM = 0xE3; break;
- case X86II::MRM_E4: MRM = 0xE4; break;
- case X86II::MRM_E5: MRM = 0xE5; break;
- case X86II::MRM_E8: MRM = 0xE8; break;
- case X86II::MRM_E9: MRM = 0xE9; break;
- case X86II::MRM_EA: MRM = 0xEA; break;
- case X86II::MRM_EB: MRM = 0xEB; break;
- case X86II::MRM_EC: MRM = 0xEC; break;
- case X86II::MRM_ED: MRM = 0xED; break;
- case X86II::MRM_EE: MRM = 0xEE; break;
- case X86II::MRM_F0: MRM = 0xF0; break;
- case X86II::MRM_F1: MRM = 0xF1; break;
- case X86II::MRM_F2: MRM = 0xF2; break;
- case X86II::MRM_F3: MRM = 0xF3; break;
- case X86II::MRM_F4: MRM = 0xF4; break;
- case X86II::MRM_F5: MRM = 0xF5; break;
- case X86II::MRM_F6: MRM = 0xF6; break;
- case X86II::MRM_F7: MRM = 0xF7; break;
- case X86II::MRM_F8: MRM = 0xF8; break;
- case X86II::MRM_F9: MRM = 0xF9; break;
- case X86II::MRM_FA: MRM = 0xFA; break;
- case X86II::MRM_FB: MRM = 0xFB; break;
- case X86II::MRM_FC: MRM = 0xFC; break;
- case X86II::MRM_FD: MRM = 0xFD; break;
- case X86II::MRM_FE: MRM = 0xFE; break;
- case X86II::MRM_FF: MRM = 0xFF; break;
- }
- EmitByte(MRM, CurByte, OS);
+ uint64_t Form = TSFlags & X86II::FormMask;
+ EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
break;
}
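
Because the renumbered MRM_C0..MRM_FF forms are now contiguous, the 52-case
switch could be replaced by the single subtraction above. A quick sanity
check of that arithmetic using the enum values this patch assigns:

  #include <cassert>

  int main() {
    const unsigned MRM_C0 = 32, MRM_D1 = 49, MRM_FF = 95;
    assert(0xC0 + MRM_C0 - MRM_C0 == 0xC0);
    assert(0xC0 + MRM_D1 - MRM_C0 == 0xD1);
    assert(0xC0 + MRM_FF - MRM_C0 == 0xFF);
  }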
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 5a9181d..0e7b4e5 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -134,18 +134,13 @@ bool X86_MC::GetCpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX,
"c" (subleaf));
return false;
#elif defined(_MSC_VER)
- // __cpuidex was added in MSVC++ 9.0 SP1
- #if (_MSC_VER > 1500) || (_MSC_VER == 1500 && _MSC_FULL_VER >= 150030729)
- int registers[4];
- __cpuidex(registers, value, subleaf);
- *rEAX = registers[0];
- *rEBX = registers[1];
- *rECX = registers[2];
- *rEDX = registers[3];
- return false;
- #else
- return true;
- #endif
+ int registers[4];
+ __cpuidex(registers, value, subleaf);
+ *rEAX = registers[0];
+ *rEBX = registers[1];
+ *rECX = registers[2];
+ *rEDX = registers[3];
+ return false;
#else
return true;
#endif
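
The deleted _MSC_VER guard dates from when __cpuidex (added in MSVC++ 9.0
SP1) could be missing; presumably LLVM's minimum supported MSVC now always
provides it. A hedged usage sketch of the intrinsic (MSVC-only; leaf 7 /
subleaf 0 is the standard structured-extended-features query):

  #ifdef _MSC_VER
  #include <intrin.h>
  #include <cstdio>

  int main() {
    int Regs[4]; // EAX, EBX, ECX, EDX
    __cpuidex(Regs, /*function_id=*/7, /*subfunction_id=*/0);
    std::printf("CPUID.7.0 EBX = %08x\n", Regs[1]);
  }
  #endif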
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index aef9571..d8320b9 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -40,8 +40,8 @@ namespace DWARFFlavour {
enum {
X86_64 = 0, X86_32_DarwinEH = 1, X86_32_Generic = 2
};
-}
-
+}
+
/// N86 namespace - Native X86 register numbers
///
namespace N86 {
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 5685a7f..7a83f4c 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -10,6 +10,7 @@
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
@@ -47,23 +48,21 @@ class X86MachObjectWriter : public MCMachObjectTargetWriter {
const MCFixup &Fixup,
MCValue Target,
uint64_t &FixedValue);
- void RecordX86_64Relocation(MachObjectWriter *Writer,
- const MCAssembler &Asm,
+ void RecordX86_64Relocation(MachObjectWriter *Writer, MCAssembler &Asm,
const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
- uint64_t &FixedValue);
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
+
public:
X86MachObjectWriter(bool Is64Bit, uint32_t CPUType,
uint32_t CPUSubtype)
: MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
/*UseAggressiveSymbolFolding=*/Is64Bit) {}
- void RecordRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue) override {
+ void RecordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) override {
if (Writer->is64Bit())
RecordX86_64Relocation(Writer, Asm, Layout, Fragment, Fixup, Target,
FixedValue);
@@ -97,13 +96,10 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
}
}
-void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
- const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
- uint64_t &FixedValue) {
+void X86MachObjectWriter::RecordX86_64Relocation(
+ MachObjectWriter *Writer, MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned IsRIPRel = isFixupKindRIPRel(Fixup.getKind());
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
@@ -117,6 +113,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
unsigned Index = 0;
unsigned IsExtern = 0;
unsigned Type = 0;
+ const MCSymbolData *RelSymbol = nullptr;
Value = Target.getConstant();
@@ -132,7 +129,6 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
if (Target.isAbsolute()) { // constant
// SymbolNum of 0 indicates the absolute section.
Type = MachO::X86_64_RELOC_UNSIGNED;
- Index = 0;
// FIXME: I believe this is broken, I don't think the linker can understand
// it. I think it would require a local relocation, but I'm not sure if that
@@ -184,7 +180,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
if (A->isUndefined() || B->isUndefined()) {
StringRef Name = A->isUndefined() ? A->getName() : B->getName();
Asm.getContext().FatalError(Fixup.getLoc(),
- "unsupported relocation with subtraction expression, symbol '" +
+ "unsupported relocation with subtraction expression, symbol '" +
Name + "' can not be undefined in a subtraction expression");
}
@@ -193,38 +189,30 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
Value -= Writer->getSymbolAddress(&B_SD, Layout) -
(!B_Base ? 0 : Writer->getSymbolAddress(B_Base, Layout));
- if (A_Base) {
- Index = A_Base->getIndex();
- IsExtern = 1;
- }
- else {
+ if (!A_Base)
Index = A_SD.getFragment()->getParent()->getOrdinal() + 1;
- IsExtern = 0;
- }
Type = MachO::X86_64_RELOC_UNSIGNED;
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
-
- if (B_Base) {
- Index = B_Base->getIndex();
- IsExtern = 1;
- }
- else {
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
+
+ if (B_Base)
+ RelSymbol = B_Base;
+ else
Index = B_SD.getFragment()->getParent()->getOrdinal() + 1;
- IsExtern = 0;
- }
Type = MachO::X86_64_RELOC_SUBTRACTOR;
} else {
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ if (Symbol->isTemporary() && Value) {
+ const MCSection &Sec = Symbol->getSection();
+ if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
+ Asm.addLocalUsedInReloc(*Symbol);
+ }
const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
- const MCSymbolData *Base = Asm.getAtom(&SD);
+ RelSymbol = Asm.getAtom(&SD);
// Relocations inside debug sections always use local relocations when
// possible. This seems to be done because the debugger doesn't fully
@@ -234,23 +222,20 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
const MCSectionMachO &Section = static_cast<const MCSectionMachO&>(
Fragment->getParent()->getSection());
if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
- Base = nullptr;
+ RelSymbol = nullptr;
}
// x86_64 almost always uses external relocations, except when there is no
// symbol to use as a base address (a local symbol with no preceding
// non-local symbol).
- if (Base) {
- Index = Base->getIndex();
- IsExtern = 1;
-
+ if (RelSymbol) {
// Add the local offset, if needed.
- if (Base != &SD)
- Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
+ if (RelSymbol != &SD)
+ Value +=
+ Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(RelSymbol);
} else if (Symbol->isInSection() && !Symbol->isVariable()) {
// The index is the section ordinal (1-based).
Index = SD.getFragment()->getParent()->getOrdinal() + 1;
- IsExtern = 0;
Value += Writer->getSymbolAddress(&SD, Layout);
if (IsPCRel)
@@ -349,12 +334,9 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 = (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
@@ -426,7 +408,7 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value2;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
} else {
// If the offset is more than 24-bits, it won't fit in a scattered
// relocation offset field, so we fall back to using a non-scattered
@@ -448,7 +430,7 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
(IsPCRel << 30) |
MachO::R_SCATTERED);
MRE.r_word1 = Value;
- Writer->addRelocation(Fragment->getParent(), MRE);
+ Writer->addRelocation(nullptr, Fragment->getParent(), MRE);
return true;
}
@@ -469,7 +451,6 @@ void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
// Get the symbol data.
const MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
- unsigned Index = SD_A->getIndex();
// We're only going to have a second symbol in pic mode and it'll be a
// subtraction from the picbase. For 32-bit pic the addend is the difference
@@ -492,12 +473,9 @@ void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = Value;
- MRE.r_word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (1 << 27) | // r_extern
- (MachO::GENERIC_RELOC_TLV << 28)); // r_type
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 =
+ (IsPCRel << 24) | (Log2Size << 25) | (MachO::GENERIC_RELOC_TLV << 28);
+ Writer->addRelocation(SD_A, Fragment->getParent(), MRE);
}
void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
@@ -548,8 +526,8 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned Index = 0;
- unsigned IsExtern = 0;
unsigned Type = 0;
+ const MCSymbolData *RelSymbol = nullptr;
if (Target.isAbsolute()) { // constant
// SymbolNum of 0 indicates the absolute section.
@@ -570,8 +548,7 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
// Check whether we need an external or internal relocation.
if (Writer->doesSymbolRequireExternRelocation(SD)) {
- IsExtern = 1;
- Index = SD->getIndex();
+ RelSymbol = SD;
// For external relocations, make sure to offset the fixup value to
// compensate for the addend of the symbol address, if it was
// undefined. This occurs with weak definitions, for example.
@@ -593,12 +570,9 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
- MRE.r_word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
- Writer->addRelocation(Fragment->getParent(), MRE);
+ MRE.r_word1 =
+ (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
+ Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
MCObjectWriter *llvm::createX86MachObjectWriter(raw_ostream &OS,
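
All of the r_word1 expressions in this file pack the same Mach-O
relocation_info bitfields. A small sketch of that layout (field widths per
<mach-o/reloc.h>; the symbol-based paths above now leave IsExtern to the
writer):

  #include <cstdint>

  // 24-bit symbol/section index, then r_pcrel, a 2-bit r_length (log2 of the
  // fixup size), r_extern, and a 4-bit r_type.
  uint32_t packRelocWord1(uint32_t Index, bool IsPCRel, uint32_t Log2Size,
                          bool IsExtern, uint32_t Type) {
    return (Index << 0) | ((uint32_t)IsPCRel << 24) | (Log2Size << 25) |
           ((uint32_t)IsExtern << 27) | (Type << 28);
  }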
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index 40af822..e1df5c2 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -28,7 +28,8 @@ namespace {
virtual ~X86WinCOFFObjectWriter();
unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsCrossSection) const override;
+ bool IsCrossSection,
+ const MCAsmBackend &MAB) const override;
};
}
@@ -40,7 +41,8 @@ X86WinCOFFObjectWriter::~X86WinCOFFObjectWriter() {}
unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsCrossSection) const {
+ bool IsCrossSection,
+ const MCAsmBackend &MAB) const {
unsigned FixupKind = IsCrossSection ? FK_PCRel_4 : Fixup.getKind();
MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
diff --git a/lib/Target/X86/TargetInfo/X86TargetInfo.cpp b/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
index 1ea8798..fceb083 100644
--- a/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
+++ b/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
@@ -13,7 +13,7 @@ using namespace llvm;
Target llvm::TheX86_32Target, llvm::TheX86_64Target;
-extern "C" void LLVMInitializeX86TargetInfo() {
+extern "C" void LLVMInitializeX86TargetInfo() {
RegisterTarget<Triple::x86, /*HasJIT=*/true>
X(TheX86_32Target, "x86", "32-bit X86: Pentium-Pro and above");
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index ba6cbc8..a7101e4 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -1,395 +1,434 @@
-//===-- X86ShuffleDecode.cpp - X86 shuffle decode logic -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Define several functions to decode x86 specific shuffle semantics into a
-// generic vector mask.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86ShuffleDecode.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/CodeGen/MachineValueType.h"
-
-//===----------------------------------------------------------------------===//
-// Vector Mask Decoding
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- // Defaults the copying the dest value.
- ShuffleMask.push_back(0);
- ShuffleMask.push_back(1);
- ShuffleMask.push_back(2);
- ShuffleMask.push_back(3);
-
- // Decode the immediate.
- unsigned ZMask = Imm & 15;
- unsigned CountD = (Imm >> 4) & 3;
- unsigned CountS = (Imm >> 6) & 3;
-
- // CountS selects which input element to use.
- unsigned InVal = 4+CountS;
- // CountD specifies which element of destination to update.
- ShuffleMask[CountD] = InVal;
- // ZMask zaps values, potentially overriding the CountD elt.
- if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero;
- if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero;
- if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero;
- if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
-}
-
-// <3,1> or <6,7,2,3>
-void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = NElts/2; i != NElts; ++i)
- ShuffleMask.push_back(NElts+i);
-
- for (unsigned i = NElts/2; i != NElts; ++i)
- ShuffleMask.push_back(i);
-}
-
-// <0,2> or <0,1,4,5>
-void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = 0; i != NElts/2; ++i)
- ShuffleMask.push_back(i);
-
- for (unsigned i = 0; i != NElts/2; ++i)
- ShuffleMask.push_back(NElts+i);
-}
-
-void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
- for (int i = 0, e = NumElts / 2; i < e; ++i) {
- ShuffleMask.push_back(2 * i);
- ShuffleMask.push_back(2 * i);
- }
-}
-
-void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
- for (int i = 0, e = NumElts / 2; i < e; ++i) {
- ShuffleMask.push_back(2 * i + 1);
- ShuffleMask.push_back(2 * i + 1);
- }
-}
-
-void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- unsigned VectorSizeInBits = VT.getSizeInBits();
- unsigned NumElts = VectorSizeInBits / 8;
- unsigned NumLanes = VectorSizeInBits / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
-
- for (unsigned l = 0; l < NumElts; l += NumLaneElts)
- for (unsigned i = 0; i < NumLaneElts; ++i) {
- int M = SM_SentinelZero;
- if (i >= Imm) M = i - Imm + l;
- ShuffleMask.push_back(M);
- }
-}
-
-void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- unsigned VectorSizeInBits = VT.getSizeInBits();
- unsigned NumElts = VectorSizeInBits / 8;
- unsigned NumLanes = VectorSizeInBits / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
-
- for (unsigned l = 0; l < NumElts; l += NumLaneElts)
- for (unsigned i = 0; i < NumLaneElts; ++i) {
- unsigned Base = i + Imm;
- int M = Base + l;
- if (Base >= NumLaneElts) M = SM_SentinelZero;
- ShuffleMask.push_back(M);
- }
-}
-
-void DecodePALIGNRMask(MVT VT, unsigned Imm,
- SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
- unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);
-
- unsigned NumLanes = VT.getSizeInBits() / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0; i != NumLaneElts; ++i) {
- unsigned Base = i + Offset;
- // if i+offset is out of this lane then we actually need the other source
- if (Base >= NumLaneElts) Base += NumElts - NumLaneElts;
- ShuffleMask.push_back(Base + l);
- }
- }
-}
-
-/// DecodePSHUFMask - This decodes the shuffle masks for pshufd, and vpermilp*.
-/// VT indicates the type of the vector allowing it to handle different
-/// datatypes and vector widths.
-void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- unsigned NumLanes = VT.getSizeInBits() / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
-
- unsigned NewImm = Imm;
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0; i != NumLaneElts; ++i) {
- ShuffleMask.push_back(NewImm % NumLaneElts + l);
- NewImm /= NumLaneElts;
- }
- if (NumLaneElts == 4) NewImm = Imm; // reload imm
- }
-}
-
-void DecodePSHUFHWMask(MVT VT, unsigned Imm,
- SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- for (unsigned l = 0; l != NumElts; l += 8) {
- unsigned NewImm = Imm;
- for (unsigned i = 0, e = 4; i != e; ++i) {
- ShuffleMask.push_back(l + i);
- }
- for (unsigned i = 4, e = 8; i != e; ++i) {
- ShuffleMask.push_back(l + 4 + (NewImm & 3));
- NewImm >>= 2;
- }
- }
-}
-
-void DecodePSHUFLWMask(MVT VT, unsigned Imm,
- SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- for (unsigned l = 0; l != NumElts; l += 8) {
- unsigned NewImm = Imm;
- for (unsigned i = 0, e = 4; i != e; ++i) {
- ShuffleMask.push_back(l + (NewImm & 3));
- NewImm >>= 2;
- }
- for (unsigned i = 4, e = 8; i != e; ++i) {
- ShuffleMask.push_back(l + i);
- }
- }
-}
-
-/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
-/// the type of the vector allowing it to handle different datatypes and vector
-/// widths.
-void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- unsigned NumLanes = VT.getSizeInBits() / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
-
- unsigned NewImm = Imm;
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- // each half of a lane comes from different source
- for (unsigned s = 0; s != NumElts*2; s += NumElts) {
- for (unsigned i = 0; i != NumLaneElts/2; ++i) {
- ShuffleMask.push_back(NewImm % NumLaneElts + s + l);
- NewImm /= NumLaneElts;
- }
- }
- if (NumLaneElts == 4) NewImm = Imm; // reload imm
- }
-}
-
-/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
-/// and punpckh*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
-void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits() / 128;
- if (NumLanes == 0 ) NumLanes = 1; // Handle MMX
- unsigned NumLaneElts = NumElts / NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = l + NumLaneElts/2, e = l + NumLaneElts; i != e; ++i) {
- ShuffleMask.push_back(i); // Reads from dest/src1
- ShuffleMask.push_back(i+NumElts); // Reads from src/src2
- }
- }
-}
-
-/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// and punpckl*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
-void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
- unsigned NumElts = VT.getVectorNumElements();
-
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits() / 128;
- if (NumLanes == 0 ) NumLanes = 1; // Handle MMX
- unsigned NumLaneElts = NumElts / NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = l, e = l + NumLaneElts/2; i != e; ++i) {
- ShuffleMask.push_back(i); // Reads from dest/src1
- ShuffleMask.push_back(i+NumElts); // Reads from src/src2
- }
- }
-}
-
-void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
- SmallVectorImpl<int> &ShuffleMask) {
- if (Imm & 0x88)
- return; // Not a shuffle
-
- unsigned HalfSize = VT.getVectorNumElements()/2;
-
- for (unsigned l = 0; l != 2; ++l) {
- unsigned HalfBegin = ((Imm >> (l*4)) & 0x3) * HalfSize;
- for (unsigned i = HalfBegin, e = HalfBegin+HalfSize; i != e; ++i)
- ShuffleMask.push_back(i);
- }
-}
-
-void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
- Type *MaskTy = C->getType();
- assert(MaskTy->isVectorTy() && "Expected a vector constant mask!");
- assert(MaskTy->getVectorElementType()->isIntegerTy(8) &&
- "Expected i8 constant mask elements!");
- int NumElements = MaskTy->getVectorNumElements();
- // FIXME: Add support for AVX-512.
- assert((NumElements == 16 || NumElements == 32) &&
- "Only 128-bit and 256-bit vectors supported!");
- ShuffleMask.reserve(NumElements);
-
- if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
- assert((unsigned)NumElements == CDS->getNumElements() &&
- "Constant mask has a different number of elements!");
-
- for (int i = 0; i < NumElements; ++i) {
- // For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
- // lane of the vector we're inside.
- int Base = i < 16 ? 0 : 16;
- uint64_t Element = CDS->getElementAsInteger(i);
- // If the high bit (7) of the byte is set, the element is zeroed.
- if (Element & (1 << 7))
- ShuffleMask.push_back(SM_SentinelZero);
- else {
- // Only the least significant 4 bits of the byte are used.
- int Index = Base + (Element & 0xf);
- ShuffleMask.push_back(Index);
- }
- }
- } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
- assert((unsigned)NumElements == CV->getNumOperands() &&
- "Constant mask has a different number of elements!");
-
- for (int i = 0; i < NumElements; ++i) {
- // For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
- // lane of the vector we're inside.
- int Base = i < 16 ? 0 : 16;
- Constant *COp = CV->getOperand(i);
- if (isa<UndefValue>(COp)) {
- ShuffleMask.push_back(SM_SentinelUndef);
- continue;
- }
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- // If the high bit (7) of the byte is set, the element is zeroed.
- if (Element & (1 << 7))
- ShuffleMask.push_back(SM_SentinelZero);
- else {
- // Only the least significant 4 bits of the byte are used.
- int Index = Base + (Element & 0xf);
- ShuffleMask.push_back(Index);
- }
- }
- }
-}
-
-void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
- SmallVectorImpl<int> &ShuffleMask) {
- for (int i = 0, e = RawMask.size(); i < e; ++i) {
- uint64_t M = RawMask[i];
- if (M == (uint64_t)SM_SentinelUndef) {
- ShuffleMask.push_back(M);
- continue;
- }
- // For AVX vectors with 32 bytes the base of the shuffle is the half of
- // the vector we're inside.
- int Base = i < 16 ? 0 : 16;
- // If the high bit (7) of the byte is set, the element is zeroed.
- if (M & (1 << 7))
- ShuffleMask.push_back(SM_SentinelZero);
- else {
- // Only the least significant 4 bits of the byte are used.
- int Index = Base + (M & 0xf);
- ShuffleMask.push_back(Index);
- }
- }
-}
-
-void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- int ElementBits = VT.getScalarSizeInBits();
- int NumElements = VT.getVectorNumElements();
- for (int i = 0; i < NumElements; ++i) {
- // If there are more than 8 elements in the vector, then any immediate blend
- // mask applies to each 128-bit lane. There can never be more than
- // 8 elements in a 128-bit lane with an immediate blend.
- int Bit = NumElements > 8 ? i % (128 / ElementBits) : i;
- assert(Bit < 8 &&
- "Immediate blends only operate over 8 elements at a time!");
- ShuffleMask.push_back(((Imm >> Bit) & 1) ? NumElements + i : i);
- }
-}
-
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = 0; i != 4; ++i) {
- ShuffleMask.push_back((Imm >> (2*i)) & 3);
- }
-}
-
-void DecodeVPERMILPMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
- Type *MaskTy = C->getType();
- assert(MaskTy->isVectorTy() && "Expected a vector constant mask!");
- assert(MaskTy->getVectorElementType()->isIntegerTy() &&
- "Expected integer constant mask elements!");
- int ElementBits = MaskTy->getScalarSizeInBits();
- int NumElements = MaskTy->getVectorNumElements();
- assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
- "Unexpected number of vector elements.");
- ShuffleMask.reserve(NumElements);
- if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
- assert((unsigned)NumElements == CDS->getNumElements() &&
- "Constant mask has a different number of elements!");
-
- for (int i = 0; i < NumElements; ++i) {
- int Base = (i * ElementBits / 128) * (128 / ElementBits);
- uint64_t Element = CDS->getElementAsInteger(i);
- // Only the least significant 2 bits of the integer are used.
- int Index = Base + (Element & 0x3);
- ShuffleMask.push_back(Index);
- }
- } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
- assert((unsigned)NumElements == C->getNumOperands() &&
- "Constant mask has a different number of elements!");
-
- for (int i = 0; i < NumElements; ++i) {
- int Base = (i * ElementBits / 128) * (128 / ElementBits);
- Constant *COp = CV->getOperand(i);
- if (isa<UndefValue>(COp)) {
- ShuffleMask.push_back(SM_SentinelUndef);
- continue;
- }
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- // Only the least significant 2 bits of the integer are used.
- int Index = Base + (Element & 0x3);
- ShuffleMask.push_back(Index);
- }
- }
-}
-
-} // llvm namespace
+//===-- X86ShuffleDecode.cpp - X86 shuffle decode logic -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define several functions to decode x86 specific shuffle semantics into a
+// generic vector mask.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86ShuffleDecode.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/CodeGen/MachineValueType.h"
+
+//===----------------------------------------------------------------------===//
+// Vector Mask Decoding
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+
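+// Worked example for the decode below: Imm = 0x61 gives ZMask = 1,
+// CountD = 2 and CountS = 1, so element 2 of the default <0,1,2,3> mask
+// becomes 4+1 = 5 and the ZMask zeroes element 0: <zero,1,5,3>.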
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ // Default to copying the dest value.
+ ShuffleMask.push_back(0);
+ ShuffleMask.push_back(1);
+ ShuffleMask.push_back(2);
+ ShuffleMask.push_back(3);
+
+ // Decode the immediate.
+ unsigned ZMask = Imm & 15;
+ unsigned CountD = (Imm >> 4) & 3;
+ unsigned CountS = (Imm >> 6) & 3;
+
+ // CountS selects which input element to use.
+ unsigned InVal = 4+CountS;
+ // CountD specifies which element of destination to update.
+ ShuffleMask[CountD] = InVal;
+ // ZMask zaps values, potentially overriding the CountD elt.
+ if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero;
+ if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero;
+ if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero;
+ if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
+}
+
+// <3,1> or <6,7,2,3>
+void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
+ for (unsigned i = NElts/2; i != NElts; ++i)
+ ShuffleMask.push_back(NElts+i);
+
+ for (unsigned i = NElts/2; i != NElts; ++i)
+ ShuffleMask.push_back(i);
+}
+
+// <0,2> or <0,1,4,5>
+void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i)
+ ShuffleMask.push_back(i);
+
+ for (unsigned i = 0; i != NElts/2; ++i)
+ ShuffleMask.push_back(NElts+i);
+}
+
+void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ for (int i = 0, e = NumElts / 2; i < e; ++i) {
+ ShuffleMask.push_back(2 * i);
+ ShuffleMask.push_back(2 * i);
+ }
+}
+
+void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ for (int i = 0, e = NumElts / 2; i < e; ++i) {
+ ShuffleMask.push_back(2 * i + 1);
+ ShuffleMask.push_back(2 * i + 1);
+ }
+}
+
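+// Example for the decode below: v4f64 movddup broadcasts the low 64-bit
+// element of each 128-bit lane, giving the mask <0,0,2,2>.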
+void DecodeMOVDDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VectorSizeInBits = VT.getSizeInBits();
+ unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumLanes = VectorSizeInBits / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+ unsigned NumLaneSubElts = 64 / ScalarSizeInBits;
+
+ for (unsigned l = 0; l < NumElts; l += NumLaneElts)
+ for (unsigned i = 0; i < NumLaneElts; i += NumLaneSubElts)
+ for (unsigned s = 0; s != NumLaneSubElts; s++)
+ ShuffleMask.push_back(l + s);
+}
+
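+// Example for the two byte-shift decodes below: v16i8 pslldq with Imm = 3
+// yields <zero,zero,zero,0,...,12>; v16i8 psrldq with Imm = 3 yields
+// <3,...,15,zero,zero,zero>.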
+void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VectorSizeInBits = VT.getSizeInBits();
+ unsigned NumElts = VectorSizeInBits / 8;
+ unsigned NumLanes = VectorSizeInBits / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l < NumElts; l += NumLaneElts)
+ for (unsigned i = 0; i < NumLaneElts; ++i) {
+ int M = SM_SentinelZero;
+ if (i >= Imm) M = i - Imm + l;
+ ShuffleMask.push_back(M);
+ }
+}
+
+void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VectorSizeInBits = VT.getSizeInBits();
+ unsigned NumElts = VectorSizeInBits / 8;
+ unsigned NumLanes = VectorSizeInBits / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l < NumElts; l += NumLaneElts)
+ for (unsigned i = 0; i < NumLaneElts; ++i) {
+ unsigned Base = i + Imm;
+ int M = Base + l;
+ if (Base >= NumLaneElts) M = SM_SentinelZero;
+ ShuffleMask.push_back(M);
+ }
+}
+
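+// Example for the decode below: v16i8 palignr with Imm = 4 shifts the
+// src2:src1 concatenation right by 4 bytes, giving <4,...,15,16,17,18,19>.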
+void DecodePALIGNRMask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);
+
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ unsigned Base = i + Offset;
+ // If i+offset is outside this lane, the element comes from the other source.
+ if (Base >= NumLaneElts) Base += NumElts - NumLaneElts;
+ ShuffleMask.push_back(Base + l);
+ }
+ }
+}
+
+/// DecodePSHUFMask - This decodes the shuffle masks for pshufd, and vpermilp*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
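+/// For example, a v4i32 pshufd with Imm = 0x1B (0b00011011) selects
+/// <3,2,1,0>: each pair of immediate bits picks one element per 128-bit lane.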
+void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ unsigned NewImm = Imm;
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + l);
+ NewImm /= NumLaneElts;
+ }
+ if (NumLaneElts == 4) NewImm = Imm; // reload imm
+ }
+}
+
+void DecodePSHUFHWMask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ unsigned NewImm = Imm;
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ ShuffleMask.push_back(l + i);
+ }
+ for (unsigned i = 4, e = 8; i != e; ++i) {
+ ShuffleMask.push_back(l + 4 + (NewImm & 3));
+ NewImm >>= 2;
+ }
+ }
+}
+
+void DecodePSHUFLWMask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ unsigned NewImm = Imm;
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ ShuffleMask.push_back(l + (NewImm & 3));
+ NewImm >>= 2;
+ }
+ for (unsigned i = 4, e = 8; i != e; ++i) {
+ ShuffleMask.push_back(l + i);
+ }
+ }
+}
+
+/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
+/// the type of the vector allowing it to handle different datatypes and vector
+/// widths.
+void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ unsigned NewImm = Imm;
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ // each half of a lane comes from different source
+ for (unsigned s = 0; s != NumElts*2; s += NumElts) {
+ for (unsigned i = 0; i != NumLaneElts/2; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + s + l);
+ NewImm /= NumLaneElts;
+ }
+ }
+ if (NumLaneElts == 4) NewImm = Imm; // reload imm
+ }
+}
+
+/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
+/// and punpckh*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
+ // independently on 128-bit lanes.
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ if (NumLanes == 0) NumLanes = 1; // Handle MMX
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = l + NumLaneElts/2, e = l + NumLaneElts; i != e; ++i) {
+ ShuffleMask.push_back(i); // Reads from dest/src1
+ ShuffleMask.push_back(i+NumElts); // Reads from src/src2
+ }
+ }
+}
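+
+// For example, DecodeUNPCKHMask(MVT::v4i32, Mask) fills Mask with <2,6,3,7>.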
+
+/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// and punpckl*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
+ // independently on 128-bit lanes.
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ if (NumLanes == 0) NumLanes = 1; // Handle MMX
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = l, e = l + NumLaneElts/2; i != e; ++i) {
+ ShuffleMask.push_back(i); // Reads from dest/src1
+ ShuffleMask.push_back(i+NumElts); // Reads from src/src2
+ }
+ }
+}
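+
+// For example, DecodeUNPCKLMask(MVT::v4i32, Mask) fills Mask with <0,4,1,5>.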
+
+void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ if (Imm & 0x88)
+ return; // Not a shuffle
+
+ unsigned HalfSize = VT.getVectorNumElements()/2;
+
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned HalfBegin = ((Imm >> (l*4)) & 0x3) * HalfSize;
+ for (unsigned i = HalfBegin, e = HalfBegin+HalfSize; i != e; ++i)
+ ShuffleMask.push_back(i);
+ }
+}
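+
+// For example, DecodeVPERM2X128Mask(MVT::v8i32, 0x31, Mask) fills Mask with
+// <4,5,6,7,12,13,14,15>: the high 128-bit half of each source.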
+
+void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+ // It is not an error for the PSHUFB mask to not be a vector of i8 because the
+ // constant pool uniques constants by their bit representation.
+ // e.g. the following take up the same space in the constant pool:
+ // i128 -170141183420855150465331762880109871104
+ //
+ // <2 x i64> <i64 -9223372034707292160, i64 -9223372034707292160>
+ //
+ // <4 x i32> <i32 -2147483648, i32 -2147483648,
+ // i32 -2147483648, i32 -2147483648>
+
+ unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+
+ if (MaskTySize != 128 && MaskTySize != 256) // FIXME: Add support for AVX-512.
+ return;
+
+ // This is a straightforward byte vector.
+ if (MaskTy->isVectorTy() && MaskTy->getVectorElementType()->isIntegerTy(8)) {
+ int NumElements = MaskTy->getVectorNumElements();
+ ShuffleMask.reserve(NumElements);
+
+ for (int i = 0; i < NumElements; ++i) {
+ // For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
+ // lane of the vector we're inside.
+ int Base = i < 16 ? 0 : 16;
+ Constant *COp = C->getAggregateElement(i);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.push_back(SM_SentinelUndef);
+ continue;
+ }
+ uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
+ // If the high bit (7) of the byte is set, the element is zeroed.
+ if (Element & (1 << 7))
+ ShuffleMask.push_back(SM_SentinelZero);
+ else {
+ // Only the least significant 4 bits of the byte are used.
+ int Index = Base + (Element & 0xf);
+ ShuffleMask.push_back(Index);
+ }
+ }
+ }
+ // TODO: Handle funny-looking vectors too.
+}
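+
+// For example, a <16 x i8> mask constant starting <0, 1, 0x80, 5, ...>
+// decodes to <0, 1, Z, 5, ...>: byte 0x80 has bit 7 set, so element 2
+// becomes SM_SentinelZero, while the other bytes index via their low 4 bits.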
+
+void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ for (int i = 0, e = RawMask.size(); i < e; ++i) {
+ uint64_t M = RawMask[i];
+ if (M == (uint64_t)SM_SentinelUndef) {
+ ShuffleMask.push_back(M);
+ continue;
+ }
+ // For AVX vectors with 32 bytes, the base of the shuffle is the 16-byte
+ // half of the vector we're inside.
+ int Base = i < 16 ? 0 : 16;
+ // If the high bit (7) of the byte is set, the element is zeroed.
+ if (M & (1 << 7))
+ ShuffleMask.push_back(SM_SentinelZero);
+ else {
+ // Only the least significant 4 bits of the byte are used.
+ int Index = Base + (M & 0xf);
+ ShuffleMask.push_back(Index);
+ }
+ }
+}
+
+void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ int ElementBits = VT.getScalarSizeInBits();
+ int NumElements = VT.getVectorNumElements();
+ for (int i = 0; i < NumElements; ++i) {
+ // If there are more than 8 elements in the vector, then any immediate blend
+ // mask applies to each 128-bit lane. There can never be more than
+ // 8 elements in a 128-bit lane with an immediate blend.
+ int Bit = NumElements > 8 ? i % (128 / ElementBits) : i;
+ assert(Bit < 8 &&
+ "Immediate blends only operate over 8 elements at a time!");
+ ShuffleMask.push_back(((Imm >> Bit) & 1) ? NumElements + i : i);
+ }
+}
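+
+// For example, DecodeBLENDMask(MVT::v8i32, 0xAA, Mask) fills Mask with
+// <0,9,2,11,4,13,6,15>: each set bit of the immediate selects the
+// corresponding element from the second source.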
+
+/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
+/// No VT provided since it only works on 256-bit, 4 element vectors.
+void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ for (unsigned i = 0; i != 4; ++i) {
+ ShuffleMask.push_back((Imm >> (2*i)) & 3);
+ }
+}
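+
+// For example, DecodeVPERMMask(0x1B, Mask) fills Mask with <3,2,1,0>,
+// reversing the four 64-bit elements.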
+
+void DecodeVPERMILPMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+ assert(MaskTy->isVectorTy() && "Expected a vector constant mask!");
+ assert(MaskTy->getVectorElementType()->isIntegerTy() &&
+ "Expected integer constant mask elements!");
+ int ElementBits = MaskTy->getScalarSizeInBits();
+ int NumElements = MaskTy->getVectorNumElements();
+ assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
+ "Unexpected number of vector elements.");
+ ShuffleMask.reserve(NumElements);
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
+ assert((unsigned)NumElements == CDS->getNumElements() &&
+ "Constant mask has a different number of elements!");
+
+ for (int i = 0; i < NumElements; ++i) {
+ int Base = (i * ElementBits / 128) * (128 / ElementBits);
+ uint64_t Element = CDS->getElementAsInteger(i);
+ // Only the least significant 2 bits of the integer are used.
+ int Index = Base + (Element & 0x3);
+ ShuffleMask.push_back(Index);
+ }
+ } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
+ assert((unsigned)NumElements == C->getNumOperands() &&
+ "Constant mask has a different number of elements!");
+
+ for (int i = 0; i < NumElements; ++i) {
+ int Base = (i * ElementBits / 128) * (128 / ElementBits);
+ Constant *COp = CV->getOperand(i);
+ if (isa<UndefValue>(COp)) {
+ ShuffleMask.push_back(SM_SentinelUndef);
+ continue;
+ }
+ uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
+ // Only the least significant 2 bits of the integer are used.
+ int Index = Base + (Element & 0x3);
+ ShuffleMask.push_back(Index);
+ }
+ }
+}
+
+void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
+ unsigned NumDstElts = DstVT.getVectorNumElements();
+ unsigned SrcScalarBits = SrcVT.getScalarSizeInBits();
+ unsigned DstScalarBits = DstVT.getScalarSizeInBits();
+ unsigned Scale = DstScalarBits / SrcScalarBits;
+ assert(SrcScalarBits < DstScalarBits &&
+ "Expected zero extension mask to increase scalar size");
+ assert(SrcVT.getVectorNumElements() >= NumDstElts &&
+ "Too many zero extension lanes");
+
+ for (unsigned i = 0; i != NumDstElts; i++) {
+ Mask.push_back(i);
+ for (unsigned j = 1; j != Scale; j++)
+ Mask.push_back(SM_SentinelZero);
+ }
+}
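+
+// For example, DecodeZeroExtendMask(MVT::v8i16, MVT::v4i32, Mask) fills Mask
+// with <0,Z,1,Z,2,Z,3,Z> (Z == SM_SentinelZero), matching PMOVZXWD.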
+
+void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ ShuffleMask.push_back(0);
+ for (unsigned i = 1; i < NumElts; i++)
+ ShuffleMask.push_back(SM_SentinelZero);
+}
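+
+// For example, DecodeZeroMoveLowMask(MVT::v4i32, Mask) fills Mask with
+// <0,Z,Z,Z>.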
+
+void DecodeScalarMoveMask(MVT VT, bool IsLoad, SmallVectorImpl<int> &Mask) {
+ // The first element comes from the first element of the second source.
+ // Remaining elements: Load zero extends / Move copies from first source.
+ unsigned NumElts = VT.getVectorNumElements();
+ Mask.push_back(NumElts);
+ for (unsigned i = 1; i < NumElts; i++)
+ Mask.push_back(IsLoad ? static_cast<int>(SM_SentinelZero) : i);
+}
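+
+// For example, DecodeScalarMoveMask(MVT::v4f32, /*IsLoad=*/true, Mask) fills
+// Mask with <4,Z,Z,Z> (the load form zeros the upper elements), while
+// IsLoad == false yields <4,1,2,3> (the register form copies them).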
+} // llvm namespace
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index 6ba3c64..5c9a8cf 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -1,93 +1,105 @@
-//===-- X86ShuffleDecode.h - X86 shuffle decode logic -----------*-C++-*---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Define several functions to decode x86 specific shuffle semantics into a
-// generic vector mask.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
-#define LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/ArrayRef.h"
-
-//===----------------------------------------------------------------------===//
-// Vector Mask Decoding
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-class Constant;
-class MVT;
-
-enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 };
-
-void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-// <3,1> or <6,7,2,3>
-void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-
-// <0,2> or <0,1,4,5>
-void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodePSHUFLWMask(MVT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
-/// the type of the vector allowing it to handle different datatypes and vector
-/// widths.
-void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
-/// and punpckh*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
-void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-
-/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// and punpckl*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
-void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-
-/// \brief Decode a PSHUFB mask from an IR-level vector constant.
-void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
-
-/// \brief Decode a PSHUFB mask from a raw array of constants such as from
-/// BUILD_VECTOR.
-void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
- SmallVectorImpl<int> &ShuffleMask);
-
-/// \brief Decode a BLEND immediate mask into a shuffle mask.
-void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
- SmallVectorImpl<int> &ShuffleMask);
-
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-
-/// \brief Decode a VPERMILP variable mask from an IR-level vector constant.
-void DecodeVPERMILPMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
-
-} // llvm namespace
-
-#endif
+//===-- X86ShuffleDecode.h - X86 shuffle decode logic -----------*-C++-*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define several functions to decode x86 specific shuffle semantics into a
+// generic vector mask.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
+#define LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ArrayRef.h"
+
+//===----------------------------------------------------------------------===//
+// Vector Mask Decoding
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+class Constant;
+class MVT;
+
+enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 };
+
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+// <3,1> or <6,7,2,3>
+void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
+
+// <0,2> or <0,1,4,5>
+void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodeMOVSHDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodeMOVDDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePSLLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodePSHUFLWMask(MVT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
+/// the type of the vector allowing it to handle different datatypes and vector
+/// widths.
+void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
+/// and punpckh*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// and punpckl*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a PSHUFB mask from an IR-level vector constant.
+void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a PSHUFB mask from a raw array of constants such as from
+/// BUILD_VECTOR.
+void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a BLEND immediate mask into a shuffle mask.
+void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
+/// No VT provided since it only works on 256-bit, 4 element vectors.
+void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a VPERMILP variable mask from an IR-level vector constant.
+void DecodeVPERMILPMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a zero extension instruction as a shuffle mask.
+void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a move lower and zero upper instruction as a shuffle mask.
+void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
+
+/// \brief Decode a scalar float move instruction as a shuffle mask.
+void DecodeScalarMoveMask(MVT VT, bool IsLoad,
+ SmallVectorImpl<int> &ShuffleMask);
+} // llvm namespace
+
+#endif
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index 8bd5817..8b0a4cf 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -55,9 +55,6 @@ FunctionPass *createX86IssueVZeroUpperPass();
///
FunctionPass *createEmitX86CodeToMemory();
-/// \brief Creates an X86-specific Target Transformation Info pass.
-ImmutablePass *createX86TargetTransformInfoPass(const X86TargetMachine *TM);
-
/// createX86PadShortFunctions - Return a pass that pads short functions
/// with NOOPs. This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();
@@ -67,6 +64,11 @@ FunctionPass *createX86PadShortFunctions();
/// to eliminate execution delays in some Atom processors.
FunctionPass *createX86FixupLEAs();
+/// createX86CallFrameOptimization - Return a pass that optimizes
+/// the code-size of x86 call sequences. This is done by replacing
+/// esp-relative movs with pushes.
+FunctionPass *createX86CallFrameOptimization();
+
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 83f55d3..4f9836d 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -79,9 +79,16 @@ def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
"SHLD instruction is slow">;
+// FIXME: This is a 16-byte (SSE/AVX) feature; we should rename it to make that
+// explicit. Also, it seems this would be the default state for most chips
+// going forward, so it would probably be better to negate the logic and
+// match the 32-byte "slow mem" feature below.
def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
"IsUAMemFast", "true",
"Fast unaligned memory access">;
+def FeatureSlowUAMem32 : SubtargetFeature<"slow-unaligned-mem-32",
+ "IsUAMem32Slow", "true",
+ "Slow unaligned 32-byte memory access">;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
"Support SSE 4a instructions",
[FeatureSSE3]>;
@@ -125,9 +132,9 @@ def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true",
"Enable XOP instructions",
[FeatureFMA4]>;
-def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
- "HasVectorUAMem", "true",
- "Allow unaligned memory operands on vector/SIMD instructions">;
+def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem",
+ "HasSSEUnalignedMem", "true",
+ "Allow unaligned memory operands with SSE instructions">;
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
"Enable AES instructions",
[FeatureSSE2]>;
@@ -157,19 +164,18 @@ def FeatureADX : SubtargetFeature<"adx", "HasADX", "true",
def FeatureSHA : SubtargetFeature<"sha", "HasSHA", "true",
"Enable SHA instructions",
[FeatureSSE2]>;
-def FeatureSGX : SubtargetFeature<"sgx", "HasSGX", "true",
- "Support SGX instructions">;
def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
"Support PRFCHW instructions">;
def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
"Support RDSEED instruction">;
-def FeatureSMAP : SubtargetFeature<"smap", "HasSMAP", "true",
- "Support SMAP instructions">;
def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
"Use LEA for adjusting the stack pointer">;
-def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb",
- "HasSlowDivide", "true",
- "Use small divide for positive values less than 256">;
+def FeatureSlowDivide32 : SubtargetFeature<"idivl-to-divb",
+ "HasSlowDivide32", "true",
+ "Use 8-bit divide for positive values less than 256">;
+def FeatureSlowDivide64 : SubtargetFeature<"idivq-to-divw",
+ "HasSlowDivide64", "true",
+ "Use 16-bit divide for positive values less than 65536">;
def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
"PadShortFunctions", "true",
"Pad short functions">;
@@ -230,86 +236,166 @@ def : ProcessorModel<"core2", SandyBridgeModel,
def : ProcessorModel<"penryn", SandyBridgeModel,
[FeatureSSE41, FeatureCMPXCHG16B, FeatureSlowBTMem]>;
-// Atom.
-def : ProcessorModel<"atom", AtomModel,
- [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B,
- FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP,
- FeatureSlowDivide,
- FeatureCallRegIndirect,
- FeatureLEAUsesAG,
- FeaturePadShortFunctions]>;
-
-// Atom Silvermont.
-def : ProcessorModel<"slm", SLMModel, [ProcIntelSLM,
- FeatureSSE42, FeatureCMPXCHG16B,
- FeatureMOVBE, FeaturePOPCNT,
- FeaturePCLMUL, FeatureAES,
- FeatureCallRegIndirect,
- FeaturePRFCHW,
- FeatureSlowLEA, FeatureSlowIncDec,
- FeatureSlowBTMem, FeatureFastUAMem]>;
+// Atom CPUs.
+class BonnellProc<string Name> : ProcessorModel<Name, AtomModel, [
+ ProcIntelAtom,
+ FeatureSSSE3,
+ FeatureCMPXCHG16B,
+ FeatureMOVBE,
+ FeatureSlowBTMem,
+ FeatureLeaForSP,
+ FeatureSlowDivide32,
+ FeatureSlowDivide64,
+ FeatureCallRegIndirect,
+ FeatureLEAUsesAG,
+ FeaturePadShortFunctions
+ ]>;
+def : BonnellProc<"bonnell">;
+def : BonnellProc<"atom">; // Pin the generic name to the baseline.
+
+class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
+ ProcIntelSLM,
+ FeatureSSE42,
+ FeatureCMPXCHG16B,
+ FeatureMOVBE,
+ FeaturePOPCNT,
+ FeaturePCLMUL,
+ FeatureAES,
+ FeatureSlowDivide64,
+ FeatureCallRegIndirect,
+ FeaturePRFCHW,
+ FeatureSlowLEA,
+ FeatureSlowIncDec,
+ FeatureSlowBTMem,
+ FeatureFastUAMem
+ ]>;
+def : SilvermontProc<"silvermont">;
+def : SilvermontProc<"slm">; // Legacy alias.
+
// "Arrandale" along with corei3 and corei5
-def : ProcessorModel<"corei7", SandyBridgeModel,
- [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureFastUAMem, FeaturePOPCNT, FeatureAES]>;
+class NehalemProc<string Name, list<SubtargetFeature> AdditionalFeatures>
+ : ProcessorModel<Name, SandyBridgeModel, !listconcat([
+ FeatureSSE42,
+ FeatureCMPXCHG16B,
+ FeatureSlowBTMem,
+ FeatureFastUAMem,
+ FeaturePOPCNT
+ ],
+ AdditionalFeatures)>;
+def : NehalemProc<"nehalem", []>;
+def : NehalemProc<"corei7", [FeatureAES]>;
-def : ProcessorModel<"nehalem", SandyBridgeModel,
- [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureFastUAMem, FeaturePOPCNT]>;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
-def : ProcessorModel<"westmere", SandyBridgeModel,
- [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureFastUAMem, FeaturePOPCNT, FeatureAES,
- FeaturePCLMUL]>;
-// Sandy Bridge
+class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureSSE42,
+ FeatureCMPXCHG16B,
+ FeatureSlowBTMem,
+ FeatureFastUAMem,
+ FeaturePOPCNT,
+ FeatureAES,
+ FeaturePCLMUL
+ ]>;
+def : WestmereProc<"westmere">;
+
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
-def : ProcessorModel<"corei7-avx", SandyBridgeModel,
- [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem,
- FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>;
-// Ivy Bridge
-def : ProcessorModel<"core-avx-i", SandyBridgeModel,
- [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem,
- FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND,
- FeatureF16C, FeatureFSGSBase]>;
-
-// Haswell
-def : ProcessorModel<"core-avx2", HaswellModel,
- [FeatureAVX2, FeatureCMPXCHG16B, FeatureFastUAMem,
- FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND,
- FeatureF16C, FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT,
- FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM,
- FeatureHLE, FeatureSlowIncDec]>;
-
-// Broadwell
-def : ProcessorModel<"broadwell", HaswellModel,
- [FeatureAVX2, FeatureCMPXCHG16B, FeatureFastUAMem,
- FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND,
- FeatureF16C, FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT,
- FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM,
- FeatureHLE, FeatureADX, FeatureRDSEED, FeatureSMAP,
- FeatureSlowIncDec]>;
-// KNL
+class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureAVX,
+ FeatureCMPXCHG16B,
+ FeatureFastUAMem,
+ FeatureSlowUAMem32,
+ FeaturePOPCNT,
+ FeatureAES,
+ FeaturePCLMUL
+ ]>;
+def : SandyBridgeProc<"sandybridge">;
+def : SandyBridgeProc<"corei7-avx">; // Legacy alias.
+
+class IvyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureAVX,
+ FeatureCMPXCHG16B,
+ FeatureFastUAMem,
+ FeatureSlowUAMem32,
+ FeaturePOPCNT,
+ FeatureAES,
+ FeaturePCLMUL,
+ FeatureRDRAND,
+ FeatureF16C,
+ FeatureFSGSBase
+ ]>;
+def : IvyBridgeProc<"ivybridge">;
+def : IvyBridgeProc<"core-avx-i">; // Legacy alias.
+
+class HaswellProc<string Name> : ProcessorModel<Name, HaswellModel, [
+ FeatureAVX2,
+ FeatureCMPXCHG16B,
+ FeatureFastUAMem,
+ FeaturePOPCNT,
+ FeatureAES,
+ FeaturePCLMUL,
+ FeatureRDRAND,
+ FeatureF16C,
+ FeatureFSGSBase,
+ FeatureMOVBE,
+ FeatureLZCNT,
+ FeatureBMI,
+ FeatureBMI2,
+ FeatureFMA,
+ FeatureRTM,
+ FeatureHLE,
+ FeatureSlowIncDec
+ ]>;
+def : HaswellProc<"haswell">;
+def : HaswellProc<"core-avx2">; // Legacy alias.
+
+class BroadwellProc<string Name> : ProcessorModel<Name, HaswellModel, [
+ FeatureAVX2,
+ FeatureCMPXCHG16B,
+ FeatureFastUAMem,
+ FeaturePOPCNT,
+ FeatureAES,
+ FeaturePCLMUL,
+ FeatureRDRAND,
+ FeatureF16C,
+ FeatureFSGSBase,
+ FeatureMOVBE,
+ FeatureLZCNT,
+ FeatureBMI,
+ FeatureBMI2,
+ FeatureFMA,
+ FeatureRTM,
+ FeatureHLE,
+ FeatureADX,
+ FeatureRDSEED,
+ FeatureSlowIncDec
+ ]>;
+def : BroadwellProc<"broadwell">;
+
// FIXME: define KNL model
-def : ProcessorModel<"knl", HaswellModel,
+class KnightsLandingProc<string Name> : ProcessorModel<Name, HaswellModel,
[FeatureAVX512, FeatureERI, FeatureCDI, FeaturePFI,
FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
FeatureSlowIncDec]>;
+def : KnightsLandingProc<"knl">;
-// SKX
// FIXME: define SKX model
-def : ProcessorModel<"skx", HaswellModel,
+class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel,
[FeatureAVX512, FeatureCDI,
FeatureDQI, FeatureBWI, FeatureVLX,
FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
- FeatureSlowIncDec, FeatureSGX]>;
+ FeatureSlowIncDec]>;
+def : SkylakeProc<"skylake">;
+def : SkylakeProc<"skx">; // Legacy alias.
+
+// AMD CPUs.
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
@@ -318,7 +404,7 @@ def : Proc<"athlon", [Feature3DNowA, FeatureSlowBTMem,
FeatureSlowSHLD]>;
def : Proc<"athlon-tbird", [Feature3DNowA, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"athlon-4", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
+def : Proc<"athlon-4", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
FeatureSlowSHLD]>;
def : Proc<"athlon-xp", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
FeatureSlowSHLD]>;
@@ -342,6 +428,10 @@ def : Proc<"amdfam10", [FeatureSSE4A,
Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
FeaturePOPCNT, FeatureSlowBTMem,
FeatureSlowSHLD]>;
+def : Proc<"barcelona", [FeatureSSE4A,
+ Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
// Bobcat
def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT,
@@ -352,8 +442,10 @@ def : ProcessorModel<"btver2", BtVer2Model,
[FeatureAVX, FeatureSSE4A, FeatureCMPXCHG16B,
FeaturePRFCHW, FeatureAES, FeaturePCLMUL,
FeatureBMI, FeatureF16C, FeatureMOVBE,
- FeatureLZCNT, FeaturePOPCNT, FeatureSlowSHLD,
- FeatureUseSqrtEst, FeatureUseRecipEst]>;
+ FeatureLZCNT, FeaturePOPCNT, FeatureFastUAMem,
+ FeatureSlowSHLD, FeatureUseSqrtEst, FeatureUseRecipEst]>;
+
+// TODO: We should probably add 'FeatureFastUAMem' to all of the AMD chips.
// Bulldozer
def : Proc<"bdver1", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
@@ -394,7 +486,7 @@ def : Proc<"c3-2", [FeatureSSE1]>;
// be good for modern chips without enabling instruction set encodings past the
// basic SSE2 and 64-bit ones. It disables slow things from any mainstream and
// modern 64-bit x86 chip, and enables features that are generally beneficial.
-//
+//
// We currently use the Sandy Bridge model as the default scheduling model as
// we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which
// covers a huge swath of x86 processors. If there are specific scheduling
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 4e5b7b8..bb0b9ce 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -47,6 +47,8 @@ using namespace llvm;
/// runOnMachineFunction - Emit the function body.
///
bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<X86Subtarget>();
+
SMShadowTracker.startFunction(MF);
SetupMachineFunction(MF);
@@ -505,13 +507,15 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
}
void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetMacho())
+ Triple TT(TM.getTargetTriple());
+
+ if (TT.isOSBinFormatMachO())
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
- if (Subtarget->isTargetCOFF()) {
+ if (TT.isOSBinFormatCOFF()) {
// Emit an absolute @feat.00 symbol. This appears to be some kind of
// compiler features bitfield read by link.exe.
- if (!Subtarget->is64Bit()) {
+ if (TT.getArch() == Triple::x86) {
MCSymbol *S = MMI->getContext().GetOrCreateSymbol(StringRef("@feat.00"));
OutStreamer.BeginCOFFSymbolDef(S);
OutStreamer.EmitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
@@ -558,8 +562,7 @@ MCSymbol *X86AsmPrinter::GetCPISymbol(unsigned CPID) const {
const MachineConstantPoolEntry &CPE =
MF->getConstantPool()->getConstants()[CPID];
if (!CPE.isMachineConstantPoolEntry()) {
- SectionKind Kind =
- CPE.getSectionKind(TM.getSubtargetImpl()->getDataLayout());
+ SectionKind Kind = CPE.getSectionKind(TM.getDataLayout());
const Constant *C = CPE.Val.ConstVal;
if (const MCSectionCOFF *S = dyn_cast<MCSectionCOFF>(
getObjFileLowering().getSectionForConstant(Kind, C))) {
@@ -579,20 +582,21 @@ void X86AsmPrinter::GenerateExportDirective(const MCSymbol *Sym, bool IsData) {
SmallString<128> Directive;
raw_svector_ostream OS(Directive);
StringRef Name = Sym->getName();
+ Triple TT(TM.getTargetTriple());
- if (Subtarget->isTargetKnownWindowsMSVC())
+ if (TT.isKnownWindowsMSVCEnvironment())
OS << " /EXPORT:";
else
OS << " -export:";
- if ((Subtarget->isTargetWindowsGNU() || Subtarget->isTargetWindowsCygwin()) &&
+ if ((TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment()) &&
(Name[0] == getDataLayout().getGlobalPrefix()))
Name = Name.drop_front();
OS << Name;
if (IsData) {
- if (Subtarget->isTargetKnownWindowsMSVC())
+ if (TT.isKnownWindowsMSVCEnvironment())
OS << ",DATA";
else
OS << ",data";
@@ -603,10 +607,12 @@ void X86AsmPrinter::GenerateExportDirective(const MCSymbol *Sym, bool IsData) {
}
void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetMacho()) {
+ Triple TT(TM.getTargetTriple());
+
+ if (TT.isOSBinFormatMachO()) {
// All darwin targets use mach-o.
MachineModuleInfoMachO &MMIMacho =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
+ MMI->getObjFileInfo<MachineModuleInfoMachO>();
// Output stubs for dynamically-linked functions.
MachineModuleInfoMachO::SymbolListTy Stubs;
@@ -677,22 +683,23 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
- if (Subtarget->isTargetKnownWindowsMSVC() && MMI->usesVAFloatArgument()) {
- StringRef SymbolName = Subtarget->is64Bit() ? "_fltused" : "__fltused";
+ if (TT.isKnownWindowsMSVCEnvironment() && MMI->usesVAFloatArgument()) {
+ StringRef SymbolName =
+ (TT.getArch() == Triple::x86_64) ? "_fltused" : "__fltused";
MCSymbol *S = MMI->getContext().GetOrCreateSymbol(SymbolName);
OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
}
- if (Subtarget->isTargetCOFF()) {
+ if (TT.isOSBinFormatCOFF()) {
// Necessary for dllexport support
std::vector<const MCSymbol*> DLLExportedFns, DLLExportedGlobals;
for (const auto &Function : M)
- if (Function.hasDLLExportStorageClass())
+ if (Function.hasDLLExportStorageClass() && !Function.isDeclaration())
DLLExportedFns.push_back(getSymbol(&Function));
for (const auto &Global : M.globals())
- if (Global.hasDLLExportStorageClass())
+ if (Global.hasDLLExportStorageClass() && !Global.isDeclaration())
DLLExportedGlobals.push_back(getSymbol(&Global));
for (const auto &Alias : M.aliases()) {
@@ -719,7 +726,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
- if (Subtarget->isTargetELF()) {
+ if (TT.isOSBinFormatELF()) {
const TargetLoweringObjectFileELF &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
@@ -729,7 +736,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
for (const auto &Stub : Stubs) {
OutStreamer.EmitLabel(Stub.first);
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 748b948..d101b8c 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -57,6 +57,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void emitShadowPadding(MCStreamer &OutStreamer, const MCSubtargetInfo &STI);
private:
TargetMachine &TM;
+ const MachineFunction *MF;
std::unique_ptr<MCCodeEmitter> CodeEmitter;
bool InShadow;
@@ -85,10 +86,9 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
public:
- explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), SM(*this), SMShadowTracker(TM) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
- }
+ explicit X86AsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), SM(*this), SMShadowTracker(TM) {}
const char *getPassName() const override {
return "X86 Assembly / Object Emitter";
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
new file mode 100644
index 0000000..5e8d374
--- /dev/null
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -0,0 +1,480 @@
+//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that optimizes call sequences on x86.
+// Currently, it converts movs of function parameters onto the stack into
+// pushes. This is beneficial for two main reasons:
+// 1) The push instruction encoding is much smaller than an esp-relative mov
+// 2) It is possible to push memory arguments directly. So, if the
+// transformation is performed pre-reg-alloc, it can help relieve
+// register pressure.
+//
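+// For example (illustrative, with a hypothetical callee _foo), a sequence
+// like
+//   movl $3, 8(%esp)
+//   movl $2, 4(%esp)
+//   movl $1, (%esp)
+//   calll _foo
+// becomes
+//   pushl $3
+//   pushl $2
+//   pushl $1
+//   calll _foo
+//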
+//===----------------------------------------------------------------------===//
+
+#include <algorithm>
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "X86MachineFunctionInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-cf-opt"
+
+static cl::opt<bool>
+ NoX86CFOpt("no-x86-call-frame-opt",
+ cl::desc("Avoid optimizing x86 call frames for size"),
+ cl::init(false), cl::Hidden);
+
+namespace {
+class X86CallFrameOptimization : public MachineFunctionPass {
+public:
+ X86CallFrameOptimization() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ // Information we know about a particular call site
+ struct CallContext {
+ CallContext()
+ : Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
+ MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}
+
+ // Actual call instruction
+ MachineInstr *Call;
+
+ // A copy of the stack pointer
+ MachineInstr *SPCopy;
+
+ // The total displacement of all passed parameters
+ int64_t ExpectedDist;
+
+ // The sequence of movs used to pass the parameters
+ SmallVector<MachineInstr *, 4> MovVector;
+
+ // True if this call site has no stack parameters
+ bool NoStackParams;
+
+ // True if this call site can use push instructions
+ bool UsePush;
+ };
+
+ typedef DenseMap<MachineInstr *, CallContext> ContextMap;
+
+ bool isLegal(MachineFunction &MF);
+
+ bool isProfitable(MachineFunction &MF, ContextMap &CallSeqMap);
+
+ void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, CallContext &Context);
+
+ bool adjustCallSequence(MachineFunction &MF, MachineBasicBlock::iterator I,
+ const CallContext &Context);
+
+ MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
+ unsigned Reg);
+
+ const char *getPassName() const override { return "X86 Optimize Call Frame"; }
+
+ const TargetInstrInfo *TII;
+ const TargetFrameLowering *TFL;
+ const MachineRegisterInfo *MRI;
+ static char ID;
+};
+
+char X86CallFrameOptimization::ID = 0;
+}
+
+FunctionPass *llvm::createX86CallFrameOptimization() {
+ return new X86CallFrameOptimization();
+}
+
+// This checks whether the transformation is legal.
+// Also returns false in cases where it's potentially legal, but
+// we don't even want to try.
+bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
+ if (NoX86CFOpt.getValue())
+ return false;
+
+ // We currently only support call sequences where *all* parameters
+ // are passed on the stack.
+ // No point in running this in 64-bit mode, since some arguments are
+ // passed in-register in all common calling conventions, so the pattern
+ // we're looking for will never match.
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ if (STI.is64Bit())
+ return false;
+
+ // You would expect straight-line code between call-frame setup and
+ // call-frame destroy. You would be wrong. There are circumstances (e.g.
+ // CMOV_GR8 expansion of a select that feeds a function call!) where we can
+ // end up with the setup and the destroy in different basic blocks.
+ // This is bad, and breaks SP adjustment.
+ // So, check that all of the frames in the function are closed inside
+ // the same block, and, for good measure, that there are no nested frames.
+ int FrameSetupOpcode = TII->getCallFrameSetupOpcode();
+ int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
+ for (MachineBasicBlock &BB : MF) {
+ bool InsideFrameSequence = false;
+ for (MachineInstr &MI : BB) {
+ if (MI.getOpcode() == FrameSetupOpcode) {
+ if (InsideFrameSequence)
+ return false;
+ InsideFrameSequence = true;
+ } else if (MI.getOpcode() == FrameDestroyOpcode) {
+ if (!InsideFrameSequence)
+ return false;
+ InsideFrameSequence = false;
+ }
+ }
+
+ if (InsideFrameSequence)
+ return false;
+ }
+
+ return true;
+}
+
+// Check whether this transformation is profitable for a particular
+// function - in terms of code size.
+bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
+ ContextMap &CallSeqMap) {
+ // This transformation is always a win when we do not expect to have
+ // a reserved call frame. Under other circumstances, it may be either
+ // a win or a loss, and requires a heuristic.
+ bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects();
+ if (CannotReserveFrame)
+ return true;
+
+ // Don't do this when not optimizing for size.
+ bool OptForSize =
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+
+ if (!OptForSize)
+ return false;
+
+ unsigned StackAlign = TFL->getStackAlignment();
+
+ int64_t Advantage = 0;
+ for (auto CC : CallSeqMap) {
+ // Call sites where no parameters are passed on the stack
+ // do not affect the cost, since there needs to be no
+ // stack adjustment.
+ if (CC.second.NoStackParams)
+ continue;
+
+ if (!CC.second.UsePush) {
+ // If we don't use pushes for a particular call site,
+ // we pay for not having a reserved call frame with an
+ // additional sub/add esp pair. The cost is ~3 bytes per instruction,
+ // depending on the size of the constant.
+ // TODO: Callee-pop functions should have a smaller penalty, because
+ // an add is needed even with a reserved call frame.
+ Advantage -= 6;
+ } else {
+ // We can use pushes. First, account for the fixed costs.
+ // We'll need an add after the call.
+ Advantage -= 3;
+ // If we have to realign the stack, we'll also need a sub before the call.
+ if (CC.second.ExpectedDist % StackAlign)
+ Advantage -= 3;
+ // Now, for each push, we save ~3 bytes. For small constants, we actually
+ // save more (up to 5 bytes), but 3 should be a good approximation.
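+ // Illustrative tally: with StackAlign == 16, a call site passing 16 bytes
+ // of arguments scores -3 + (16 / 4) * 3 == +9 in favor of pushes.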
+ Advantage += (CC.second.ExpectedDist / 4) * 3;
+ }
+ }
+
+ return (Advantage >= 0);
+}
+
+bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getSubtarget().getInstrInfo();
+ TFL = MF.getSubtarget().getFrameLowering();
+ MRI = &MF.getRegInfo();
+
+ if (!isLegal(MF))
+ return false;
+
+ int FrameSetupOpcode = TII->getCallFrameSetupOpcode();
+
+ bool Changed = false;
+
+ ContextMap CallSeqMap;
+
+ for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
+ for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
+ if (I->getOpcode() == FrameSetupOpcode) {
+ CallContext &Context = CallSeqMap[I];
+ collectCallInfo(MF, *BB, I, Context);
+ }
+
+ if (!isProfitable(MF, CallSeqMap))
+ return false;
+
+ for (auto CC : CallSeqMap)
+ if (CC.second.UsePush)
+ Changed |= adjustCallSequence(MF, CC.first, CC.second);
+
+ return Changed;
+}
+
+void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ CallContext &Context) {
+ // Check that this particular call sequence is amenable to the
+ // transformation.
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+ int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
+
+ // We expect to enter this at the beginning of a call sequence
+ assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
+ MachineBasicBlock::iterator FrameSetup = I++;
+
+ // How much do we adjust the stack? This puts an upper bound on
+ // the number of parameters actually passed on it.
+ unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;
+
+ // A zero adjustment means no stack parameters
+ if (!MaxAdjust) {
+ Context.NoStackParams = true;
+ return;
+ }
+
+ // For globals in PIC mode, we can have some LEAs here.
+ // Ignore them, they don't bother us.
+ // TODO: Extend this to something that covers more cases.
+ while (I->getOpcode() == X86::LEA32r)
+ ++I;
+
+ // We expect a copy instruction here.
+ // TODO: The copy instruction is a lowering artifact.
+ // We should also support a copy-less version, where the stack
+ // pointer is used directly.
+ if (!I->isCopy() || !I->getOperand(0).isReg())
+ return;
+ Context.SPCopy = I++;
+ StackPtr = Context.SPCopy->getOperand(0).getReg();
+
+ // Scan the call setup sequence for the pattern we're looking for.
+ // We only handle a simple case - a sequence of MOV32mi or MOV32mr
+ // instructions that store a sequence of 32-bit values onto the stack, with
+ // no gaps between them.
+ if (MaxAdjust > 4)
+ Context.MovVector.resize(MaxAdjust, nullptr);
+
+ do {
+ int Opcode = I->getOpcode();
+ if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)
+ break;
+
+ // We only want movs of the form:
+ // movl imm/r32, k(%esp)
+ // If we run into something else, bail.
+ // Note that AddrBaseReg may, counter to its name, not be a register,
+ // but rather a frame index.
+ // TODO: Support the fi case. This should probably work now that we
+ // have the infrastructure to track the stack pointer within a call
+ // sequence.
+ if (!I->getOperand(X86::AddrBaseReg).isReg() ||
+ (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
+ !I->getOperand(X86::AddrScaleAmt).isImm() ||
+ (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
+ (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
+ (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
+ !I->getOperand(X86::AddrDisp).isImm())
+ return;
+
+ int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
+ assert(StackDisp >= 0 &&
+ "Negative stack displacement when passing parameters");
+
+ // We really don't want to consider the unaligned case.
+ if (StackDisp % 4)
+ return;
+ StackDisp /= 4;
+
+ assert((size_t)StackDisp < Context.MovVector.size() &&
+ "Function call has more parameters than the stack is adjusted for.");
+
+ // If the same stack slot is being filled twice, something's fishy.
+ if (Context.MovVector[StackDisp] != nullptr)
+ return;
+ Context.MovVector[StackDisp] = I;
+
+ ++I;
+ } while (I != MBB.end());
+
+ // We now expect the end of the sequence - a call and a stack adjust.
+ if (I == MBB.end())
+ return;
+
+ // For PCrel calls, we expect an additional COPY of the basereg.
+ // If we find one, skip it.
+ if (I->isCopy()) {
+ if (I->getOperand(1).getReg() ==
+ MF.getInfo<X86MachineFunctionInfo>()->getGlobalBaseReg())
+ ++I;
+ else
+ return;
+ }
+
+ if (!I->isCall())
+ return;
+
+ Context.Call = I;
+ if ((++I)->getOpcode() != FrameDestroyOpcode)
+ return;
+
+ // Now, go through the vector, and see that we don't have any gaps,
+ // but only a series of 32-bit MOVs.
+ auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
+ for (; MMI != MME; ++MMI, Context.ExpectedDist += 4)
+ if (*MMI == nullptr)
+ break;
+
+ // If the call had no parameters, do nothing
+ if (MMI == Context.MovVector.begin())
+ return;
+
+ // We are either at the last parameter, or a gap.
+ // Make sure it's not a gap
+ for (; MMI != MME; ++MMI)
+ if (*MMI != nullptr)
+ return;
+
+ Context.UsePush = true;
+ return;
+}
+
+bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
+ MachineBasicBlock::iterator I,
+ const CallContext &Context) {
+ // Ok, we can in fact do the transformation for this call.
+ // Do not remove the FrameSetup instruction, but adjust the parameters.
+ // PEI will end up finalizing the handling of this.
+ MachineBasicBlock::iterator FrameSetup = I;
+ MachineBasicBlock &MBB = *(I->getParent());
+ FrameSetup->getOperand(1).setImm(Context.ExpectedDist);
+
+ DebugLoc DL = I->getDebugLoc();
+ // Now, iterate through the vector in reverse order, and replace the movs
+ // with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
+ // replace uses.
+ for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) {
+ MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
+ MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
+ if (MOV->getOpcode() == X86::MOV32mi) {
+ unsigned PushOpcode = X86::PUSHi32;
+ // If the operand is a small (8-bit) immediate, we can use a
+ // PUSH instruction with a shorter encoding.
+ // Note that isImm() may fail even though this is a MOVmi, because
+ // the operand can also be a symbol.
+ if (PushOp.isImm()) {
+ int64_t Val = PushOp.getImm();
+ if (isInt<8>(Val))
+ PushOpcode = X86::PUSH32i8;
+ }
+ BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).addOperand(PushOp);
+ } else {
+ unsigned int Reg = PushOp.getReg();
+
+ // If PUSHrmm is not slow on this target, try to fold the source of the
+ // push into the instruction.
+ const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
+ bool SlowPUSHrmm = ST.isAtom() || ST.isSLM();
+
+ // Check that this is legal to fold. Right now, we're extremely
+ // conservative about that.
+ MachineInstr *DefMov = nullptr;
+ if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
+ MachineInstr *Push =
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm));
+
+ unsigned NumOps = DefMov->getDesc().getNumOperands();
+ for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
+ Push->addOperand(DefMov->getOperand(i));
+
+ DefMov->eraseFromParent();
+ } else {
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r))
+ .addReg(Reg)
+ .getInstr();
+ }
+ }
+
+ MBB.erase(MOV);
+ }
+
+ // The stack-pointer copy is no longer used in the call sequences.
+ // There should not be any other users, but we can't commit to that, so:
+ if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
+ Context.SPCopy->eraseFromParent();
+
+ // Once we've done this, we need to make sure PEI doesn't assume a reserved
+ // frame.
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ FuncInfo->setHasPushSequences(true);
+
+ return true;
+}
+
+MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
+ MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
+ // Do an extremely restricted form of load folding.
+ // ISel will often create patterns like:
+ // movl 4(%edi), %eax
+ // movl 8(%edi), %ecx
+ // movl 12(%edi), %edx
+ // movl %edx, 8(%esp)
+ // movl %ecx, 4(%esp)
+ // movl %eax, (%esp)
+ // call
+ // Get rid of those with prejudice.
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return nullptr;
+
+ // Make sure this is the only use of Reg.
+ if (!MRI->hasOneNonDBGUse(Reg))
+ return nullptr;
+
+ MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);
+
+ // Make sure the def is a MOV from memory.
+ // If the def is in another block, give up.
+ if (DefMI->getOpcode() != X86::MOV32rm ||
+ DefMI->getParent() != FrameSetup->getParent())
+ return nullptr;
+
+ // Now, make sure everything else up until the ADJCALLSTACK is a sequence
+ // of MOVs. To be less conservative would require duplicating a lot of the
+ // logic from PeepholeOptimizer.
+ // FIXME: A possibly better approach would be to teach the PeepholeOptimizer
+ // to be smarter about folding into pushes.
+ for (auto I = DefMI; I != FrameSetup; ++I)
+ if (I->getOpcode() != X86::MOV32rm)
+ return nullptr;
+
+ return DefMI;
+}
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 75a2ec0..41c759a 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -461,6 +461,10 @@ def CC_X86_32_Common : CallingConv<[
CCIfSubtarget<"hasFp256()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
+ // The first 4 AVX 512-bit vector arguments are passed in ZMM registers.
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
+ CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
+
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
@@ -468,6 +472,10 @@ def CC_X86_32_Common : CallingConv<[
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
CCAssignToStack<32, 32>>,
+ // AVX 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
+ CCAssignToStack<64, 64>>,
+
// __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
// passed in the parameter area.
CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;
@@ -626,6 +634,9 @@ def CC_Intel_OCL_BI : CallingConv<[
CCIfType<[v16f32, v8f64, v16i32, v8i64],
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
+ // Pass masks in mask registers
+ CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,
+
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>,
CCDelegateTo<CC_X86_32_C>
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 95cb718..a17f052 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -58,8 +59,8 @@ class X86FastISel final : public FastISel {
public:
explicit X86FastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
- : FastISel(funcInfo, libInfo) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
+ : FastISel(funcInfo, libInfo) {
+ Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
@@ -80,7 +81,7 @@ public:
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
unsigned &ResultReg);
@@ -123,11 +124,15 @@ private:
bool X86SelectTrunc(const Instruction *I);
+ bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
+ const TargetRegisterClass *RC);
+
bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I);
+ bool X86SelectSIToFP(const Instruction *I);
const X86InstrInfo *getInstrInfo() const {
- return getTargetMachine()->getSubtargetImpl()->getInstrInfo();
+ return Subtarget->getInstrInfo();
}
const X86TargetMachine *getTargetMachine() const {
return static_cast<const X86TargetMachine *>(&TM);
@@ -137,7 +142,7 @@ private:
unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
- unsigned X86MaterializeGV(const GlobalValue *GV,MVT VT);
+ unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
unsigned fastMaterializeConstant(const Constant *C) override;
unsigned fastMaterializeAlloca(const AllocaInst *C) override;
@@ -544,7 +549,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
// Ok, we need to do a load from a stub. If we've already loaded from
// this stub, reuse the loaded pointer, otherwise emit the load now.
- DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
+ DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
unsigned LoadReg;
if (I != LocalValueMap.end() && I->second != 0) {
LoadReg = I->second;
@@ -655,7 +660,7 @@ redo_gep:
case Instruction::Alloca: {
// Do static allocas.
const AllocaInst *A = cast<AllocaInst>(V);
- DenseMap<const AllocaInst*, int>::iterator SI =
+ DenseMap<const AllocaInst *, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(A);
if (SI != FuncInfo.StaticAllocaMap.end()) {
AM.BaseType = X86AddressMode::FrameIndexBase;
@@ -903,7 +908,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
unsigned Alignment = S->getAlignment();
unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = ABIAlignment;
bool Aligned = Alignment >= ABIAlignment;
@@ -1009,12 +1014,12 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// Make the copy.
unsigned DstReg = VA.getLocReg();
- const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
// Avoid a cross-class copy. This is very unlikely.
if (!SrcRC->contains(DstReg))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
- DstReg).addReg(SrcReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
// Add register to return instruction.
RetRegs.push_back(VA.getLocReg());
@@ -1030,14 +1035,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
- RetReg).addReg(Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
RetRegs.push_back(RetReg);
}
// Now emit the RET.
MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
@@ -1108,7 +1114,7 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
}
bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
- EVT VT) {
+ EVT VT, DebugLoc CurDbgLoc) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
@@ -1121,7 +1127,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
// CMPri, otherwise use CMPrr.
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareImmOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))
.addReg(Op0Reg)
.addImm(Op1C->getSExtValue());
return true;
@@ -1133,7 +1139,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
.addReg(Op0Reg)
.addReg(Op1Reg);
@@ -1201,7 +1207,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
ResultReg = createResultReg(&X86::GR8RegClass);
if (SETFOpc) {
- if (!X86FastEmitCompare(LHS, RHS, VT))
+ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
return false;
unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
@@ -1226,7 +1232,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
std::swap(LHS, RHS);
// Emit a compare of LHS/RHS.
- if (!X86FastEmitCompare(LHS, RHS, VT))
+ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
@@ -1284,7 +1290,6 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
return true;
}
-
bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Unconditional branches are selected by tablegen-generated code.
// Handle a conditional branch.
@@ -1353,7 +1358,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
std::swap(CmpLHS, CmpRHS);
// Emit a compare of the LHS and RHS, setting the flags.
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT))
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
@@ -1362,7 +1367,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// X86 requires a second branch to handle UNE (and OEQ, which is mapped
// to UNE above).
if (NeedExtraBranch) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
.addMBB(TrueMBB);
}
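The _4 to _1 opcode switch here and in the following hunks appears to track an upstream MC change (an assumption, not stated in the patch): the fixed 4-byte jump forms were dropped, branches are created in their short form, and the assembler relaxes them when the target is out of range.

// Sketch of the relaxation behavior assumed above:
//   jne .LBB0_2   ; emitted as 75 xx (2 bytes) when the target is in range,
//                 ; relaxed by MC to 0f 85 xx xx xx xx otherwise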
@@ -1399,10 +1404,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
.addReg(OpReg).addImm(1);
- unsigned JmpOpc = X86::JNE_4;
+ unsigned JmpOpc = X86::JNE_1;
if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
- JmpOpc = X86::JE_4;
+ JmpOpc = X86::JE_1;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
@@ -1444,7 +1449,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(OpReg).addImm(1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
.addMBB(TrueMBB);
fastEmitBranch(FalseMBB, DbgLoc);
uint32_t BranchWeight = 0;
@@ -1632,8 +1637,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
TII.get(X86::MOV32r0), Zero32);
// Copy the zero into the appropriate sub/super/identical physical
- // register. Unfortunately the operations needed are not uniform enough to
- // fit neatly into the table above.
+ // register. Unfortunately the operations needed are not uniform enough
+ // to fit neatly into the table above.
if (VT.SimpleTy == MVT::i16) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), TypeEntry.HighInReg)
@@ -1740,8 +1745,8 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
EVT CmpVT = TLI.getValueType(CmpLHS->getType());
// Emit a compare of the LHS and RHS, setting the flags.
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
- return false;
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
+ return false;
if (SETFOpc) {
unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
@@ -1820,7 +1825,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
if (I->getType() != CI->getOperand(0)->getType() ||
!((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
- (Subtarget->hasSSE2() && RetVT == MVT::f64) ))
+ (Subtarget->hasSSE2() && RetVT == MVT::f64)))
return false;
const Value *CmpLHS = CI->getOperand(0);
@@ -1924,7 +1929,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
std::swap(CmpLHS, CmpRHS);
EVT CmpVT = TLI.getValueType(CmpLHS->getType());
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
return false;
} else {
unsigned CondReg = getRegForValue(Cond);
@@ -2001,41 +2006,91 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
return false;
}
+bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
+ if (!I->getOperand(0)->getType()->isIntegerTy(32))
+ return false;
+
+ // Select integer to float/double conversion.
+ unsigned OpReg = getRegForValue(I->getOperand(0));
+ if (OpReg == 0)
+ return false;
+
+ bool HasAVX = Subtarget->hasAVX();
+ const TargetRegisterClass *RC = nullptr;
+ unsigned Opcode;
+
+ if (I->getType()->isDoubleTy() && X86ScalarSSEf64) {
+ // sitofp int -> double
+ Opcode = HasAVX ? X86::VCVTSI2SDrr : X86::CVTSI2SDrr;
+ RC = &X86::FR64RegClass;
+ } else if (I->getType()->isFloatTy() && X86ScalarSSEf32) {
+ // sitofp int -> float
+ Opcode = HasAVX ? X86::VCVTSI2SSrr : X86::CVTSI2SSrr;
+ RC = &X86::FR32RegClass;
+ } else
+ return false;
+
+ unsigned ImplicitDefReg = 0;
+ if (HasAVX) {
+ ImplicitDefReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
+ }
+
+ const MCInstrDesc &II = TII.get(Opcode);
+ OpReg = constrainOperandRegClass(II, OpReg, (HasAVX ? 2 : 1));
+
+ unsigned ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
+ if (ImplicitDefReg)
+ MIB.addReg(ImplicitDefReg, RegState::Kill);
+ MIB.addReg(OpReg);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
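As a reading aid (not part of the patch), here are the source-level conversions the new fast path does and does not pick up; the function names are hypothetical and an SSE2-capable x86-64 target is assumed.

// Illustrative only; function names are hypothetical.
double f(int x)       { return (double)x; } // sitofp i32 -> double: (V)CVTSI2SD
float  g(int x)       { return (float)x; }  // sitofp i32 -> float:  (V)CVTSI2SS
double h(long long x) { return (double)x; } // i64 source: rejected above, left
                                            // to SelectionDAG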
+// Helper method used by X86SelectFPExt and X86SelectFPTrunc.
+bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
+ unsigned TargetOpc,
+ const TargetRegisterClass *RC) {
+ assert((I->getOpcode() == Instruction::FPExt ||
+ I->getOpcode() == Instruction::FPTrunc) &&
+ "Instruction must be an FPExt or FPTrunc!");
+
+ unsigned OpReg = getRegForValue(I->getOperand(0));
+ if (OpReg == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
+ ResultReg);
+ if (Subtarget->hasAVX())
+ MIB.addReg(OpReg);
+ MIB.addReg(OpReg);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
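One detail worth calling out: under AVX the source register is added twice. A sketch of why, assuming the standard three-operand VEX encoding:

// Why OpReg appears twice with AVX: VCVTSS2SDrr is (dst, src1, src2), where
// src1 only supplies the pass-through upper vector lanes, so the input
// register is reused for both sources.
//   non-AVX: cvtss2sd  %xmm1, %xmm0          ; two operands
//   AVX:     vcvtss2sd %xmm1, %xmm1, %xmm0   ; reuse input as pass-through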
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
- // fpext from float to double.
- if (X86ScalarSSEf64 &&
- I->getType()->isDoubleTy()) {
- const Value *V = I->getOperand(0);
- if (V->getType()->isFloatTy()) {
- unsigned OpReg = getRegForValue(V);
- if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(&X86::FR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(X86::CVTSS2SDrr), ResultReg)
- .addReg(OpReg);
- updateValueMap(I, ResultReg);
- return true;
- }
+ if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
+ I->getOperand(0)->getType()->isFloatTy()) {
+ // fpext from float to double.
+ unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
+ return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass);
}
return false;
}
bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
- if (X86ScalarSSEf64) {
- if (I->getType()->isFloatTy()) {
- const Value *V = I->getOperand(0);
- if (V->getType()->isDoubleTy()) {
- unsigned OpReg = getRegForValue(V);
- if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(&X86::FR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(X86::CVTSD2SSrr), ResultReg)
- .addReg(OpReg);
- updateValueMap(I, ResultReg);
- return true;
- }
- }
+ if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
+ I->getOperand(0)->getType()->isDoubleTy()) {
+ // fptrunc from double to float.
+ unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
+ return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass);
}
return false;
@@ -2065,12 +2120,11 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (!Subtarget->is64Bit()) {
// If we're on x86-32; we can't extract an i8 from a general register.
// First issue a copy to GR16_ABCD or GR32_ABCD.
- const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
- (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
- (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
+ const TargetRegisterClass *CopyRC =
+ (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
- CopyReg).addReg(InputReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
InputReg = CopyReg;
}
@@ -2107,9 +2161,8 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
VT = MVT::i32;
else if (Len >= 2)
VT = MVT::i16;
- else {
+ else
VT = MVT::i8;
- }
unsigned Reg;
bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
@@ -2129,7 +2182,73 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
switch (II->getIntrinsicID()) {
default: return false;
+ case Intrinsic::convert_from_fp16:
+ case Intrinsic::convert_to_fp16: {
+ if (TM.Options.UseSoftFloat || !Subtarget->hasF16C())
+ return false;
+
+ const Value *Op = II->getArgOperand(0);
+ unsigned InputReg = getRegForValue(Op);
+ if (InputReg == 0)
+ return false;
+
+ // F16C only allows converting from float to half and from half to float.
+ bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
+ if (IsFloatToHalf) {
+ if (!Op->getType()->isFloatTy())
+ return false;
+ } else {
+ if (!II->getType()->isFloatTy())
+ return false;
+ }
+
+ unsigned ResultReg = 0;
+ const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
+ if (IsFloatToHalf) {
+ // 'InputReg' is implicitly promoted from register class FR32 to
+ // register class VR128 by method 'constrainOperandRegClass' which is
+ // directly called by 'fastEmitInst_ri'.
+ // Instruction VCVTPS2PHrr takes an extra immediate operand which is
+ // used to provide rounding control.
+ InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 0);
+
+ // Move the lower 32 bits of ResultReg to another register of class GR32.
+ ResultReg = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::VMOVPDI2DIrr), ResultReg)
+ .addReg(InputReg, RegState::Kill);
+
+ // The result value is in the lower 16 bits of ResultReg.
+ unsigned RegIdx = X86::sub_16bit;
+ ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
+ } else {
+ assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
+ // Explicitly sign-extend the input to 32-bit.
+ InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg,
+ /*Kill=*/false);
+
+ // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
+ InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
+ InputReg, /*Kill=*/true);
+
+ InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true);
+
+ // The result value is in the lower 32 bits of ResultReg.
+ // Emit an explicit copy from register class VR128 to register class FR32.
+ ResultReg = createResultReg(&X86::FR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(InputReg, RegState::Kill);
+ }
+
+ updateValueMap(II, ResultReg);
+ return true;
+ }
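Roughly the instruction sequences the two directions lower to; a sketch assuming an F16C-capable target, not taken verbatim from the patch:

// convert_to_fp16(float %x):
//   vcvtps2ph $0, %xmm0, %xmm0   ; imm 0 selects round-to-nearest
//   vmovd     %xmm0, %eax        ; low 16 bits hold the half
// convert_from_fp16(i16 %x):
//   movswl    %ax, %eax          ; the explicit sign-extension to i32
//   vmovd     %eax, %xmm0        ; the SCALAR_TO_VECTOR step
//   vcvtph2ps %xmm0, %xmm0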
case Intrinsic::frameaddress: {
+ MachineFunction *MF = FuncInfo.MF;
+ if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
+ return false;
+
Type *RetTy = II->getCalledFunction()->getReturnType();
MVT VT;
@@ -2145,14 +2264,13 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
}
- // This needs to be set before we call getFrameRegister, otherwise we get
- // the wrong frame register.
- MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
+ // This needs to be set before we call getPtrSizedFrameRegister, otherwise
+ // we get the wrong frame register.
+ MachineFrameInfo *MFI = MF->getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
- unsigned FrameReg = RegInfo->getFrameRegister(*(FuncInfo.MF));
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
"Invalid Frame Register!");
@@ -2372,19 +2490,16 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
unsigned ResultReg = 0;
// Check if we have an immediate version.
if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
- static const unsigned Opc[2][2][4] = {
- { { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
- { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r } },
- { { X86::INC8r, X86::INC64_16r, X86::INC64_32r, X86::INC64r },
- { X86::DEC8r, X86::DEC64_16r, X86::DEC64_32r, X86::DEC64r } }
+ static const unsigned Opc[2][4] = {
+ { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
+ { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
};
if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
ResultReg = createResultReg(TLI.getRegClassFor(VT));
- bool Is64Bit = Subtarget->is64Bit();
bool IsDec = BaseOpc == X86ISD::DEC;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
+ TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill));
} else
ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
@@ -2529,7 +2644,7 @@ bool X86FastISel::fastLowerArguments() {
if (!Subtarget->is64Bit())
return false;
-
+
// Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
unsigned GPRCnt = 0;
unsigned FPRCnt = 0;
@@ -2674,6 +2789,9 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
TM.Options.GuaranteedTailCallOpt))
return false;
+ SmallVector<MVT, 16> OutVTs;
+ SmallVector<unsigned, 16> ArgRegs;
+
// If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
// instruction. This is safe because it is common to all FastISel supported
// calling conventions on x86.
@@ -2691,28 +2809,34 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Passing bools around ends up doing a trunc to i1 and passing it.
// Codegen this as an argument + "and 1".
- if (auto *TI = dyn_cast<TruncInst>(Val)) {
- if (TI->getType()->isIntegerTy(1) && CLI.CS &&
- (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
- TI->hasOneUse()) {
- Val = cast<TruncInst>(Val)->getOperand(0);
- unsigned ResultReg = getRegForValue(Val);
-
- if (!ResultReg)
- return false;
-
- MVT ArgVT;
- if (!isTypeLegal(Val->getType(), ArgVT))
- return false;
+ MVT VT;
+ auto *TI = dyn_cast<TruncInst>(Val);
+ unsigned ResultReg;
+ if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
+ (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
+ TI->hasOneUse()) {
+ Value *PrevVal = TI->getOperand(0);
+ ResultReg = getRegForValue(PrevVal);
+
+ if (!ResultReg)
+ return false;
- ResultReg =
- fastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
+ if (!isTypeLegal(PrevVal->getType(), VT))
+ return false;
- if (!ResultReg)
- return false;
- updateValueMap(Val, ResultReg);
- }
+ ResultReg =
+ fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
+ } else {
+ if (!isTypeLegal(Val->getType(), VT))
+ return false;
+ ResultReg = getRegForValue(Val);
}
+
+ if (!ResultReg)
+ return false;
+
+ ArgRegs.push_back(ResultReg);
+ OutVTs.push_back(VT);
}
// Analyze operands of the call, assigning locations to each operand.
@@ -2723,13 +2847,6 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (IsWin64)
CCInfo.AllocateStack(32, 8);
- SmallVector<MVT, 16> OutVTs;
- for (auto *Val : OutVals) {
- MVT VT;
- if (!isTypeLegal(Val->getType(), VT))
- return false;
- OutVTs.push_back(VT);
- }
CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
// Get a count of how many bytes are to be pushed on the stack.
@@ -2738,11 +2855,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
- .addImm(NumBytes);
+ .addImm(NumBytes).addImm(0);
// Walk the register/memloc assignments, inserting copies/loads.
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign const &VA = ArgLocs[i];
const Value *ArgVal = OutVals[VA.getValNo()];
@@ -2751,9 +2867,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (ArgVT == MVT::x86mmx)
return false;
- unsigned ArgReg = getRegForValue(ArgVal);
- if (!ArgReg)
- return false;
+ unsigned ArgReg = ArgRegs[VA.getValNo()];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -2875,7 +2989,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
assert((Subtarget->hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
@@ -3049,6 +3163,8 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
return X86SelectFPExt(I);
case Instruction::FPTrunc:
return X86SelectFPTrunc(I);
+ case Instruction::SIToFP:
+ return X86SelectSIToFP(I);
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
@@ -3194,8 +3310,8 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
TII.get(Opc), ResultReg);
addDirectMem(MIB, AddrReg);
MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
- MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
- TM.getSubtargetImpl()->getDataLayout()->getPointerSize(), Align);
+ MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
+ TM.getDataLayout()->getPointerSize(), Align);
MIB->addMemOperand(*FuncInfo.MF, MMO);
return ResultReg;
}
@@ -3229,7 +3345,10 @@ unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
ResultReg)
.addGlobalAddress(GV);
} else {
- unsigned Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
+ unsigned Opc = TLI.getPointerTy() == MVT::i32
+ ? (Subtarget->isTarget64BitILP32()
+ ? X86::LEA64_32r : X86::LEA32r)
+ : X86::LEA64r;
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
}
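The x32 special case deserves a note; a sketch of the encoding difference, on my reading of the ILP32 situation:

// On x32, pointers are 32-bit but the default address size is 64-bit, so the
// 32-bit-destination form of the 64-bit LEA is the right pick:
//   LEA64_32r:  leal (%rax,%rcx), %edx   ; 64-bit addressing, 32-bit result
//   LEA32r:     leal (%eax,%ecx), %edx   ; needs a 0x67 prefix in 64-bit mode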
@@ -3271,7 +3390,10 @@ unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
X86AddressMode AM;
if (!X86SelectAddress(C, AM))
return 0;
- unsigned Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
+ unsigned Opc = TLI.getPointerTy() == MVT::i32
+ ? (Subtarget->isTarget64BitILP32()
+ ? X86::LEA64_32r : X86::LEA32r)
+ : X86::LEA64r;
const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -3325,7 +3447,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
if (!X86SelectAddress(Ptr, AM))
return false;
- const X86InstrInfo &XII = (const X86InstrInfo&)TII;
+ const X86InstrInfo &XII = (const X86InstrInfo &)TII;
unsigned Size = DL.getTypeAllocSize(LI->getType());
unsigned Alignment = LI->getAlignment();
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 02736ac..b39c5ab 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -88,7 +88,6 @@ public:
private:
MachineFunction *MF;
- const TargetMachine *TM;
const X86InstrInfo *TII; // Machine instruction info.
};
char FixupLEAPass::ID = 0;
@@ -150,13 +149,11 @@ FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
- TM = &Func.getTarget();
- const X86Subtarget &ST = TM->getSubtarget<X86Subtarget>();
+ const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
if (!ST.LEAusesAG() && !ST.slowLEA())
return false;
- TII =
- static_cast<const X86InstrInfo *>(TM->getSubtargetImpl()->getInstrInfo());
+ TII = ST.getInstrInfo();
DEBUG(dbgs() << "Start X86FixupLEAs\n";);
// Process all basic blocks.
@@ -219,7 +216,7 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
return CurInst;
}
InstrDistance += TII->getInstrLatency(
- TM->getSubtargetImpl()->getInstrItineraryData(), CurInst);
+ MF->getSubtarget().getInstrItineraryData(), CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
return nullptr;
@@ -283,6 +280,7 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
return;
int addrr_opcode, addri_opcode;
switch (opcode) {
+ default: llvm_unreachable("Unexpected LEA instruction");
case X86::LEA16r:
addrr_opcode = X86::ADD16rr;
addri_opcode = X86::ADD16ri;
@@ -296,8 +294,6 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
addrr_opcode = X86::ADD64rr;
addri_opcode = X86::ADD64ri32;
break;
- default:
- assert(false && "Unexpected LEA instruction");
}
DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
DEBUG(dbgs() << "FixLEA: Replaced by: ";);
@@ -334,7 +330,7 @@ bool FixupLEAPass::processBasicBlock(MachineFunction &MF,
MachineFunction::iterator MFI) {
for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) {
- if (TM->getSubtarget<X86Subtarget>().isSLM())
+ if (MF.getSubtarget<X86Subtarget>().isSLM())
processInstructionForSLM(I, MFI);
else
processInstruction(I, MFI);
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 6189109..c8e5f64 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -898,7 +898,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Now we should have the correct registers live.
DEBUG(dumpStack());
- assert(StackTop == CountPopulation_32(Mask) && "Live count mismatch");
+ assert(StackTop == countPopulation(Mask) && "Live count mismatch");
}
/// shuffleStackTop - emit fxch instructions before I to shuffle the top
@@ -943,7 +943,7 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) {
}
}
- unsigned N = CountTrailingOnes_32(STReturns);
+ unsigned N = countTrailingOnes(STReturns);
// FP registers used for function return must be consecutive starting at
// FP0.
@@ -1420,14 +1420,14 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
if (STUses && !isMask_32(STUses))
MI->emitError("fixed input regs must be last on the x87 stack");
- unsigned NumSTUses = CountTrailingOnes_32(STUses);
+ unsigned NumSTUses = countTrailingOnes(STUses);
// Defs must be contiguous from the stack top. ST0-STn.
if (STDefs && !isMask_32(STDefs)) {
MI->emitError("output regs must be last on the x87 stack");
STDefs = NextPowerOf2(STDefs) - 1;
}
- unsigned NumSTDefs = CountTrailingOnes_32(STDefs);
+ unsigned NumSTDefs = countTrailingOnes(STDefs);
// So must the clobbered stack slots. ST0-STm, m >= n.
if (STClobbers && !isMask_32(STDefs | STClobbers))
@@ -1437,7 +1437,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
unsigned STPopped = STUses & (STDefs | STClobbers);
if (STPopped && !isMask_32(STPopped))
MI->emitError("implicitly popped regs must be last on the x87 stack");
- unsigned NumSTPopped = CountTrailingOnes_32(STPopped);
+ unsigned NumSTPopped = countTrailingOnes(STPopped);
DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
<< NumSTPopped << ", and defines " << NumSTDefs << " regs.\n");
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index b9920b1..cead099 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -38,7 +38,34 @@ using namespace llvm;
extern cl::opt<bool> ForceStackAlign;
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
- return !MF.getFrameInfo()->hasVarSizedObjects();
+ return !MF.getFrameInfo()->hasVarSizedObjects() &&
+ !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
+}
+
+/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
+/// call frame pseudos can be simplified. Having a FP, as in the default
+/// implementation, is not sufficient here since we can't always use it.
+/// Use a more nuanced condition.
+bool
+X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+  const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
+      MF.getSubtarget().getRegisterInfo());
+  return hasReservedCallFrame(MF) ||
+         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
+         TRI->hasBasePointer(MF);
+}
+
+// needsFrameIndexResolution - Do we need to perform FI resolution for
+// this function? Normally, this is required only when the function
+// has any stack objects. However, FI resolution actually has another job,
+// not apparent from the title - it resolves callframesetup/destroy
+// that were not simplified earlier.
+// So, this is required for x86 functions that have push sequences even
+// when there are no stack objects.
+bool
+X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
+ return MF.getFrameInfo()->hasStackObjects() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
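For readers unfamiliar with the push-sequence feature referenced here, a sketch of what it changes about call lowering (32-bit cdecl, illustrative values):

// Without push sequences:        With push sequences:
//   subl $8, %esp                  pushl $2
//   movl $2, 4(%esp)               pushl $1
//   movl $1, (%esp)                calll f
//   calll f                        ; the CALLSEQ pseudos are resolved
//                                  ; during frame index resolution instead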
/// hasFP - Return true if the specified function should have a dedicated frame
@@ -82,6 +109,14 @@ static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
}
}
+static unsigned getSUBrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::SUB64rr : X86::SUB32rr;
+}
+
+static unsigned getADDrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::ADD64rr : X86::ADD32rr;
+}
+
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
if (IsLP64) {
if (isInt<8>(Imm))
@@ -155,6 +190,18 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
return 0;
}
+static bool isEAXLiveIn(MachineFunction &MF) {
+ for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
+ EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
+ unsigned Reg = II->first;
+
+ if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
+ Reg == X86::AH || Reg == X86::AL)
+ return true;
+ }
+
+ return false;
+}
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
@@ -177,7 +224,33 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
DebugLoc DL = MBB.findDebugLoc(MBBI);
while (Offset) {
- uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
+ if (Offset > Chunk) {
+ // Rather than emit a long series of instructions for large offsets,
+ // load the offset into a register and do one sub/add.
+ unsigned Reg = 0;
+
+ if (isSub && !isEAXLiveIn(*MBB.getParent()))
+ Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);
+ else
+ Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+
+ if (Reg) {
+ Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
+ .addImm(Offset);
+ Opc = isSub
+ ? getSUBrrOpcode(Is64BitTarget)
+ : getADDrrOpcode(Is64BitTarget);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addReg(Reg);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ Offset = 0;
+ continue;
+ }
+ }
+
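The effect of the new large-offset path, sketched under the assumption that Chunk is 0x7fffffff on 64-bit targets, as elsewhere in this file:

// Offset = 0x100000000, with a dead scratch register available:
//   before:  subq $0x7fffffff, %rsp
//            subq $0x7fffffff, %rsp
//            subq $0x2, %rsp
//   after:   movabsq $0x100000000, %rax
//            subq    %rax, %rsp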
+ uint64_t ThisVal = std::min(Offset, Chunk);
if (ThisVal == (Is64BitTarget ? 8 : 4)) {
// Use push / pop instead.
unsigned Reg = isSub
@@ -239,38 +312,6 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
}
}
-/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower
-/// iterator.
-static
-void mergeSPUpdatesDown(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, uint64_t *NumBytes = nullptr) {
- // FIXME: THIS ISN'T RUN!!!
- return;
-
- if (MBBI == MBB.end()) return;
-
- MachineBasicBlock::iterator NI = std::next(MBBI);
- if (NI == MBB.end()) return;
-
- unsigned Opc = NI->getOpcode();
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- NI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes -= NI->getOperand(2).getImm();
- MBB.erase(NI);
- MBBI = NI;
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
- NI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes += NI->getOperand(2).getImm();
- MBB.erase(NI);
- MBBI = NI;
- }
-}
-
/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted, and
/// the stack adjustment is returned as a positive value for ADD/LEA and a
@@ -306,19 +347,6 @@ static int mergeSPUpdates(MachineBasicBlock &MBB,
return Offset;
}
-static bool isEAXLiveIn(MachineFunction &MF) {
- for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
- EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
- unsigned Reg = II->first;
-
- if (Reg == X86::EAX || Reg == X86::AX ||
- Reg == X86::AH || Reg == X86::AL)
- return true;
- }
-
- return false;
-}
-
void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -365,12 +393,23 @@ static bool usesTheStack(const MachineFunction &MF) {
return false;
}
-void X86FrameLowering::getStackProbeFunction(const X86Subtarget &STI,
- unsigned &CallOp,
- const char *&Symbol) {
- CallOp = STI.is64Bit() ? X86::W64ALLOCA : X86::CALLpcrel32;
+void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) {
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
+ bool Is64Bit = STI.is64Bit();
+ bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
- if (STI.is64Bit()) {
+ unsigned CallOp;
+ if (Is64Bit)
+ CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
+ else
+ CallOp = X86::CALLpcrel32;
+
+ const char *Symbol;
+ if (Is64Bit) {
if (STI.isTargetCygMing()) {
Symbol = "___chkstk_ms";
} else {
@@ -380,6 +419,66 @@ void X86FrameLowering::getStackProbeFunction(const X86Subtarget &STI,
Symbol = "_alloca";
else
Symbol = "_chkstk";
+
+ MachineInstrBuilder CI;
+
+ // All current stack probes take AX and SP as input, clobber flags, and
+ // preserve all registers. x86_64 probes leave RSP unmodified.
+  if (Is64Bit && IsLargeCodeModel) {
+ // For the large code model, we have to call through a register. Use R11,
+ // as it is scratch in all supported calling conventions.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
+ .addExternalSymbol(Symbol);
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
+ } else {
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
+ }
+
+ unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
+ unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
+ CI.addReg(AX, RegState::Implicit)
+ .addReg(SP, RegState::Implicit)
+ .addReg(AX, RegState::Define | RegState::Implicit)
+ .addReg(SP, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+
+ if (Is64Bit) {
+ // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
+ // themselves. They also do not clobber %rax, so we can reuse it when
+ // adjusting %rsp.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
+ .addReg(X86::RSP)
+ .addReg(X86::RAX);
+ }
+}
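What the large-code-model branch emits, as a sketch; the symbol choice depends on the environment per the selection logic above:

//   movabsq $__chkstk, %r11   ; the address may not fit in a rel32
//   callq   *%r11             ; R11 is scratch in all supported CCs
//   subq    %rax, %rsp        ; x86-64 probes leave RSP unmodified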
+
+static unsigned calculateSetFPREG(uint64_t SPAdjust) {
+ // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
+ // and might require smaller successive adjustments.
+ const uint64_t Win64MaxSEHOffset = 128;
+ uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
+ // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
+ return SEHFrameOffset & -16;
+}
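A few worked values for calculateSetFPREG may help:

//   SPAdjust =  64 -> min(64, 128)  =  64 ->  64 & -16 =  64
//   SPAdjust = 120 -> min(120, 128) = 120 -> 120 & -16 = 112
//   SPAdjust = 200 -> min(200, 128) = 128 -> 128 & -16 = 128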
+
+// If we're forcing a stack realignment we can't rely on just the frame
+// info, we need to know the ABI stack alignment as well in case we
+// have a call out. Otherwise just make sure we have some alignment - we'll
+// go with the minimum SlotSize.
+static uint64_t calculateMaxStackAlign(const MachineFunction &MF) {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned StackAlign = STI.getFrameLowering()->getStackAlignment();
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else if (MaxAlign < SlotSize)
+ MaxAlign = SlotSize;
+ }
+ return MaxAlign;
}
/// emitPrologue - Push callee-saved registers onto the stack, which
@@ -448,6 +547,8 @@ void X86FrameLowering::getStackProbeFunction(const X86Subtarget &STI,
[if needs base pointer]
mov %rsp, %rbx
+ [if needs to restore base pointer]
+ mov %rsp, -MMM(%rbp)
; Emit CFI info
[if needs FP]
@@ -469,67 +570,65 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
- const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+ uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
bool HasFP = hasFP(MF);
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
// standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
bool IsWin64 = STI.isTargetWin64();
// Not necessarily synonymous with IsWin64.
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() ==
- ExceptionHandling::ItaniumWinEH;
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
bool NeedsDwarfCFI =
!IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
bool UseLEA = STI.useLeaForSP();
- unsigned StackAlign = getStackAlignment();
unsigned SlotSize = RegInfo->getSlotSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
- const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
- getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ const unsigned MachineFramePtr =
+ STI.isTarget64BitILP32()
+ ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
+ : FramePtr;
unsigned StackPtr = RegInfo->getStackRegister();
unsigned BasePtr = RegInfo->getBaseRegister();
DebugLoc DL;
- // If we're forcing a stack realignment we can't rely on just the frame
- // info, we need to know the ABI stack alignment as well in case we
- // have a call out. Otherwise just make sure we have some alignment - we'll
- // go with the minimum SlotSize.
- if (ForceStackAlign) {
- if (MFI->hasCalls())
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
- else if (MaxAlign < SlotSize)
- MaxAlign = SlotSize;
- }
-
// Add RETADDR move area to callee saved frame size.
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta && IsWinEH)
+ report_fatal_error("Can't handle guaranteed tail call under win64 yet");
+
if (TailCallReturnAddrDelta < 0)
X86FI->setCalleeSavedFrameSize(
X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
- bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMacho());
-
+ bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
+
+ // The default stack probe size is 4096 if the function has no
+ // "stack-probe-size" attribute.
+ unsigned StackProbeSize = 4096;
+ if (Fn->hasFnAttribute("stack-probe-size"))
+ Fn->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+
// If this is x86-64 and the Red Zone is not disabled, if we are a leaf
// function, and use up to 128 bytes of stack space, don't have a frame
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
!RegInfo->needsStackRealignment(MF) &&
- !MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->adjustsStack() && // No calls.
- !IsWin64 && // Win64 has no Red Zone
- !usesTheStack(MF) && // Don't push and pop.
- !MF.shouldSplitStack()) { // Regular stack
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64 && // Win64 has no Red Zone
+ !usesTheStack(MF) && // Don't push and pop.
+ !MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
@@ -570,14 +669,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
if (HasFP) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- if (RegInfo->needsStackRealignment(MF)) {
- // Callee-saved registers are pushed on stack before the stack
- // is realigned.
- FrameSize -= X86FI->getCalleeSavedFrameSize();
- NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
- } else {
- NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
- }
+ // If required, include space for the extra hidden slot used to stash the
+ // base pointer.
+ if (X86FI->getRestoreBasePointer())
+ FrameSize += SlotSize;
+
+ NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+
+ // Callee-saved registers are pushed on stack before the stack is realigned.
+ if (RegInfo->needsStackRealignment(MF) && !IsWinEH)
+ NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -613,11 +713,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.setMIFlag(MachineInstr::FrameSetup);
}
- // Update EBP with the new base value.
- BuildMI(MBB, MBBI, DL,
- TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)
- .addReg(StackPtr)
- .setMIFlag(MachineInstr::FrameSetup);
+ if (!IsWinEH) {
+ // Update EBP with the new base value.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
+ FramePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
if (NeedsDwarfCFI) {
// Mark effective beginning of when frame pointer becomes valid.
@@ -666,15 +769,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// Realign stack after we pushed callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
- if (RegInfo->needsStackRealignment(MF)) {
+ // Don't do this for Win64; it needs to realign the stack after the prologue.
+ if (!IsWinEH && RegInfo->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
uint64_t Val = -MaxAlign;
MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(getANDriOpcode(Uses64BitFramePtr, Val)), StackPtr)
- .addReg(StackPtr)
- .addImm(Val)
- .setMIFlag(MachineInstr::FrameSetup);
+ BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
// The EFLAGS implicit def is dead.
MI->getOperand(3).setIsDead();
@@ -685,14 +789,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// the callee has more arguments then the caller.
NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
- // If there is an ADD32ri or SUB32ri of ESP immediately after this
- // instruction, merge the two instructions.
- mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
-
// Adjust stack pointer: ESP -= numbytes.
- static const size_t PageSize = 4096;
-
// Windows and cygwin/mingw require a prologue helper routine when allocating
// more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
// uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
@@ -701,12 +799,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// responsible for adjusting the stack pointer. Touching the stack at 4K
// increments is necessary to ensure that the guard pages used by the OS
// virtual memory manager are allocated in correct sequence.
- if (NumBytes >= PageSize && UseStackProbe) {
- const char *StackProbeSymbol;
- unsigned CallOp;
-
- getStackProbeFunction(STI, CallOp, StackProbeSymbol);
-
+ uint64_t AlignedNumBytes = NumBytes;
+ if (IsWinEH && RegInfo->needsStackRealignment(MF))
+ AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this function.
bool isEAXAlive = isEAXLiveIn(MF);
@@ -724,9 +820,19 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
if (Is64Bit) {
// Handle the 64-bit Windows ABI case where we need to call __chkstk.
// Function prologue is responsible for adjusting the stack pointer.
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
- .addImm(NumBytes)
- .setMIFlag(MachineInstr::FrameSetup);
+ if (isUInt<32>(NumBytes)) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else if (isInt<32>(NumBytes)) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
} else {
// Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
// We'll also use 4 already allocated bytes for EAX.
@@ -735,22 +841,17 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.setMIFlag(MachineInstr::FrameSetup);
}
- BuildMI(MBB, MBBI, DL,
- TII.get(CallOp))
- .addExternalSymbol(StackProbeSymbol)
- .addReg(StackPtr, RegState::Define | RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
- .setMIFlag(MachineInstr::FrameSetup);
+ // Save a pointer to the MI where we set AX.
+ MachineBasicBlock::iterator SetRAX = MBBI;
+ --SetRAX;
+
+ // Call __chkstk, __chkstk_ms, or __alloca.
+ emitStackProbeCall(MF, MBB, MBBI, DL);
+
+ // Apply the frame setup flag to all inserted instrs.
+ for (; SetRAX != MBBI; ++SetRAX)
+ SetRAX->setFlag(MachineInstr::FrameSetup);
- if (Is64Bit) {
- // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
- // themself. It also does not clobber %rax so we can reuse it when
- // adjusting %rsp.
- BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
- .addReg(StackPtr)
- .addReg(X86::RAX)
- .setMIFlag(MachineInstr::FrameSetup);
- }
if (isEAXAlive) {
// Restore EAX
MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
@@ -764,68 +865,66 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
UseLEA, TII, *RegInfo);
}
+ if (NeedsWinEH && NumBytes)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+
int SEHFrameOffset = 0;
- if (NeedsWinEH) {
- if (HasFP) {
- // We need to set frame base offset low enough such that all saved
- // register offsets would be positive relative to it, but we can't
- // just use NumBytes, because .seh_setframe offset must be <=240.
- // So we pretend to have only allocated enough space to spill the
- // non-volatile registers.
- // We don't care about the rest of stack allocation, because unwinder
- // will restore SP to (BP - SEHFrameOffset)
- for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
- int offset = MFI->getObjectOffset(Info.getFrameIdx());
- SEHFrameOffset = std::max(SEHFrameOffset, std::abs(offset));
- }
- SEHFrameOffset += SEHFrameOffset % 16; // ensure alignmant
-
- // This only needs to account for XMM spill slots, GPR slots
- // are covered by the .seh_pushreg's emitted above.
- unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
- if (Size) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
- .addImm(Size)
- .setMIFlag(MachineInstr::FrameSetup);
- }
+ if (IsWinEH && HasFP) {
+ SEHFrameOffset = calculateSetFPREG(NumBytes);
+ if (SEHFrameOffset)
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
+ StackPtr, false, SEHFrameOffset);
+ else
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr).addReg(StackPtr);
+ if (NeedsWinEH)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
.addImm(FramePtr)
.addImm(SEHFrameOffset)
.setMIFlag(MachineInstr::FrameSetup);
- } else {
- // SP will be the base register for restoring XMMs
- if (NumBytes) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
- .addImm(NumBytes)
- .setMIFlag(MachineInstr::FrameSetup);
- }
- }
}
- // Skip the rest of register spilling code
- while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
+ while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
+ const MachineInstr *FrameInstr = &*MBBI;
++MBBI;
- // Emit SEH info for non-GPRs
- if (NeedsWinEH) {
- for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
- unsigned Reg = Info.getReg();
- if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
- continue;
- assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");
-
- int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
- Offset += SEHFrameOffset;
-
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
- .addImm(Reg)
- .addImm(Offset)
- .setMIFlag(MachineInstr::FrameSetup);
+ if (NeedsWinEH) {
+ int FI;
+ if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
+ if (X86::FR64RegClass.contains(Reg)) {
+ int Offset = getFrameIndexOffset(MF, FI);
+ Offset += SEHFrameOffset;
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
+ .addImm(Reg)
+ .addImm(Offset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
}
+ }
+ if (NeedsWinEH)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
.setMIFlag(MachineInstr::FrameSetup);
+
+ // Realign stack after we spilled callee-saved registers (so that we'll be
+ // able to calculate their offsets from the frame pointer).
+ // Win64 requires aligning the stack after the prologue.
+ if (IsWinEH && RegInfo->needsStackRealignment(MF)) {
+ assert(HasFP && "There should be a frame pointer if stack is realigned.");
+ uint64_t Val = -MaxAlign;
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
}
// If we need a base pointer, set it up here. It's whatever the value
@@ -838,6 +937,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
.addReg(StackPtr)
.setMIFlag(MachineInstr::FrameSetup);
+ if (X86FI->getRestoreBasePointer()) {
+ // Stash the value of the base pointer. Saving RSP instead of EBP
+ // shortens the dependence chain.
+ unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
+ FramePtr, true, X86FI->getRestoreBasePointerOffset())
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
}
if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
@@ -863,33 +970,45 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI != MBB.end() && "Returning block has no instructions");
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
// standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ bool HasFP = hasFP(MF);
const bool Is64BitILP32 = STI.isTarget64BitILP32();
- bool UseLEA = STI.useLeaForSP();
- unsigned StackAlign = getStackAlignment();
unsigned SlotSize = RegInfo->getSlotSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
- unsigned MachineFramePtr = Is64BitILP32 ?
- getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ unsigned MachineFramePtr =
+ Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
+ : FramePtr;
unsigned StackPtr = RegInfo->getStackRegister();
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() ==
- ExceptionHandling::ItaniumWinEH;
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
+ bool UseLEAForSP = false;
+
+ // We can't use LEA instructions for adjusting the stack pointer if this is a
+ // leaf function in the Win64 ABI. Only ADD instructions may be used to
+ // deallocate the stack.
+ if (STI.useLeaForSP()) {
+ if (!IsWinEH) {
+ // We *aren't* using the Win64 ABI which means we are free to use LEA.
+ UseLEAForSP = true;
+ } else if (HasFP) {
+ // We *have* a frame pointer which means we are permitted to use LEA.
+ UseLEAForSP = true;
+ }
+ }
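The Win64 constraint encoded here comes from the unwinder's fixed epilogue grammar; a sketch of the recognized shapes as I understand them:

//   legal, no FP:    addq $0x28, %rsp          ; ADD off RSP
//                    retq
//   legal, with FP:  leaq 0x20(%rbp), %rsp     ; LEA off the frame pointer
//                    popq %rbp
//                    retq
//   not recognized:  leaq 0x28(%rsp), %rsp     ; LEA off RSP in a leaf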
switch (RetOpcode) {
default:
- llvm_unreachable("Can only insert epilog into returning blocks");
+ llvm_unreachable("Can only insert epilogue into returning blocks");
case X86::RETQ:
case X86::RETL:
case X86::RETIL:
@@ -907,32 +1026,19 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Get the number of bytes to allocate from the FrameInfo.
uint64_t StackSize = MFI->getStackSize();
- uint64_t MaxAlign = MFI->getMaxAlignment();
+ uint64_t MaxAlign = calculateMaxStackAlign(MF);
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
uint64_t NumBytes = 0;
- // If we're forcing a stack realignment we can't rely on just the frame
- // info, we need to know the ABI stack alignment as well in case we
- // have a call out. Otherwise just make sure we have some alignment - we'll
- // go with the minimum.
- if (ForceStackAlign) {
- if (MFI->hasCalls())
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
- else
- MaxAlign = MaxAlign ? MaxAlign : 4;
- }
-
if (hasFP(MF)) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- if (RegInfo->needsStackRealignment(MF)) {
- // Callee-saved registers were pushed on stack before the stack
- // was realigned.
- FrameSize -= CSSize;
- NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
- } else {
- NumBytes = FrameSize - CSSize;
- }
+ NumBytes = FrameSize - CSSize;
+
+ // Callee-saved registers were pushed on stack before the stack was
+ // realigned.
+ if (RegInfo->needsStackRealignment(MF) && !IsWinEH)
+ NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -940,6 +1046,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
} else {
NumBytes = StackSize - CSSize;
}
+ uint64_t SEHStackAllocAmt = NumBytes;
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
@@ -967,10 +1074,20 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
if (RegInfo->needsStackRealignment(MF))
MBBI = FirstCSPop;
- if (CSSize != 0) {
+ unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
+ uint64_t LEAAmount = IsWinEH ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
+
+ // There are only two legal forms of epilogue:
+ // - add SEHAllocationSize, %rsp
+ // - lea SEHAllocationSize(%FramePtr), %rsp
+ //
+ // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
+ // However, we may use this sequence if we have a frame pointer because the
+ // effects of the prologue can safely be undone.
+ if (LEAAmount != 0) {
unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
- FramePtr, false, -CSSize);
+ FramePtr, false, LEAAmount);
--MBBI;
} else {
unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
@@ -980,8 +1097,8 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
} else if (NumBytes) {
// Adjust stack pointer back: ESP += numbytes.
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr, UseLEA,
- TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr,
+ UseLEAForSP, TII, *RegInfo);
--MBBI;
}
@@ -1027,14 +1144,16 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Check for possible merge with preceding ADD instruction.
Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
- UseLEA, TII, *RegInfo);
+ UseLEAForSP, TII, *RegInfo);
}
// Jump to label or value in register.
+ bool IsWin64 = STI.isTargetWin64();
if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
- ? X86::TAILJMPd : X86::TAILJMPd64));
+ unsigned Op = (RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd
+ : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
if (JumpTarget.isGlobal())
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
@@ -1044,14 +1163,16 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
JumpTarget.getTargetFlags());
}
} else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
- ? X86::TAILJMPm : X86::TAILJMPm64));
+ unsigned Op = (RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm
+ : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
for (unsigned i = 0; i != 5; ++i)
MIB.addOperand(MBBI->getOperand(i));
} else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
- addReg(JumpTarget.getReg(), RegState::Kill);
+ BuildMI(MBB, MBBI, DL,
+ TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
+ .addReg(JumpTarget.getReg(), RegState::Kill);
} else {
BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
addReg(JumpTarget.getReg(), RegState::Kill);
@@ -1071,24 +1192,58 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Check for possible merge with preceding ADD instruction.
delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr, UseLEA, TII,
- *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr,
+ UseLEAForSP, TII, *RegInfo);
}
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int FI) const {
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Offset will hold the offset from the stack pointer at function entry to the
+ // object.
+ // We need to factor in additional offsets applied during the prologue to the
+ // frame, base, and stack pointer depending on which is used.
int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
uint64_t StackSize = MFI->getStackSize();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ bool HasFP = hasFP(MF);
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ int64_t FPDelta = 0;
+
+ if (IsWinEH) {
+ assert(!MFI->hasCalls() || (StackSize % 16) == 8);
+
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ // If required, include space for extra hidden slot for stashing base pointer.
+ if (X86FI->getRestoreBasePointer())
+ FrameSize += SlotSize;
+ uint64_t NumBytes = FrameSize - CSSize;
+
+ uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
+ if (FI && FI == X86FI->getFAIndex())
+ return -SEHFrameOffset;
+
+ // FPDelta is the offset between the "traditional" FP location (the old base
+ // pointer followed by the return address) and the location required by the
+ // restricted Win64 prologue.
+ // Add FPDelta to all offsets below that go through the frame pointer.
+ FPDelta = FrameSize - SEHFrameOffset;
+ assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
+ "FPDelta isn't aligned per the Win64 ABI!");
+ }
+
if (RegInfo->hasBasePointer(MF)) {
- assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
+ assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
if (FI < 0) {
// Skip the saved EBP.
- return Offset + RegInfo->getSlotSize();
+ return Offset + SlotSize + FPDelta;
} else {
assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
@@ -1096,33 +1251,32 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
} else if (RegInfo->needsStackRealignment(MF)) {
if (FI < 0) {
// Skip the saved EBP.
- return Offset + RegInfo->getSlotSize();
+ return Offset + SlotSize + FPDelta;
} else {
assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
// FIXME: Support tail calls
} else {
- if (!hasFP(MF))
+ if (!HasFP)
return Offset + StackSize;
// Skip the saved EBP.
- Offset += RegInfo->getSlotSize();
+ Offset += SlotSize;
// Skip the RETADDR move area
- const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta < 0)
Offset -= TailCallReturnAddrDelta;
}
- return Offset;
+ return Offset + FPDelta;
}
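A worked example of the Win64 path above, with illustrative values (and assuming calculateSetFPREG is the identity below the 240-byte limit):

//   StackSize = 88  (satisfies StackSize % 16 == 8 when there are calls)
//   SlotSize  = 8                       -> FrameSize = 88 - 8  = 80
//   CSSize    = 16                      -> NumBytes  = 80 - 16 = 64
//   SEHFrameOffset = calculateSetFPREG(64) = 64    (assumed identity)
//   FPDelta   = FrameSize - SEHFrameOffset = 80 - 64 = 16  (16-aligned, OK)
// Every frame-pointer-relative offset computed above then shifts by +16.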
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
// We can't calculate offset from frame pointer if the stack is realigned,
// so enforce usage of stack/base pointer. The base pointer is used when we
// have dynamic allocas in addition to dynamic realignment.
@@ -1135,12 +1289,85 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return getFrameIndexOffset(MF, FI);
}
+// Simplified from getFrameIndexOffset keeping only StackPointer cases
+int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Does not include any dynamic realign.
+ const uint64_t StackSize = MFI->getStackSize();
+ {
+#ifndef NDEBUG
+ const X86RegisterInfo *RegInfo =
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
+ // Note: LLVM arranges the stack as:
+ // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
+ // > "Stack Slots" (<--SP)
+ // We can always address StackSlots from RSP. We can usually (unless
+ // needsStackRealignment) address CSRs from RSP, but sometimes need to
+ // address them from RBP. FixedObjects can be placed anywhere in the stack
+ // frame depending on their specific requirements (i.e. we can actually
+ // refer to arguments to the function which are stored in the *caller's*
+ // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
+ // AND FixedObjects IFF needsStackRealignment or hasVarSizedObject.
+
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ // We don't handle tail calls, and shouldn't be seeing them
+ // either.
+ int TailCallReturnAddrDelta =
+ MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
+ assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
+#endif
+ }
+
+ // This is how the math works out:
+ //
+ // %rsp grows (i.e. gets lower) left to right. Each box below is
+ // one word (eight bytes). Obj0 is the stack slot we're trying to
+ // get to.
+ //
+ // ----------------------------------
+ // | BP | Obj0 | Obj1 | ... | ObjN |
+ // ----------------------------------
+ // ^ ^ ^ ^
+ // A B C E
+ //
+ // A is the incoming stack pointer.
+ // (B - A) is the local area offset (-8 for x86-64) [1]
+ // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
+ //
+ // |(E - B)| is the StackSize (absolute value, positive). For a
+ // stack that grows down, this works out to be (B - E). [3]
+ //
+ // E is also the value of %rsp after stack has been set up, and we
+ // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
+ // (C - E) == (C - A) - (B - A) + (B - E)
+ // { Using [1], [2] and [3] above }
+ // == getObjectOffset - LocalAreaOffset + StackSize
+ //
+
+ // Get the Offset from the StackPointer
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+
+ return Offset + StackSize;
+}
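Plugging illustrative x86-64 numbers into the derivation above:

//   getObjectOffset(FI) = -24  (C - A),  LocalAreaOffset = -8  (B - A)
//   StackSize           =  40  (B - E)
//   Offset = -24 - (-8) = -16
//   Offset + StackSize  = -16 + 40 = 24, i.e. Obj0 lives at 24(%rsp).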
+// Simplified from getFrameIndexReference keeping only StackPointer cases
+int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
+ int FI,
+ unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ FrameReg = RegInfo->getStackRegister();
+ return getFrameIndexOffsetFromSP(MF, FI);
+}
+
bool X86FrameLowering::assignCalleeSavedSpillSlots(
MachineFunction &MF, const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1207,8 +1434,8 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
DebugLoc DL = MBB.findDebugLoc(MI);
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
// Push GPRs. It increases frame size.
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
@@ -1228,8 +1455,7 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
// It can be done by spilling XMMs to stack frame.
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (X86::GR64RegClass.contains(Reg) ||
- X86::GR32RegClass.contains(Reg))
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
@@ -1255,8 +1481,8 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
DebugLoc DL = MBB.findDebugLoc(MI);
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
// Reload XMMs from stack frame.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
@@ -1287,7 +1513,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ MF.getSubtarget<X86Subtarget>().getRegisterInfo();
unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1368,9 +1594,9 @@ void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MachineBasicBlock &prologueMBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
uint64_t StackSize;
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
const bool IsLP64 = STI.isTarget64BitLP64();
unsigned TlsReg, TlsOffset;
@@ -1382,8 +1608,9 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
if (MF.getFunction()->isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
- if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
- !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
+ if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
+ !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
+ !STI.isTargetDragonFly())
report_fatal_error("Segmented stacks not supported on this platform.");
// Eventually StackSize will be calculated by a link-time pass, which will
@@ -1437,6 +1664,9 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
} else if (STI.isTargetFreeBSD()) {
TlsReg = X86::FS;
TlsOffset = 0x18;
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x20; // use tls_tcb.tcb_segstack
} else {
report_fatal_error("Segmented stacks not supported on this platform.");
}
@@ -1459,6 +1689,9 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
} else if (STI.isTargetWin32()) {
TlsReg = X86::FS;
TlsOffset = 0x14; // pvArbitrary, reserved for application use
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x10; // use tls_tcb.tcb_segstack
} else if (STI.isTargetFreeBSD()) {
report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
} else {
@@ -1471,7 +1704,8 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
.addImm(1).addReg(0).addImm(-StackSize).addReg(0);
- if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
+ if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
+ STI.isTargetDragonFly()) {
BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
.addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else if (STI.isTargetDarwin()) {
@@ -1515,7 +1749,7 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
// This jump is taken if SP >= (Stacklet Limit + Stack Space required).
// It jumps to normal execution of the function body.
- BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);
+ BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);
// On 32 bit we first push the arguments size and then the frame size. On 64
// bit, we pass the stack frame size in r10 and the argument size in r11.
@@ -1546,12 +1780,36 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
}
// __morestack is in libgcc
- if (Is64Bit)
- BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
- .addExternalSymbol("__morestack");
- else
- BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
- .addExternalSymbol("__morestack");
+ if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
+ // Under the large code model, we cannot assume that __morestack lives
+ // within 2^31 bytes of the call site, so we cannot use pc-relative
+ // addressing. We cannot perform the call via a temporary register,
+ // as the rax register may be used to store the static chain, and all
+ // other suitable registers may be either callee-save or used for
+ // parameter passing. We cannot use the stack at this point either
+ // because __morestack manipulates the stack directly.
+ //
+ // To avoid these issues, perform an indirect call via a read-only memory
+ // location containing the address.
+ //
+ // This solution is not perfect, as it assumes that the .rodata section
+ // is laid out within 2^31 bytes of each function body, but this seems
+ // to be sufficient for JIT.
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addExternalSymbol("__morestack_addr")
+ .addReg(0);
+ MF.getMMI().setUsesMorestackAddr(true);
+ } else {
+ if (Is64Bit)
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
+ .addExternalSymbol("__morestack");
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
+ .addExternalSymbol("__morestack");
+ }
if (IsNested)
BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
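Roughly the code shape the two branches above produce (illustrative assembly; the exact directives the AsmPrinter emits for __morestack_addr are assumed):

//   Large code model:                    Other code models:
//     callq *__morestack_addr(%rip)        callq __morestack
//
//   with, somewhere in read-only data (emitted because
//   setUsesMorestackAddr(true) was called):
//     __morestack_addr:
//       .quad __morestack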
@@ -1584,12 +1842,10 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
/// temp0 = sp - MaxStack
/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const unsigned SlotSize =
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())
- ->getSlotSize();
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const unsigned SlotSize = STI.getRegisterInfo()->getSlotSize();
const bool Is64Bit = STI.is64Bit();
const bool IsLP64 = STI.isTarget64BitLP64();
DebugLoc DL;
@@ -1695,7 +1951,7 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
// SPLimitOffset is in a fixed heap location (pointed by BP).
addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
.addReg(ScratchReg), PReg, false, SPLimitOffset);
- BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);
+ BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);
// Create new MBB for IncStack:
BuildMI(incStackMBB, DL, TII.get(CALLop)).
@@ -1704,7 +1960,7 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
SPReg, false, -MaxStack);
addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
.addReg(ScratchReg), PReg, false, SPLimitOffset);
- BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);
+ BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
stackCheckMBB->addSuccessor(&prologueMBB, 99);
stackCheckMBB->addSuccessor(incStackMBB, 1);
@@ -1719,50 +1975,45 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
+ const X86RegisterInfo &RegInfo = *STI.getRegisterInfo();
unsigned StackPtr = RegInfo.getStackRegister();
- bool reseveCallFrame = hasReservedCallFrame(MF);
+ bool reserveCallFrame = hasReservedCallFrame(MF);
int Opcode = I->getOpcode();
bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool IsLP64 = STI.isTarget64BitLP64();
DebugLoc DL = I->getDebugLoc();
- uint64_t Amount = !reseveCallFrame ? I->getOperand(0).getImm() : 0;
- uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
+ uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+ uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
I = MBB.erase(I);
- if (!reseveCallFrame) {
+ if (!reserveCallFrame) {
// If the stack pointer can be changed after prologue, turn the
// adjcallstackup instruction into a 'sub ESP, <amt>' and the
// adjcallstackdown instruction into 'add ESP, <amt>'
- // TODO: consider using push / pop instead of sub + store / add
if (Amount == 0)
return;
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned StackAlign = MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment();
- Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
+ unsigned StackAlign = getStackAlignment();
+ Amount = RoundUpToAlignment(Amount, StackAlign);
MachineInstr *New = nullptr;
- if (Opcode == TII.getCallFrameSetupOpcode()) {
- New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(Amount);
- } else {
- assert(Opcode == TII.getCallFrameDestroyOpcode());
- // Factor out the amount the callee already popped.
- Amount -= CalleeAmt;
+ // Factor out the amount that gets handled inside the sequence
+ // (Pushes of argument for frame setup, callee pops for frame destroy)
+ Amount -= InternalAmt;
+
+ if (Amount) {
+ if (Opcode == TII.getCallFrameSetupOpcode()) {
+ New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
+ } else {
+ assert(Opcode == TII.getCallFrameDestroyOpcode());
- if (Amount) {
unsigned Opc = getADDriOpcode(IsLP64, Amount);
New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr).addImm(Amount);
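RoundUpToAlignment above replaces the open-coded rounding that this hunk deletes; a self-contained equivalent:

#include <cstdint>

// Round Value up to the next multiple of Align (Align > 0). Same arithmetic
// as the removed '(Amount + StackAlign - 1) / StackAlign * StackAlign'.
static uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}
// e.g. roundUpToAlignment(20, 16) == 32, roundUpToAlignment(32, 16) == 32.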
@@ -1780,13 +2031,13 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
return;
}
- if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
+ if (Opcode == TII.getCallFrameDestroyOpcode() && InternalAmt) {
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
- unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
+ unsigned Opc = getSUBriOpcode(IsLP64, InternalAmt);
MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr).addImm(CalleeAmt);
+ .addReg(StackPtr).addImm(InternalAmt);
// The EFLAGS implicit def is dead.
New->getOperand(3).setIsDead();
diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h
index 7740c3a..542bbbc 100644
--- a/lib/Target/X86/X86FrameLowering.h
+++ b/lib/Target/X86/X86FrameLowering.h
@@ -18,18 +18,16 @@
namespace llvm {
-class MCSymbol;
-class X86TargetMachine;
-class X86Subtarget;
-
class X86FrameLowering : public TargetFrameLowering {
public:
explicit X86FrameLowering(StackDirection D, unsigned StackAl, int LAO)
: TargetFrameLowering(StackGrowsDown, StackAl, LAO) {}
- static void getStackProbeFunction(const X86Subtarget &STI,
- unsigned &CallOp,
- const char *&Symbol);
+ /// Emit a call to the target's stack probe function. This is required for all
+ /// large stack allocations on Windows. The caller is required to materialize
+ /// the number of bytes to probe in RAX/EAX.
+ static void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL);
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -64,14 +62,30 @@ public:
bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
+ bool needsFrameIndexResolution(const MachineFunction &MF) const override;
int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
+ int getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const;
+ int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+
+private:
+ /// convertArgMovsToPushes - This method tries to convert a call sequence
+ /// that uses sub and mov instructions to put the argument onto the stack
+ /// into a series of pushes.
+ /// Returns true if the transformation succeeded, false if not.
+ bool convertArgMovsToPushes(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ uint64_t Amount) const;
};
} // End llvm namespace
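In rough i386 assembly, the call-sequence rewrite that convertArgMovsToPushes describes is (illustrative; registers and offsets depend on the call site):

//   before:                       after:
//     subl $8, %esp                 pushl %eax      ; was 4(%esp)
//     movl %eax, 4(%esp)            pushl %ebx      ; was (%esp)
//     movl %ebx, (%esp)
//     calll f                       calll f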
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 3ef7b2c..8d50ae1 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -156,9 +156,7 @@ namespace {
public:
explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel),
- Subtarget(&tm.getSubtarget<X86Subtarget>()),
- OptForSize(false) {}
+ : SelectionDAGISel(tm, OptLevel), OptForSize(false) {}
const char *getPassName() const override {
return "X86 DAG->DAG Instruction Selection";
@@ -166,7 +164,7 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override {
// Reset the subtarget each time through.
- Subtarget = &TM.getSubtarget<X86Subtarget>();
+ Subtarget = &MF.getSubtarget<X86Subtarget>();
SelectionDAGISel::runOnMachineFunction(MF);
return true;
}
@@ -233,7 +231,7 @@ namespace {
char ConstraintCode,
std::vector<SDValue> &OutOps) override;
- void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
+ void EmitSpecialCodeForMain();
inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
SDValue &Scale, SDValue &Index,
@@ -298,7 +296,7 @@ namespace {
/// getInstrInfo - Return a reference to the TargetInstrInfo, casted
/// to the target-specific type.
const X86InstrInfo *getInstrInfo() const {
- return getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ return Subtarget->getInstrInfo();
}
/// \brief Address-mode matching performs shift-of-and to and-of-shift
@@ -395,17 +393,14 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
Ops.clear();
Ops.push_back(NewChain);
}
- for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
- Ops.push_back(OrigChain.getOperand(i));
+ Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
- unsigned NumOps = Call.getNode()->getNumOperands();
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
- for (unsigned i = 1, e = NumOps; i != e; ++i)
- Ops.push_back(Call.getOperand(i));
+ Ops.append(Call->op_begin() + 1, Call->op_end());
CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
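The Ops.append calls above are the range form of the deleted push_back loops; a minimal SmallVector analogue:

#include "llvm/ADT/SmallVector.h"

// Append everything after the first element of [Begin, End), as the old
// 'for (i = 1, e = ...; i != e; ++i) Ops.push_back(...)' loops did.
// Assumes End > Begin.
static void appendTail(llvm::SmallVectorImpl<int> &Ops,
                       const int *Begin, const int *End) {
  Ops.append(Begin + 1, End);
}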
@@ -453,8 +448,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+ OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
@@ -571,14 +565,18 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
-void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
- MachineFrameInfo *MFI) {
- const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+void X86DAGToDAGISel::EmitSpecialCodeForMain() {
if (Subtarget->isTargetCygMing()) {
- unsigned CallOp =
- Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
- BuildMI(BB, DebugLoc(),
- TII->get(CallOp)).addExternalSymbol("__main");
+ TargetLowering::ArgListTy Args;
+
+ TargetLowering::CallLoweringInfo CLI(*CurDAG);
+ CLI.setChain(CurDAG->getRoot())
+ .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
+ CurDAG->getExternalSymbol("__main", TLI->getPointerTy()),
+ std::move(Args), 0);
+ const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
+ std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
+ CurDAG->setRoot(Result.second);
}
}
@@ -586,7 +584,7 @@ void X86DAGToDAGISel::EmitFunctionEntryCode() {
// If this is main, emit special code for main.
if (const Function *Fn = MF->getFunction())
if (Fn->hasExternalLinkage() && Fn->getName() == "main")
- EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
+ EmitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
@@ -918,7 +916,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
// We also need to ensure that the mask is a continuous run of bits.
- if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
+ if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
// Scale the leading zero count down based on the actual size of the value.
// Also scale it down based on the size of the shift.
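A standalone restatement of the mask test above, assuming unsigned 64-bit masks; countTrailingOnes is open-coded so the sketch compiles without LLVM's MathExtras:

#include <cstdint>

// True when, after the MaskTZ trailing zeros, Mask is one solid run of set
// bits that reaches exactly to the MaskLZ leading zeros.
static bool isSolidRunOfOnes(uint64_t Mask, unsigned MaskTZ, unsigned MaskLZ) {
  uint64_t V = Mask >> MaskTZ;
  unsigned Ones = 0;
  while (V & 1) {
    ++Ones;
    V >>= 1;
  }
  return Ones + MaskTZ + MaskLZ == 64;
}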
@@ -1891,8 +1889,8 @@ static bool HasNoSignedComparisonUses(SDNode *N) {
case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
- case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
- case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
+ case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
+ case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
case X86::CMOVA16rr: case X86::CMOVA16rm:
case X86::CMOVA32rr: case X86::CMOVA32rm:
case X86::CMOVA64rr: case X86::CMOVA64rm:
@@ -2504,7 +2502,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
} else {
// Zero out the high part, effectively zero extending the input.
- SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
+ SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
switch (NVT.SimpleTy) {
case MVT::i16:
ClrNode =
@@ -2612,26 +2610,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue N1 = Node->getOperand(1);
if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
- HasNoSignedComparisonUses(Node)) {
- // Look for (X86cmp (truncate $op, i1), 0) and try to convert to a
- // smaller encoding
- if (Opcode == X86ISD::CMP && N0.getValueType() == MVT::i1 &&
- X86::isZeroNode(N1)) {
- SDValue Reg = N0.getOperand(0);
- SDValue Imm = CurDAG->getTargetConstant(1, MVT::i8);
-
- // Emit testb
- if (Reg.getScalarValueSizeInBits() > 8)
- Reg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Reg);
- // Emit a testb.
- SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
- Reg, Imm);
- ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
- return nullptr;
- }
-
+ HasNoSignedComparisonUses(Node))
N0 = N0.getOperand(0);
- }
+
// Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
// use a smaller encoding.
// Look past the truncate if CMP is the only use of it.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f05b6c6..6866be7 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -15,6 +15,7 @@
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
+#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
@@ -66,11 +67,6 @@ static cl::opt<bool> ExperimentalVectorWideningLegalization(
"rather than promotion."),
cl::Hidden);
-static cl::opt<bool> ExperimentalVectorShuffleLowering(
- "x86-experimental-vector-shuffle-lowering", cl::init(true),
- cl::desc("Enable an experimental vector shuffle lowering code path."),
- cl::Hidden);
-
static cl::opt<int> ReciprocalEstimateRefinementSteps(
"x86-recip-refinement-steps", cl::init(1),
cl::desc("Specify the number of Newton-Raphson iterations applied to the "
@@ -107,21 +103,18 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
- makeArrayRef(Vec->op_begin()+NormalizedIdxVal,
+ makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
- SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
- VecIdx);
-
- return Result;
-
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
+
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
-/// 128 bits we want. It need not be aligned to a 128-bit bounday. That makes
+/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, SDLoc dl) {
@@ -158,25 +151,23 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec,
* ElemsPerChunk);
SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
- return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
- VecIdx);
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
+
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
-/// we want. It need not be aligned to a 128-bit bounday. That makes
+/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
-static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
- unsigned IdxVal, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, SDLoc dl) {
assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
-static SDValue Insert256BitVector(SDValue Result, SDValue Vec,
- unsigned IdxVal, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, SDLoc dl) {
assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
@@ -199,44 +190,23 @@ static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
-// FIXME: This should stop caching the target machine as soon as
-// we can remove resetOperationActions et al.
-X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
- : TargetLowering(TM) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
+X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
+ const X86Subtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
TD = getDataLayout();
- resetOperationActions();
-}
-
-void X86TargetLowering::resetOperationActions() {
- const TargetMachine &TM = getTargetMachine();
- static bool FirstTimeThrough = true;
-
- // If none of the target options have changed, then we don't need to reset the
- // operation actions.
- if (!FirstTimeThrough && TO == TM.Options) return;
-
- if (!FirstTimeThrough) {
- // Reinitialize the actions.
- initActions();
- FirstTimeThrough = false;
- }
-
- TO = TM.Options;
-
// Set up the TargetLowering object.
static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
- // X86 is weird, it always uses i8 for shift amounts and setcc results.
+ // X86 is weird. It always uses i8 for shift amounts and setcc results.
setBooleanContents(ZeroOrOneBooleanContent);
// X86-SSE is even stranger. It uses -1 or 0 for vector masks.
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- // For 64-bit since we have so many registers use the ILP scheduler, for
- // 32-bit code use the register pressure specific scheduling.
+ // For 64-bit, since we have so many registers, use the ILP scheduler.
+ // For 32-bit, use the register pressure specific scheduling.
// For Atom, always use ILP scheduling.
if (Subtarget->isAtom())
setSchedulingPreference(Sched::ILP);
@@ -244,14 +214,14 @@ void X86TargetLowering::resetOperationActions() {
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
- const X86RegisterInfo *RegInfo =
- TM.getSubtarget<X86Subtarget>().getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
- // Bypass expensive divides on Atom when compiling with O2
- if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) {
- addBypassSlowDiv(32, 8);
- if (Subtarget->is64Bit())
+ // Bypass expensive divides on Atom when compiling with O2.
+ if (TM.getOptLevel() >= CodeGenOpt::Default) {
+ if (Subtarget->hasSlowDivide32())
+ addBypassSlowDiv(32, 8);
+ if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
addBypassSlowDiv(64, 16);
}
@@ -296,7 +266,8 @@ void X86TargetLowering::resetOperationActions() {
if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, &X86::GR64RegClass);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
@@ -521,7 +492,9 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f80, MVT::f16, Expand);
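The recurring API change in this patch: setLoadExtAction now takes both the in-register value type and the memory type, where the old form keyed only on the memory type. Schematically (the parameter names here are descriptive, not the actual signature):

//   old: setLoadExtAction(ISD::EXTLOAD, /*MemVT=*/MVT::f16, Expand);
//   new: setLoadExtAction(ISD::EXTLOAD, /*ValVT=*/MVT::f32,
//                         /*MemVT=*/MVT::f16, Expand);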
@@ -805,9 +778,7 @@ void X86TargetLowering::resetOperationActions() {
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
- for (int i = MVT::FIRST_VECTOR_VALUETYPE;
- i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
+ for (MVT VT : MVT::vector_valuetypes()) {
setOperationAction(ISD::ADD , VT, Expand);
setOperationAction(ISD::SUB , VT, Expand);
setOperationAction(ISD::FADD, VT, Expand);
@@ -876,18 +847,19 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
- for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
- InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
- setTruncStoreAction(VT,
- (MVT::SimpleValueType)InnerVT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
+ for (MVT InnerVT : MVT::vector_valuetypes()) {
+ setTruncStoreAction(InnerVT, VT, Expand);
+
+ setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
- // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like types,
- // we have to deal with them whether we ask for Expansion or not. Setting
- // Expand causes its own optimisation problems though, so leave them legal.
- if (VT.getVectorElementType() == MVT::i1)
- setLoadExtAction(ISD::EXTLOAD, VT, Expand);
+ // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
+ // types; we have to deal with them whether we ask for Expansion or not.
+ // Setting Expand causes its own optimisation problems though, so leave
+ // them legal.
+ if (VT.getVectorElementType() == MVT::i1)
+ setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
+ }
}
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
@@ -942,6 +914,7 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
@@ -991,6 +964,14 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
+ // Only provide customized ctpop vector bit twiddling for vector types we
+ // know to perform better than using the popcnt instructions on each vector
+ // element. If popcnt isn't supported, always provide the custom version.
+ if (!Subtarget->hasPOPCNT()) {
+ setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
+ setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
+ }
+
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
MVT VT = (MVT::SimpleValueType)i;
@@ -1002,6 +983,7 @@ void X86TargetLowering::resetOperationActions() {
continue;
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
@@ -1009,20 +991,24 @@ void X86TargetLowering::resetOperationActions() {
// memory vector types which we can load as a scalar (or sequence of
// scalars) and extend in-register to a legal 128-bit vector type. For sext
// loads these must work with a single scalar load.
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v8i8, Custom);
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
+ }
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
@@ -1070,7 +1056,8 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
+ for (MVT VT : MVT::fp_vector_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
@@ -1103,20 +1090,32 @@ void X86TargetLowering::resetOperationActions() {
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
- setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
- // There is no BLENDI for byte vectors. We don't need to custom lower
- // some vselects for now.
+ // We directly match byte blends in the backend as they match the VSELECT
+ // condition form.
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
// SSE41 brings specific instructions for doing vector sign extend even in
// cases where we don't have SRA.
- setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, Custom);
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
+ }
+
+ // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
+
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
// i8 and i16 vectors are custom because the source register and source
// memory operand types are not the same width. f32 vectors are
@@ -1212,7 +1211,8 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal);
+ for (MVT VT : MVT::fp_vector_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
setOperationAction(ISD::SRL, MVT::v16i16, Custom);
setOperationAction(ISD::SRL, MVT::v32i8, Custom);
@@ -1232,11 +1232,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);
-
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
@@ -1280,12 +1275,34 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
- setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
- setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
-
// The custom lowering for UINT_TO_FP for v8i32 becomes interesting
// when we have a 256bit-wide blend with immediate.
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
+
+ // Only provide customized ctpop vector bit twiddling for vector types we
+ // know to perform better than using the popcnt instructions on each
+ // vector element. If popcnt isn't supported, always provide the custom
+ // version.
+ if (!Subtarget->hasPOPCNT())
+ setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
+
+ // Custom CTPOP always performs better on natively supported v8i32
+ setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
+
+ // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
+
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
} else {
setOperationAction(ISD::ADD, MVT::v4i64, Custom);
setOperationAction(ISD::ADD, MVT::v8i32, Custom);
@@ -1314,21 +1331,23 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SRA, MVT::v8i32, Custom);
// Custom lower several nodes for 256-bit types.
- for (int i = MVT::FIRST_VECTOR_VALUETYPE;
- i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
-
+ for (MVT VT : MVT::vector_valuetypes()) {
+ if (VT.getScalarSizeInBits() >= 32) {
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ }
// Extract subvector is special because the value type
// (result) is 128-bit but the source is 256-bit wide.
- if (VT.is128BitVector())
+ if (VT.is128BitVector()) {
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
-
+ }
// Do not attempt to custom lower other non-256-bit vectors
if (!VT.is256BitVector())
continue;
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
@@ -1336,6 +1355,10 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
+ if (Subtarget->hasInt256())
+ setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
+
// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
MVT VT = (MVT::SimpleValueType)i;
@@ -1367,12 +1390,14 @@ void X86TargetLowering::resetOperationActions() {
addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
+ for (MVT VT : MVT::fp_vector_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
+
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::SETCC, MVT::i1, Custom);
setOperationAction(ISD::XOR, MVT::i1, Legal);
setOperationAction(ISD::OR, MVT::i1, Legal);
setOperationAction(ISD::AND, MVT::i1, Legal);
- setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, Legal);
setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
@@ -1434,6 +1459,17 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
+ setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
+ setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
+
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
@@ -1486,16 +1522,13 @@ void X86TargetLowering::resetOperationActions() {
}
// Custom lower several nodes.
- for (int i = MVT::FIRST_VECTOR_VALUETYPE;
- i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
-
+ for (MVT VT : MVT::vector_valuetypes()) {
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
// Extract subvector is special because the value type
// (result) is 256/128-bit but the source is 512-bit wide.
- if (VT.is128BitVector() || VT.is256BitVector())
+ if (VT.is128BitVector() || VT.is256BitVector()) {
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
-
+ }
if (VT.getVectorElementType() == MVT::i1)
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
@@ -1511,12 +1544,14 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
}
}
for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
MVT VT = (MVT::SimpleValueType)i;
- // Do not attempt to promote non-256-bit vectors
+ // Do not attempt to promote non-512-bit vectors.
if (!VT.is512BitVector())
continue;
@@ -1536,17 +1571,22 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
+ setOperationAction(ISD::ADD, MVT::v32i16, Legal);
+ setOperationAction(ISD::ADD, MVT::v64i8, Legal);
+ setOperationAction(ISD::SUB, MVT::v32i16, Legal);
+ setOperationAction(ISD::SUB, MVT::v64i8, Legal);
+ setOperationAction(ISD::MUL, MVT::v32i16, Legal);
for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
const MVT VT = (MVT::SimpleValueType)i;
const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- // Do not attempt to promote non-256-bit vectors
+ // Do not attempt to promote non-512-bit vectors.
if (!VT.is512BitVector())
continue;
- if ( EltSize < 32) {
+ if (EltSize < 32) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Legal);
}
@@ -1560,14 +1600,13 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
- }
- // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
- // of this type with custom code.
- for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
- VT != MVT::LAST_VECTOR_VALUETYPE; VT++) {
- setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
- Custom);
+ setOperationAction(ISD::AND, MVT::v8i32, Legal);
+ setOperationAction(ISD::OR, MVT::v8i32, Legal);
+ setOperationAction(ISD::XOR, MVT::v8i32, Legal);
+ setOperationAction(ISD::AND, MVT::v4i32, Legal);
+ setOperationAction(ISD::OR, MVT::v4i32, Legal);
+ setOperationAction(ISD::XOR, MVT::v4i32, Legal);
}
// We want to custom lower some of our intrinsics.
@@ -1607,9 +1646,8 @@ void X86TargetLowering::resetOperationActions() {
setLibcallName(RTLIB::SINCOS_F32, "sincosf");
setLibcallName(RTLIB::SINCOS_F64, "sincos");
if (Subtarget->isTargetDarwin()) {
- // For MacOSX, we don't want to the normal expansion of a libcall to
- // sincos. We want to issue a libcall to __sincos_stret to avoid memory
- // traffic.
+ // For MacOSX, we don't want the normal expansion of a libcall to sincos.
+ // We want to issue a libcall to __sincos_stret to avoid memory traffic.
setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
}
@@ -1627,6 +1665,7 @@ void X86TargetLowering::resetOperationActions() {
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
+ setTargetDAGCombine(ISD::BITCAST);
setTargetDAGCombine(ISD::VSELECT);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
@@ -1640,7 +1679,9 @@ void X86TargetLowering::resetOperationActions() {
setTargetDAGCombine(ISD::FMA);
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::LOAD);
+ setTargetDAGCombine(ISD::MLOAD);
setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::MSTORE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
@@ -1650,11 +1691,10 @@ void X86TargetLowering::resetOperationActions() {
setTargetDAGCombine(ISD::SETCC);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
setTargetDAGCombine(ISD::BUILD_VECTOR);
- if (Subtarget->is64Bit())
- setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget->getRegisterInfo());
// On Darwin, -Os means optimize for size without hurting performance,
// do not reduce the limit.
@@ -1668,7 +1708,7 @@ void X86TargetLowering::resetOperationActions() {
// Predictable cmov don't hurt on atom because it's in-order.
PredictableSelectIsExpensive = !Subtarget->isAtom();
-
+ EnableExtLdPromotion = true;
setPrefFunctionAlignment(4); // 2^4 bytes.
verifyIntrinsicTables();
@@ -1676,8 +1716,7 @@ void X86TargetLowering::resetOperationActions() {
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
- return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO &&
- Subtarget->is64Bit();
+ return Subtarget->isTargetMachO() && Subtarget->is64Bit();
}
TargetLoweringBase::LegalizeTypeAction
@@ -1733,7 +1772,7 @@ EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
return VT.changeVectorElementTypeToInteger();
}
-/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
+/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
if (MaxAlign == 16)
@@ -1758,7 +1797,7 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
}
}
-/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
+/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
@@ -1777,7 +1816,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
return Align;
}
-/// getOptimalMemOpType - Returns the target specific optimal type for load
+/// Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, it is safe to assume the destination
/// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
@@ -1796,8 +1835,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
MachineFunction &MF) const {
const Function *F = MF.getFunction();
if ((!IsMemset || ZeroMemset) &&
- !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoImplicitFloat)) {
+ !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(Subtarget->isUnalignedMemAccessFast() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -1843,7 +1881,7 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
return true;
}
-/// getJumpTableEncoding - Return the entry encoding for a jump table in the
+/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
@@ -1869,8 +1907,7 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
-/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
-/// jumptable.
+/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
if (!Subtarget->is64Bit())
@@ -1880,9 +1917,8 @@ SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
return Table;
}
-/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
-/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
-/// MCExpr.
+/// This returns the relocation base for the given PIC jumptable,
+/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
MCContext &Ctx) const {
@@ -1894,14 +1930,14 @@ getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}
-// FIXME: Why this routine is here? Move to RegInfo!
-std::pair<const TargetRegisterClass*, uint8_t>
-X86TargetLowering::findRepresentativeClass(MVT VT) const{
+std::pair<const TargetRegisterClass *, uint8_t>
+X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
+ MVT VT) const {
const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
- return TargetLowering::findRepresentativeClass(VT);
+ return TargetLowering::findRepresentativeClass(TRI, VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
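findRepresentativeClass feeds the register-pressure heuristics: for each simple value type it reports a representative register class plus a relative cost. It is invoked from computeRegisterProperties, which is why both now take the TargetRegisterInfo. Roughly (a sketch, not part of the patch):

    // In TargetLoweringBase::computeRegisterProperties(TRI):
    std::pair<const TargetRegisterClass *, uint8_t> Res =
        findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = Res.first;
    RepRegClassCostForVT[i] = Res.second;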
@@ -1994,7 +2030,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
SDValue ValToCopy = OutVals[i];
EVT ValVT = ValToCopy.getValueType();
- // Promote values to the appropriate types
+ // Promote values to the appropriate types.
if (VA.getLocInfo() == CCValAssign::SExt)
ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
else if (VA.getLocInfo() == CCValAssign::ZExt)
@@ -2005,7 +2041,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
assert(VA.getLocInfo() != CCValAssign::FPExt &&
- "Unexpected FP-extend for return value.");
+ "Unexpected FP-extend for return value.");
// If this is x86-64, and we disabled SSE, we can't return FP values,
// or SSE or MMX vectors.
@@ -2060,14 +2096,15 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// Win32 requires us to put the sret argument to %eax as well.
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
- if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
- (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
- MachineFunction &MF = DAG.getMachineFunction();
- X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- unsigned Reg = FuncInfo->getSRetReturnReg();
- assert(Reg &&
- "SRetReturnReg should have been set in LowerFormalArguments().");
- SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
+ //
+ // Checking Function.hasStructRetAttr() here is insufficient because the IR
+ // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
+ // false, then an sret argument may be implicitly inserted in the SelDAG. In
+ // either case FuncInfo->setSRetReturnReg() will have been called.
+ if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
+ assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
+ "No need for an sret register");
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
unsigned RetValReg
= (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
@@ -2141,7 +2178,7 @@ X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
return VT.bitsLT(MinVT) ? MinVT : VT;
}
-/// LowerCallResult - Lower the result values of a call into the
+/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue
@@ -2221,8 +2258,7 @@ callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
return StackStructReturn;
}
-/// ArgsAreStructReturn - Determines whether a function uses struct
-/// return semantics.
+/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
if (Ins.empty())
@@ -2236,10 +2272,9 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
return StackStructReturn;
}
-/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
-/// by "Src" to address "Dst" with size and alignment information specified by
-/// the specific parameter attribute. The copy will be passed as a byval
-/// function parameter.
+/// Make a copy of an aggregate at address specified by "Src" to address
+/// "Dst" with size and alignment information specified by the specific
+/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
@@ -2251,7 +2286,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
MachinePointerInfo(), MachinePointerInfo());
}
-/// IsTailCallConvention - Return true if the calling convention is one that
+/// Return true if the calling convention is one that
/// supports tail call optimization.
static bool IsTailCallConvention(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
@@ -2276,7 +2311,7 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
return true;
}
-/// FuncIsMadeTailCallSafe - Return true if the function is being made into
+/// Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
bool GuaranteedTailCallOpt) {
@@ -2356,8 +2391,7 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
}
const Function *Fn = MF.getFunction();
- bool NoImplicitFloatOps = Fn->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
@@ -2523,18 +2557,19 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MFI->CreateFixedObject(1, StackSize, true));
}
+ // Figure out if XMM registers are in use.
+ assert(!(MF.getTarget().Options.UseSoftFloat &&
+ Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ "SSE register cannot be used when SSE is disabled!");
+
// 64-bit calling conventions support varargs and register parameters, so we
- // have to do extra work to spill them in the prologue or forward them to
- // musttail calls.
- if (Is64Bit && isVarArg &&
- (MFI->hasVAStart() || MFI->hasMustTailInVarArgFunc())) {
+ // have to do extra work to spill them in the prologue.
+ if (Is64Bit && isVarArg && MFI->hasVAStart()) {
// Find the first unallocated argument registers.
ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
- unsigned NumIntRegs =
- CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
- unsigned NumXMMRegs =
- CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
+ unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2557,90 +2592,99 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
}
}
- // Store them to the va_list returned by va_start.
- if (MFI->hasVAStart()) {
- if (IsWin64) {
- const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
- // Get to the caller-allocated home save location. Add 8 to account
- // for the return address.
- int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
- FuncInfo->setRegSaveFrameIndex(
+ if (IsWin64) {
+ const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ // Get to the caller-allocated home save location. Add 8 to account
+ // for the return address.
+ int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
+ FuncInfo->setRegSaveFrameIndex(
MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
- // Fixup to set vararg frame on shadow area (4 x i64).
- if (NumIntRegs < 4)
- FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
- } else {
- // For X86-64, if there are vararg parameters that are passed via
- // registers, then we must store them to their spots on the stack so
- // they may be loaded by deferencing the result of va_next.
- FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
- FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
- FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
- ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
- }
-
- // Store the integer parameter registers.
- SmallVector<SDValue, 8> MemOps;
- SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
- getPointerTy());
- unsigned Offset = FuncInfo->getVarArgsGPOffset();
- for (SDValue Val : LiveGPRs) {
- SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
- DAG.getIntPtrConstant(Offset));
- SDValue Store =
- DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo::getFixedStack(
- FuncInfo->getRegSaveFrameIndex(), Offset),
- false, false, 0);
- MemOps.push_back(Store);
- Offset += 8;
- }
-
- if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
- // Now store the XMM (fp + vector) parameter registers.
- SmallVector<SDValue, 12> SaveXMMOps;
- SaveXMMOps.push_back(Chain);
- SaveXMMOps.push_back(ALVal);
- SaveXMMOps.push_back(DAG.getIntPtrConstant(
- FuncInfo->getRegSaveFrameIndex()));
- SaveXMMOps.push_back(DAG.getIntPtrConstant(
- FuncInfo->getVarArgsFPOffset()));
- SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
- LiveXMMRegs.end());
- MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
- MVT::Other, SaveXMMOps));
- }
-
- if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
+ // Fixup to set vararg frame on shadow area (4 x i64).
+ if (NumIntRegs < 4)
+ FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
} else {
- // Add all GPRs, al, and XMMs to the list of forwards. We will add then
- // to the liveout set on a musttail call.
- assert(MFI->hasMustTailInVarArgFunc());
- auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
- typedef X86MachineFunctionInfo::Forward Forward;
-
- for (unsigned I = 0, E = LiveGPRs.size(); I != E; ++I) {
- unsigned VReg =
- MF.getRegInfo().createVirtualRegister(&X86::GR64RegClass);
- Chain = DAG.getCopyToReg(Chain, dl, VReg, LiveGPRs[I]);
- Forwards.push_back(Forward(VReg, ArgGPRs[NumIntRegs + I], MVT::i64));
- }
-
- if (!ArgXMMs.empty()) {
- unsigned ALVReg =
- MF.getRegInfo().createVirtualRegister(&X86::GR8RegClass);
- Chain = DAG.getCopyToReg(Chain, dl, ALVReg, ALVal);
- Forwards.push_back(Forward(ALVReg, X86::AL, MVT::i8));
-
- for (unsigned I = 0, E = LiveXMMRegs.size(); I != E; ++I) {
- unsigned VReg =
- MF.getRegInfo().createVirtualRegister(&X86::VR128RegClass);
- Chain = DAG.getCopyToReg(Chain, dl, VReg, LiveXMMRegs[I]);
- Forwards.push_back(
- Forward(VReg, ArgXMMs[NumXMMRegs + I], MVT::v4f32));
- }
- }
+ // For X86-64, if there are vararg parameters that are passed via
+ // registers, then we must store them to their spots on the stack so
+    // they may be loaded by dereferencing the result of va_next.
+ FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
+ FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
+ FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
+ ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
+ }
+
+ // Store the integer parameter registers.
+ SmallVector<SDValue, 8> MemOps;
+ SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
+ getPointerTy());
+ unsigned Offset = FuncInfo->getVarArgsGPOffset();
+ for (SDValue Val : LiveGPRs) {
+ SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
+ DAG.getIntPtrConstant(Offset));
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo::getFixedStack(
+ FuncInfo->getRegSaveFrameIndex(), Offset),
+ false, false, 0);
+ MemOps.push_back(Store);
+ Offset += 8;
+ }
+
+ if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
+ // Now store the XMM (fp + vector) parameter registers.
+ SmallVector<SDValue, 12> SaveXMMOps;
+ SaveXMMOps.push_back(Chain);
+ SaveXMMOps.push_back(ALVal);
+ SaveXMMOps.push_back(DAG.getIntPtrConstant(
+ FuncInfo->getRegSaveFrameIndex()));
+ SaveXMMOps.push_back(DAG.getIntPtrConstant(
+ FuncInfo->getVarArgsFPOffset()));
+ SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
+ LiveXMMRegs.end());
+ MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
+ MVT::Other, SaveXMMOps));
+ }
+
+ if (!MemOps.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
+ }
+
+ if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
+ // Find the largest legal vector type.
+ MVT VecVT = MVT::Other;
+ // FIXME: Only some x86_32 calling conventions support AVX512.
+ if (Subtarget->hasAVX512() &&
+ (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
+ CallConv == CallingConv::Intel_OCL_BI)))
+ VecVT = MVT::v16f32;
+ else if (Subtarget->hasAVX())
+ VecVT = MVT::v8f32;
+ else if (Subtarget->hasSSE2())
+ VecVT = MVT::v4f32;
+
+ // We forward some GPRs and some vector types.
+ SmallVector<MVT, 2> RegParmTypes;
+ MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
+ RegParmTypes.push_back(IntVT);
+ if (VecVT != MVT::Other)
+ RegParmTypes.push_back(VecVT);
+
+ // Compute the set of forwarded registers. The rest are scratch.
+ SmallVectorImpl<ForwardedRegister> &Forwards =
+ FuncInfo->getForwardedMustTailRegParms();
+ CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
+
+ // Conservatively forward AL on x86_64, since it might be used for varargs.
+ if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
+ unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
+ Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
+ }
+
+ // Copy all forwards from physical to virtual registers.
+ for (ForwardedRegister &F : Forwards) {
+ // FIXME: Can we use a less constrained schedule?
+ SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
+ F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
+ Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
}
}
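The rewritten forwarding path keys off CCState::analyzeMustTailForwardedRegisters plus the ForwardedRegister record; the struct's assumed shape, from CallingConvLower.h at this revision:

    struct ForwardedRegister {
      ForwardedRegister(unsigned VReg, MCPhysReg PReg, MVT VT)
          : VReg(VReg), PReg(PReg), VT(VT) {}
      unsigned VReg;   // virtual register carrying the incoming value
      MCPhysReg PReg;  // physical register to restore at the musttail call site
      MVT VT;          // type the value is shuttled with (i64, v4f32, ...)
    };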
@@ -2688,7 +2732,7 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
false, false, 0);
}
-/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call
+/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
@@ -2705,7 +2749,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
return SDValue(OutRetAddr.getNode(), 1);
}
-/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
+/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx,
@@ -2838,8 +2882,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization arguments are handle later.
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
// Skip inalloca arguments, they have already been written.
ISD::ArgFlagsTy Flags = Outs[i].Flags;
@@ -2952,7 +2995,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
assert((Subtarget->hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
@@ -2960,7 +3003,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(NumXMMRegs, MVT::i8)));
}
- if (Is64Bit && isVarArg && IsMustTail) {
+ if (isVarArg && IsMustTail) {
const auto &Forwards = X86Info->getForwardedMustTailRegParms();
for (const auto &F : Forwards) {
SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
@@ -3044,10 +3087,11 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// through a register, since the call instruction's 32-bit
// pc-relative offset may not be large enough to hold the whole
// address.
- } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ } else if (Callee->getOpcode() == ISD::GlobalAddress) {
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
// it.
+ GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
@@ -3073,11 +3117,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// unless we're building with the leopard linker or later, which
// automatically synthesizes these stubs.
OpFlags = X86II::MO_DARWIN_STUB;
- } else if (Subtarget->isPICStyleRIPRel() &&
- isa<Function>(GV) &&
- cast<Function>(GV)->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NonLazyBind)) {
+ } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
+ cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
// at the cost of eager binding (and one extra byte of encoding).
@@ -3117,7 +3158,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
OpFlags);
- } else if (Subtarget->isTarget64BitILP32() && Callee->getValueType(0) == MVT::i32) {
+ } else if (Subtarget->isTarget64BitILP32() &&
+ Callee->getValueType(0) == MVT::i32) {
    // Zero-extend the 32-bit Callee address into 64 bits according to the x32 ABI.
Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
}
@@ -3146,7 +3188,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -3235,11 +3277,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const TargetMachine &TM = MF.getTarget();
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
- const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
@@ -3276,7 +3315,8 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
return false;
} else {
unsigned Opcode = Def->getOpcode();
- if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
+ if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
+ Opcode == X86::LEA64_32r) &&
Def->getOperand(1).isFI()) {
FI = Def->getOperand(1).getIndex();
Bytes = Flags.getByValSize();
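      // LEA64_32r is the x32/ILP32 form (32-bit result of a 64-bit address
      // computation); without it in this list, byval arguments built with such
      // LEAs were never matched against the caller's fixed stack objects.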
@@ -3341,6 +3381,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
+ // Win64 functions have extra shadow space for argument homing. Don't do the
+ // sibcall if the caller and callee have mismatched expectations for this
+ // space.
+ if (IsCalleeWin64 != IsCallerWin64)
+ return false;
+
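  // Illustrative mismatch this guards against (hypothetical source): a Win64
  // caller allocates a 32-byte shadow area for homing rcx/rdx/r8/r9 that a
  // SysV callee neither expects nor preserves, so the frames are incompatible:
  //   extern "C" int Callee(int) __attribute__((sysv_abi));
  //   extern "C" int __attribute__((ms_abi)) Caller(int X) {
  //     return Callee(X);  // tail position, but no sibcall is formed
  //   }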
if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
if (IsTailCallConvention(CalleeCC) && CCMatch)
return true;
@@ -3352,8 +3398,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
if (RegInfo->needsStackRealignment(MF))
return false;
@@ -3465,8 +3510,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const X86InstrInfo *TII =
- static_cast<const X86InstrInfo *>(DAG.getSubtarget().getInstrInfo());
+ const X86InstrInfo *TII = Subtarget->getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -3494,7 +3538,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// In PIC we need an extra register to formulate the address computation
// for the callee.
unsigned MaxInRegs =
- (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
+ (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -3563,17 +3607,6 @@ static bool isTargetShuffle(unsigned Opcode) {
}
static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
- SDValue V1, SelectionDAG &DAG) {
- switch(Opc) {
- default: llvm_unreachable("Unknown x86 shuffle node");
- case X86ISD::MOVSHDUP:
- case X86ISD::MOVSLDUP:
- case X86ISD::MOVDDUP:
- return DAG.getNode(Opc, dl, VT, V1);
- }
-}
-
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, unsigned TargetMask,
SelectionDAG &DAG) {
switch(Opc) {
@@ -3588,20 +3621,6 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
}
static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
- SDValue V1, SDValue V2, unsigned TargetMask,
- SelectionDAG &DAG) {
- switch(Opc) {
- default: llvm_unreachable("Unknown x86 shuffle node");
- case X86ISD::PALIGNR:
- case X86ISD::VALIGN:
- case X86ISD::SHUFP:
- case X86ISD::VPERM2X128:
- return DAG.getNode(Opc, dl, VT, V1, V2,
- DAG.getConstant(TargetMask, MVT::i8));
- }
-}
-
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue V1, SDValue V2, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
@@ -3620,8 +3639,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -3661,7 +3679,7 @@ bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
  // For the kernel code model we know that all objects reside in the negative
  // half of the 32-bit address space. We may not accept negative offsets, since
  // they may be just off and we may accept pretty large positive ones.
- if (M == CodeModel::Kernel && Offset > 0)
+ if (M == CodeModel::Kernel && Offset >= 0)
return true;
return false;
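The relaxation from > 0 to >= 0 admits the zero-displacement boundary case; an illustrative use of the predicate showing what the old form wrongly rejected:

    // Offset 0 is always representable in the kernel model's negative-2GB
    // window, so it must be accepted:
    assert(X86::isOffsetSuitableForCodeModel(/*Offset=*/0, CodeModel::Kernel));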
@@ -3823,6 +3841,18 @@ bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
+bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
+ ISD::LoadExtType ExtTy,
+ EVT NewVT) const {
+ // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
+  // relocations target a movq or addq instruction: don't let the load shrink.
+ SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
+ if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
+ if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
+ return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
+ return true;
+}
+
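Context for the new shouldReduceLoadWidth hook: the initial-exec TLS sequence must keep the GOT load 64 bits wide so the linker can relax it. A hypothetical source and the code it produces:

    // Compiled with -fPIC -ftls-model=initial-exec (illustrative):
    //   __thread int TlsVar;
    //   int get() { return TlsVar; }
    //
    //   movq  TlsVar@GOTTPOFF(%rip), %rax   # R_X86_64_GOTTPOFF patches this
    //   movl  %fs:(%rax), %eax
    //
    // Narrowing the GOT load to 32 bits would leave no movq/addq for the
    // relocation to rewrite, breaking IE->LE relaxation.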
/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
@@ -3835,6 +3865,24 @@ bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
return true;
}
+bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
+ unsigned Index) const {
+ if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
+ return false;
+
+ return (Index == 0 || Index == ResVT.getVectorNumElements());
+}
+
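Concretely, for a 256-bit source split in half the two cheap indices map onto existing extracts with no cross-lane shuffle (illustrative):

    // v8f32 -> v4f32, so ResVT.getVectorNumElements() == 4:
    //   Index == 0 : lower half, a plain subregister copy (xmm alias of ymm)
    //   Index == 4 : upper half, a single vextractf128 $1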
+bool X86TargetLowering::isCheapToSpeculateCttz() const {
+ // Speculate cttz only if we can directly use TZCNT.
+ return Subtarget->hasBMI();
+}
+
+bool X86TargetLowering::isCheapToSpeculateCtlz() const {
+ // Speculate ctlz only if we can directly use LZCNT.
+ return Subtarget->hasLZCNT();
+}
+
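Rationale sketch for the two hooks above: without TZCNT/LZCNT the intrinsics lower through BSF/BSR, whose result is undefined for a zero input, so the zero guard around the count cannot be dropped:

    // @llvm.cttz.i32(%x, i1 false) ("zero is defined") is a single tzcnt
    // with +bmi, since tzcnt(0) == 32; without it the legalizer must emit
    // bsf plus a cmov or branch, which makes speculation a loss.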
/// isUndefOrInRange - Return true if Val is undef or if its value falls within
/// the specified range (L, H].
static bool isUndefOrInRange(int Val, int Low, int Hi) {
@@ -3849,7 +3897,7 @@ static bool isUndefOrEqual(int Val, int CmpVal) {
/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
-/// sequential range (L, L+Pos]. or is undef.
+/// sequential range [Low, Low+Size), or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
unsigned Pos, unsigned Size, int Low) {
for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
@@ -3858,176 +3906,6 @@ static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
return true;
}
-/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
-/// is suitable for input to PSHUFD. That is, it doesn't reference the other
-/// operand - by default will match for first operand.
-static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
- bool TestSecondOperand = false) {
- if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
- VT != MVT::v2f64 && VT != MVT::v2i64)
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
- unsigned Lo = TestSecondOperand ? NumElems : 0;
- unsigned Hi = Lo + NumElems;
-
- for (unsigned i = 0; i < NumElems; ++i)
- if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
- return false;
-
- return true;
-}
-
-/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
-/// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
- if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
- return false;
-
- // Lower quadword copied in order or undef.
- if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
- return false;
-
- // Upper quadword shuffled.
- for (unsigned i = 4; i != 8; ++i)
- if (!isUndefOrInRange(Mask[i], 4, 8))
- return false;
-
- if (VT == MVT::v16i16) {
- // Lower quadword copied in order or undef.
- if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
- return false;
-
- // Upper quadword shuffled.
- for (unsigned i = 12; i != 16; ++i)
- if (!isUndefOrInRange(Mask[i], 12, 16))
- return false;
- }
-
- return true;
-}
-
-/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
-/// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
- if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
- return false;
-
- // Upper quadword copied in order.
- if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
- return false;
-
- // Lower quadword shuffled.
- for (unsigned i = 0; i != 4; ++i)
- if (!isUndefOrInRange(Mask[i], 0, 4))
- return false;
-
- if (VT == MVT::v16i16) {
- // Upper quadword copied in order.
- if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
- return false;
-
- // Lower quadword shuffled.
- for (unsigned i = 8; i != 12; ++i)
- if (!isUndefOrInRange(Mask[i], 8, 12))
- return false;
- }
-
- return true;
-}
-
-/// \brief Return true if the mask specifies a shuffle of elements that is
-/// suitable for input to intralane (palignr) or interlane (valign) vector
-/// right-shift.
-static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
- unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- // Do not handle 64-bit element shuffles with palignr.
- if (NumLaneElts == 2)
- return false;
-
- for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
- unsigned i;
- for (i = 0; i != NumLaneElts; ++i) {
- if (Mask[i+l] >= 0)
- break;
- }
-
- // Lane is all undef, go to next lane
- if (i == NumLaneElts)
- continue;
-
- int Start = Mask[i+l];
-
- // Make sure its in this lane in one of the sources
- if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
- !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
- return false;
-
- // If not lane 0, then we must match lane 0
- if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
- return false;
-
- // Correct second source to be contiguous with first source
- if (Start >= (int)NumElts)
- Start -= NumElts - NumLaneElts;
-
- // Make sure we're shifting in the right direction.
- if (Start <= (int)(i+l))
- return false;
-
- Start -= i;
-
- // Check the rest of the elements to see if they are consecutive.
- for (++i; i != NumLaneElts; ++i) {
- int Idx = Mask[i+l];
-
- // Make sure its in this lane
- if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
- !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
- return false;
-
- // If not lane 0, then we must match lane 0
- if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
- return false;
-
- if (Idx >= (int)NumElts)
- Idx -= NumElts - NumLaneElts;
-
- if (!isUndefOrEqual(Idx, Start+i))
- return false;
-
- }
- }
-
- return true;
-}
-
-/// \brief Return true if the node specifies a shuffle of elements that is
-/// suitable for input to PALIGNR.
-static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
- const X86Subtarget *Subtarget) {
- if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
- (VT.is256BitVector() && !Subtarget->hasInt256()) ||
- VT.is512BitVector())
- // FIXME: Add AVX512BW.
- return false;
-
- return isAlignrMask(Mask, VT, false);
-}
-
-/// \brief Return true if the node specifies a shuffle of elements that is
-/// suitable for input to VALIGN.
-static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
- const X86Subtarget *Subtarget) {
- // FIXME: Add AVX512VL.
- if (!VT.is512BitVector() || !Subtarget->hasAVX512())
- return false;
- return isAlignrMask(Mask, VT, true);
-}
-
/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
@@ -4043,664 +3921,6 @@ static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
}
}
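The body of CommuteVectorShuffleMask is elided by the hunk boundary above; for reference it amounts to the following (sketch, assuming the usual two-operand index split where V1 owns [0, NumElems) and V2 owns [NumElems, 2*NumElems)):

    for (unsigned i = 0; i != NumElems; ++i) {
      int Idx = Mask[i];
      if (Idx < 0)
        continue;                                      // undef stays undef
      Mask[i] = Idx < (int)NumElems ? Idx + NumElems   // was V1, now V2
                                    : Idx - NumElems;  // was V2, now V1
    }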
-/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 128/256-bit
-/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
-/// reverse of what x86 shuffles want.
-static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
-
- unsigned NumElems = VT.getVectorNumElements();
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElems = NumElems/NumLanes;
-
- if (NumLaneElems != 2 && NumLaneElems != 4)
- return false;
-
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- bool symetricMaskRequired =
- (VT.getSizeInBits() >= 256) && (EltSize == 32);
-
- // VSHUFPSY divides the resulting vector into 4 chunks.
- // The sources are also splitted into 4 chunks, and each destination
- // chunk must come from a different source chunk.
- //
- // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
- // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9
- //
- // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
- // Y3..Y0, Y3..Y0, X3..X0, X3..X0
- //
- // VSHUFPDY divides the resulting vector into 4 chunks.
- // The sources are also splitted into 4 chunks, and each destination
- // chunk must come from a different source chunk.
- //
- // SRC1 => X3 X2 X1 X0
- // SRC2 => Y3 Y2 Y1 Y0
- //
- // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
- //
- SmallVector<int, 4> MaskVal(NumLaneElems, -1);
- unsigned HalfLaneElems = NumLaneElems/2;
- for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
- for (unsigned i = 0; i != NumLaneElems; ++i) {
- int Idx = Mask[i+l];
- unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
- if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
- return false;
- // For VSHUFPSY, the mask of the second half must be the same as the
- // first but with the appropriate offsets. This works in the same way as
- // VPERMILPS works with masks.
- if (!symetricMaskRequired || Idx < 0)
- continue;
- if (MaskVal[i] < 0) {
- MaskVal[i] = Idx - l;
- continue;
- }
- if ((signed)(Idx - l) != MaskVal[i])
- return false;
- }
- }
-
- return true;
-}
-
-/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
-static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if (NumElems != 4)
- return false;
-
- // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
- return isUndefOrEqual(Mask[0], 6) &&
- isUndefOrEqual(Mask[1], 7) &&
- isUndefOrEqual(Mask[2], 2) &&
- isUndefOrEqual(Mask[3], 3);
-}
-
-/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
-/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
-/// <2, 3, 2, 3>
-static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if (NumElems != 4)
- return false;
-
- return isUndefOrEqual(Mask[0], 2) &&
- isUndefOrEqual(Mask[1], 3) &&
- isUndefOrEqual(Mask[2], 2) &&
- isUndefOrEqual(Mask[3], 3);
-}
-
-/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
-static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if (NumElems != 2 && NumElems != 4)
- return false;
-
- for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i + NumElems))
- return false;
-
- for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i))
- return false;
-
- return true;
-}
-
-/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
-static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if (NumElems != 2 && NumElems != 4)
- return false;
-
- for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i))
- return false;
-
- for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Mask[i + e], i + NumElems))
- return false;
-
- return true;
-}
-
-/// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to INSERTPS.
-/// i. e: If all but one element come from the same vector.
-static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
- // TODO: Deal with AVX's VINSERTPS
- if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
- return false;
-
- unsigned CorrectPosV1 = 0;
- unsigned CorrectPosV2 = 0;
- for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
- if (Mask[i] == -1) {
- ++CorrectPosV1;
- ++CorrectPosV2;
- continue;
- }
-
- if (Mask[i] == i)
- ++CorrectPosV1;
- else if (Mask[i] == i + 4)
- ++CorrectPosV2;
- }
-
- if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
- // We have 3 elements (undefs count as elements from any vector) from one
- // vector, and one from another.
- return true;
-
- return false;
-}
-
-//
-// Some special combinations that can be optimized.
-//
-static
-SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG) {
- MVT VT = SVOp->getSimpleValueType(0);
- SDLoc dl(SVOp);
-
- if (VT != MVT::v8i32 && VT != MVT::v8f32)
- return SDValue();
-
- ArrayRef<int> Mask = SVOp->getMask();
-
- // These are the special masks that may be optimized.
- static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
- static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
- bool MatchEvenMask = true;
- bool MatchOddMask = true;
- for (int i=0; i<8; ++i) {
- if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
- MatchEvenMask = false;
- if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
- MatchOddMask = false;
- }
-
- if (!MatchEvenMask && !MatchOddMask)
- return SDValue();
-
- SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
-
- SDValue Op0 = SVOp->getOperand(0);
- SDValue Op1 = SVOp->getOperand(1);
-
- if (MatchEvenMask) {
- // Shift the second operand right to 32 bits.
- static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
- Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
- } else {
- // Shift the first operand left to 32 bits.
- static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
- Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
- }
- static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
- return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
-}
-
-/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to UNPCKL.
-static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
- bool HasInt256, bool V2IsSplat = false) {
-
- assert(VT.getSizeInBits() >= 128 &&
- "Unsupported vector type for unpckl");
-
- unsigned NumElts = VT.getVectorNumElements();
- if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
- return false;
-
- assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
- "Unsupported vector type for unpckh");
-
- // AVX defines UNPCK* to operate independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
- int BitI = Mask[l+i];
- int BitI1 = Mask[l+i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (V2IsSplat) {
- if (!isUndefOrEqual(BitI1, NumElts))
- return false;
- } else {
- if (!isUndefOrEqual(BitI1, j + NumElts))
- return false;
- }
- }
- }
-
- return true;
-}
-
-/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to UNPCKH.
-static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
- bool HasInt256, bool V2IsSplat = false) {
- assert(VT.getSizeInBits() >= 128 &&
- "Unsupported vector type for unpckh");
-
- unsigned NumElts = VT.getVectorNumElements();
- if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
- return false;
-
- assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
- "Unsupported vector type for unpckh");
-
- // AVX defines UNPCK* to operate independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
- int BitI = Mask[l+i];
- int BitI1 = Mask[l+i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (V2IsSplat) {
- if (isUndefOrEqual(BitI1, NumElts))
- return false;
- } else {
- if (!isUndefOrEqual(BitI1, j+NumElts))
- return false;
- }
- }
- }
- return true;
-}
-
-/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
-/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
-/// <0, 0, 1, 1>
-static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
- unsigned NumElts = VT.getVectorNumElements();
- bool Is256BitVec = VT.is256BitVector();
-
- if (VT.is512BitVector())
- return false;
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
- "Unsupported vector type for unpckh");
-
- if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
- return false;
-
- // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
- // FIXME: Need a better way to get rid of this, there's no latency difference
- // between UNPCKLPD and MOVDDUP, the later should always be checked first and
- // the former later. We should also remove the "_undef" special mask.
- if (NumElts == 4 && Is256BitVec)
- return false;
-
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
- int BitI = Mask[l+i];
- int BitI1 = Mask[l+i+1];
-
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (!isUndefOrEqual(BitI1, j))
- return false;
- }
- }
-
- return true;
-}
-
-/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
-/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
-/// <2, 2, 3, 3>
-static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
- unsigned NumElts = VT.getVectorNumElements();
-
- if (VT.is512BitVector())
- return false;
-
- assert((VT.is128BitVector() || VT.is256BitVector()) &&
- "Unsupported vector type for unpckh");
-
- if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
- (!HasInt256 || (NumElts != 16 && NumElts != 32)))
- return false;
-
- // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
- // independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
- int BitI = Mask[l+i];
- int BitI1 = Mask[l+i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (!isUndefOrEqual(BitI1, j))
- return false;
- }
- }
- return true;
-}
-
-// Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
-// (src1[0], src0[1]), manipulation with 256-bit sub-vectors
-static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
- if (!VT.is512BitVector())
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- unsigned HalfSize = NumElts/2;
- if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
- if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
- *Imm = 1;
- return true;
- }
- }
- if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
- if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
- *Imm = 0;
- return true;
- }
- }
- return false;
-}
-
-/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVSS,
-/// MOVSD, and MOVD, i.e. setting the lowest element.
-static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
- if (VT.getVectorElementType().getSizeInBits() < 32)
- return false;
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
-
- if (!isUndefOrEqual(Mask[0], NumElts))
- return false;
-
- for (unsigned i = 1; i != NumElts; ++i)
- if (!isUndefOrEqual(Mask[i], i))
- return false;
-
- return true;
-}
-
-/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
-/// as permutations between 128-bit chunks or halves. As an example: this
-/// shuffle bellow:
-/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
-/// The first half comes from the second half of V1 and the second half from the
-/// the second half of V2.
-static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
- if (!HasFp256 || !VT.is256BitVector())
- return false;
-
- // The shuffle result is divided into half A and half B. In total the two
- // sources have 4 halves, namely: C, D, E, F. The final values of A and
- // B must come from C, D, E or F.
- unsigned HalfSize = VT.getVectorNumElements()/2;
- bool MatchA = false, MatchB = false;
-
- // Check if A comes from one of C, D, E, F.
- for (unsigned Half = 0; Half != 4; ++Half) {
- if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
- MatchA = true;
- break;
- }
- }
-
- // Check if B comes from one of C, D, E, F.
- for (unsigned Half = 0; Half != 4; ++Half) {
- if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
- MatchB = true;
- break;
- }
- }
-
- return MatchA && MatchB;
-}
-
-/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
-static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
- MVT VT = SVOp->getSimpleValueType(0);
-
- unsigned HalfSize = VT.getVectorNumElements()/2;
-
- unsigned FstHalf = 0, SndHalf = 0;
- for (unsigned i = 0; i < HalfSize; ++i) {
- if (SVOp->getMaskElt(i) > 0) {
- FstHalf = SVOp->getMaskElt(i)/HalfSize;
- break;
- }
- }
- for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
- if (SVOp->getMaskElt(i) > 0) {
- SndHalf = SVOp->getMaskElt(i)/HalfSize;
- break;
- }
- }
-
- return (FstHalf | (SndHalf << 4));
-}
-
-// Symetric in-lane mask. Each lane has 4 elements (for imm8)
-static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EltSize < 32)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- Imm8 = 0;
- if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
- for (unsigned i = 0; i != NumElts; ++i) {
- if (Mask[i] < 0)
- continue;
- Imm8 |= Mask[i] << (i*2);
- }
- return true;
- }
-
- unsigned LaneSize = 4;
- SmallVector<int, 4> MaskVal(LaneSize, -1);
-
- for (unsigned l = 0; l != NumElts; l += LaneSize) {
- for (unsigned i = 0; i != LaneSize; ++i) {
- if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
- return false;
- if (Mask[i+l] < 0)
- continue;
- if (MaskVal[i] < 0) {
- MaskVal[i] = Mask[i+l] - l;
- Imm8 |= MaskVal[i] << (i*2);
- continue;
- }
- if (Mask[i+l] != (signed)(MaskVal[i]+l))
- return false;
- }
- }
- return true;
-}
-
-/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
-/// Note that VPERMIL mask matching is different depending whether theunderlying
-/// type is 32 or 64. In the VPERMILPS the high half of the mask should point
-/// to the same elements of the low, but to the higher half of the source.
-/// In VPERMILPD the two lanes could be shuffled independently of each other
-/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
-static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (VT.getSizeInBits() < 256 || EltSize < 32)
- return false;
- bool symetricMaskRequired = (EltSize == 32);
- unsigned NumElts = VT.getVectorNumElements();
-
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned LaneSize = NumElts/NumLanes;
- // 2 or 4 elements in one lane
-
- SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
- for (unsigned l = 0; l != NumElts; l += LaneSize) {
- for (unsigned i = 0; i != LaneSize; ++i) {
- if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
- return false;
- if (symetricMaskRequired) {
- if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
- ExpectedMaskVal[i] = Mask[i+l] - l;
- continue;
- }
- if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
- return false;
- }
- }
- }
- return true;
-}
-
-/// isCommutedMOVLMask - Returns true if the shuffle mask is except the reverse
-/// of what x86 movss want. X86 movs requires the lowest element to be lowest
-/// element of vector 2 and the other elements to come from vector 1 in order.
-static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
- bool V2IsSplat = false, bool V2IsUndef = false) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned NumOps = VT.getVectorNumElements();
- if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
- return false;
-
- if (!isUndefOrEqual(Mask[0], 0))
- return false;
-
- for (unsigned i = 1; i != NumOps; ++i)
- if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
- (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
- (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
- return false;
-
- return true;
-}
-
-/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
-/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
-static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
- const X86Subtarget *Subtarget) {
- if (!Subtarget->hasSSE3())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if ((VT.is128BitVector() && NumElems != 4) ||
- (VT.is256BitVector() && NumElems != 8) ||
- (VT.is512BitVector() && NumElems != 16))
- return false;
-
- // "i+1" is the value the indexed mask element must have
- for (unsigned i = 0; i != NumElems; i += 2)
- if (!isUndefOrEqual(Mask[i], i+1) ||
- !isUndefOrEqual(Mask[i+1], i+1))
- return false;
-
- return true;
-}
-
-/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
-/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
-static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
- const X86Subtarget *Subtarget) {
- if (!Subtarget->hasSSE3())
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if ((VT.is128BitVector() && NumElems != 4) ||
- (VT.is256BitVector() && NumElems != 8) ||
- (VT.is512BitVector() && NumElems != 16))
- return false;
-
- // "i" is the value the indexed mask element must have
- for (unsigned i = 0; i != NumElems; i += 2)
- if (!isUndefOrEqual(Mask[i], i) ||
- !isUndefOrEqual(Mask[i+1], i))
- return false;
-
- return true;
-}
-
-/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 256-bit
-/// version of MOVDDUP.
-static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
- if (!HasFp256 || !VT.is256BitVector())
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts != 4)
- return false;
-
- for (unsigned i = 0; i != NumElts/2; ++i)
- if (!isUndefOrEqual(Mask[i], 0))
- return false;
- for (unsigned i = NumElts/2; i != NumElts; ++i)
- if (!isUndefOrEqual(Mask[i], NumElts/2))
- return false;
- return true;
-}
-
-/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 128-bit
-/// version of MOVDDUP.
-static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- unsigned e = VT.getVectorNumElements() / 2;
- for (unsigned i = 0; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i))
- return false;
- for (unsigned i = 0; i != e; ++i)
- if (!isUndefOrEqual(Mask[e+i], i))
- return false;
- return true;
-}
-
/// isVEXTRACTIndex - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
/// suitable for instruction that extract 128 or 256 bit vectors
@@ -4754,125 +3974,6 @@ bool X86::isVEXTRACT256Index(SDNode *N) {
return isVEXTRACTIndex(N, 256);
}
-/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
-/// Handles 128-bit and 256-bit.
-static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getSimpleValueType(0);
-
- assert((VT.getSizeInBits() >= 128) &&
- "Unsupported vector type for PSHUF/SHUFP");
-
- // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
- // independently on 128-bit lanes.
- unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
- "Only supports 2, 4 or 8 elements per lane");
-
- unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
- unsigned Mask = 0;
- for (unsigned i = 0; i != NumElts; ++i) {
- int Elt = N->getMaskElt(i);
- if (Elt < 0) continue;
- Elt &= NumLaneElts - 1;
- unsigned ShAmt = (i << Shift) % 8;
- Mask |= Elt << ShAmt;
- }
-
- return Mask;
-}
-
-/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
-static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getSimpleValueType(0);
-
- assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
- "Unsupported vector type for PSHUFHW");
-
- unsigned NumElts = VT.getVectorNumElements();
-
- unsigned Mask = 0;
- for (unsigned l = 0; l != NumElts; l += 8) {
- // 8 nodes per lane, but we only care about the last 4.
- for (unsigned i = 0; i < 4; ++i) {
- int Elt = N->getMaskElt(l+i+4);
- if (Elt < 0) continue;
- Elt &= 0x3; // only 2-bits.
- Mask |= Elt << (i * 2);
- }
- }
-
- return Mask;
-}
-
-/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
-static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
- MVT VT = N->getSimpleValueType(0);
-
- assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
- "Unsupported vector type for PSHUFHW");
-
- unsigned NumElts = VT.getVectorNumElements();
-
- unsigned Mask = 0;
- for (unsigned l = 0; l != NumElts; l += 8) {
- // 8 nodes per lane, but we only care about the first 4.
- for (unsigned i = 0; i < 4; ++i) {
- int Elt = N->getMaskElt(l+i);
- if (Elt < 0) continue;
- Elt &= 0x3; // only 2-bits
- Mask |= Elt << (i * 2);
- }
- }
-
- return Mask;
-}
-
-/// \brief Return the appropriate immediate to shuffle the specified
-/// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
-/// VALIGN (if Interlane is true) instructions.
-static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
- bool InterLane) {
- MVT VT = SVOp->getSimpleValueType(0);
- unsigned EltSize = InterLane ? 1 :
- VT.getVectorElementType().getSizeInBits() >> 3;
-
- unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
- unsigned NumLaneElts = NumElts/NumLanes;
-
- int Val = 0;
- unsigned i;
- for (i = 0; i != NumElts; ++i) {
- Val = SVOp->getMaskElt(i);
- if (Val >= 0)
- break;
- }
- if (Val >= (int)NumElts)
- Val -= NumElts - NumLaneElts;
-
- assert(Val - i > 0 && "PALIGNR imm should be positive");
- return (Val - i) * EltSize;
-}
-
-/// \brief Return the appropriate immediate to shuffle the specified
-/// VECTOR_SHUFFLE mask with the PALIGNR instruction.
-static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
- return getShuffleAlignrImmediate(SVOp, false);
-}
-
-/// \brief Return the appropriate immediate to shuffle the specified
-/// VECTOR_SHUFFLE mask with the VALIGN instruction.
-static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
- return getShuffleAlignrImmediate(SVOp, true);
-}
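// A minimal worked sketch (hypothetical helper): the alignr immediate is the
// byte offset of the first referenced element within the rotated source.
constexpr unsigned alignrImm(int FirstVal, int FirstIdx, int EltSizeBytes) {
  return (FirstVal - FirstIdx) * EltSizeBytes;
}
// v8i16 rotate mask <3,4,5,6,7,8,9,10>: first defined element 3 at index 0.
static_assert(alignrImm(3, 0, 2) == 6, "PALIGNR $6");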
-
-
static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
@@ -4947,119 +4048,6 @@ bool X86::isZeroNode(SDValue Elt) {
return false;
}
-/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
-/// match movhlps. The lower half elements should come from the upper half of
-/// V1 (and in order), and the upper half elements should come from the upper
-/// half of V2 (and in order).
-static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
- if (VT.getVectorNumElements() != 4)
- return false;
- for (unsigned i = 0, e = 2; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i+2))
- return false;
- for (unsigned i = 2; i != 4; ++i)
- if (!isUndefOrEqual(Mask[i], i+4))
- return false;
- return true;
-}
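// Worked example (editorial): modulo undefs, the only v4 mask accepted above
// is <2,3,6,7> - positions 0-1 from V1's high half, positions 2-3 from V2's
// high half. A mask such as <2,3,4,5> fails the second loop (4 != 2+4).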
-
-/// isScalarLoadToVector - Returns true if the node is a scalar load that
-/// is promoted to a vector. It also returns the LoadSDNode by reference if
-/// required.
-static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
- if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
- return false;
- N = N->getOperand(0).getNode();
- if (!ISD::isNON_EXTLoad(N))
- return false;
- if (LD)
- *LD = cast<LoadSDNode>(N);
- return true;
-}
-
-// Test whether the given value is a vector value which will be legalized
-// into a load.
-static bool WillBeConstantPoolLoad(SDNode *N) {
- if (N->getOpcode() != ISD::BUILD_VECTOR)
- return false;
-
- // Check for any non-constant elements.
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- switch (N->getOperand(i).getNode()->getOpcode()) {
- case ISD::UNDEF:
- case ISD::ConstantFP:
- case ISD::Constant:
- break;
- default:
- return false;
- }
-
- // Vectors of all-zeros and all-ones are materialized with special
- // instructions rather than being loaded.
- return !ISD::isBuildVectorAllZeros(N) &&
- !ISD::isBuildVectorAllOnes(N);
-}
-
-/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
-/// match movlp{s|d}. The lower half elements should come from the lower half
-/// of V1 (and in order), and the upper half elements should come from the
-/// upper half of V2 (and in order). Since V1 will become the source of the
-/// MOVLP, it must be either a vector load or a scalar load to vector.
-static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
- ArrayRef<int> Mask, MVT VT) {
- if (!VT.is128BitVector())
- return false;
-
- if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
- return false;
- // If V2 is a vector load, don't do this transformation. We will instead try
- // to use a load-folding shufps op.
- if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
- return false;
-
- unsigned NumElems = VT.getVectorNumElements();
-
- if (NumElems != 2 && NumElems != 4)
- return false;
- for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i))
- return false;
- for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
- if (!isUndefOrEqual(Mask[i], i+NumElems))
- return false;
- return true;
-}
-
-/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
-/// to a zero vector.
-/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
-static bool isZeroShuffle(ShuffleVectorSDNode *N) {
- SDValue V1 = N->getOperand(0);
- SDValue V2 = N->getOperand(1);
- unsigned NumElems = N->getValueType(0).getVectorNumElements();
- for (unsigned i = 0; i != NumElems; ++i) {
- int Idx = N->getMaskElt(i);
- if (Idx >= (int)NumElems) {
- unsigned Opc = V2.getOpcode();
- if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
- continue;
- if (Opc != ISD::BUILD_VECTOR ||
- !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
- return false;
- } else if (Idx >= 0) {
- unsigned Opc = V1.getOpcode();
- if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
- continue;
- if (Opc != ISD::BUILD_VECTOR ||
- !X86::isZeroNode(V1.getOperand(Idx)))
- return false;
- }
- }
- return true;
-}
-
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
@@ -5131,16 +4119,6 @@ static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
-/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
-/// that point to V2 point to its first element.
-static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
- for (unsigned i = 0; i != NumElems; ++i) {
- if (Mask[i] > (int)NumElems) {
- Mask[i] = NumElems;
- }
- }
-}
-
/// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
/// operation of the specified width.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
@@ -5177,92 +4155,6 @@ static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}
-// PromoteSplati8i16 - i16 and i8 vector types cannot be used directly by
-// a generic shuffle instruction because the target has no such instructions.
-// Generate shuffles which repeat i16 and i8 several times until they can be
-// represented by v4f32 and then be manipulated by target supported shuffles.
-static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
- MVT VT = V.getSimpleValueType();
- int NumElems = VT.getVectorNumElements();
- SDLoc dl(V);
-
- while (NumElems > 4) {
- if (EltNo < NumElems/2) {
- V = getUnpackl(DAG, dl, VT, V, V);
- } else {
- V = getUnpackh(DAG, dl, VT, V, V);
- EltNo -= NumElems/2;
- }
- NumElems >>= 1;
- }
- return V;
-}
-
-/// getLegalSplat - Generate a legal splat with supported x86 shuffles
-static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
- MVT VT = V.getSimpleValueType();
- SDLoc dl(V);
-
- if (VT.is128BitVector()) {
- V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
- int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
- V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
- &SplatMask[0]);
- } else if (VT.is256BitVector()) {
- // To use VPERMILPS to splat scalars, the second half of indices must
- // refer to the higher part, which is a duplication of the lower one,
- // because VPERMILPS can only handle in-lane permutations.
- int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
- EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
-
- V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
- V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
- &SplatMask[0]);
- } else
- llvm_unreachable("Vector size not supported");
-
- return DAG.getNode(ISD::BITCAST, dl, VT, V);
-}
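// Worked example (editorial): splatting element 1 of a v8f32 builds the mask
// {1,1,1,1, 5,5,5,5}, so both 128-bit halves replicate in-lane - the only
// form VPERMILPS can produce.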
-
-/// PromoteSplat - Splat is promoted to target supported vector shuffles.
-static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
- MVT SrcVT = SV->getSimpleValueType(0);
- SDValue V1 = SV->getOperand(0);
- SDLoc dl(SV);
-
- int EltNo = SV->getSplatIndex();
- int NumElems = SrcVT.getVectorNumElements();
- bool Is256BitVec = SrcVT.is256BitVector();
-
- assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
- "Unknown how to promote splat for type");
-
- // Extract the 128-bit part containing the splat element and update
- // the splat element index when it refers to the higher register.
- if (Is256BitVec) {
- V1 = Extract128BitVector(V1, EltNo, DAG, dl);
- if (EltNo >= NumElems/2)
- EltNo -= NumElems/2;
- }
-
- // i16 and i8 vector types cannot be used directly by a generic shuffle
- // instruction because the target has no such instruction. Generate shuffles
- // which repeat i16 and i8 several times until they fit in i32, and then can
- // be manipulated by target supported shuffles.
- MVT EltVT = SrcVT.getVectorElementType();
- if (EltVT == MVT::i8 || EltVT == MVT::i16)
- V1 = PromoteSplati8i16(V1, DAG, EltNo);
-
- // Recreate the 256-bit vector and place the same 128-bit vector
- // into the low and high part. This is necessary because we want
- // to use VPERM* to shuffle the vectors
- if (Is256BitVec) {
- V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
- }
-
- return getLegalSplat(DAG, V1, EltNo);
-}
-
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector of zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
@@ -5394,13 +4286,9 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT,
return false;
if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
- // FIXME: Support AVX-512 here.
- Type *Ty = C->getType();
- if (!Ty->isVectorTy() || (Ty->getVectorNumElements() != 16 &&
- Ty->getVectorNumElements() != 32))
- return false;
-
DecodePSHUFBMask(C, Mask);
+ if (Mask.empty())
+ return false;
break;
}
@@ -5412,16 +4300,9 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT,
IsUnary = true;
break;
case X86ISD::MOVSS:
- case X86ISD::MOVSD: {
- // The index 0 always comes from the first element of the second source,
- // this is why MOVSS and MOVSD are used in the first place. The other
- // elements come from the other positions of the first source vector
- Mask.push_back(NumElems);
- for (unsigned i = 1; i != NumElems; ++i) {
- Mask.push_back(i);
- }
+ case X86ISD::MOVSD:
+ DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
break;
- }
case X86ISD::VPERM2X128:
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
@@ -5429,11 +4310,16 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT,
break;
case X86ISD::MOVSLDUP:
DecodeMOVSLDUPMask(VT, Mask);
+ IsUnary = true;
break;
case X86ISD::MOVSHDUP:
DecodeMOVSHDUPMask(VT, Mask);
+ IsUnary = true;
break;
case X86ISD::MOVDDUP:
+ DecodeMOVDDUPMask(VT, Mask);
+ IsUnary = true;
+ break;
case X86ISD::MOVLHPD:
case X86ISD::MOVLPD:
case X86ISD::MOVLPS:
@@ -5517,148 +4403,6 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
return SDValue();
}
-/// getNumOfConsecutiveZeros - Return the number of consecutive zero elements
-/// at one end of a vector shuffle mask. The
-/// search can start in two different directions, from left or right.
-/// We count undefs as zeros until PreferredNum is reached.
-static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
- unsigned NumElems, bool ZerosFromLeft,
- SelectionDAG &DAG,
- unsigned PreferredNum = -1U) {
- unsigned NumZeros = 0;
- for (unsigned i = 0; i != NumElems; ++i) {
- unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
- SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
- if (!Elt.getNode())
- break;
-
- if (X86::isZeroNode(Elt))
- ++NumZeros;
- else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
- NumZeros = std::min(NumZeros + 1, PreferredNum);
- else
- break;
- }
-
- return NumZeros;
-}
-
-/// isShuffleMaskConsecutive - Check if the shuffle mask indicies [MaskI, MaskE)
-/// correspond consecutively to elements from one of the vector operands,
-/// starting from its index OpIdx. Also tell OpNum which source vector operand.
-static
-bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
- unsigned MaskI, unsigned MaskE, unsigned OpIdx,
- unsigned NumElems, unsigned &OpNum) {
- bool SeenV1 = false;
- bool SeenV2 = false;
-
- for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
- int Idx = SVOp->getMaskElt(i);
- // Ignore undef indices
- if (Idx < 0)
- continue;
-
- if (Idx < (int)NumElems)
- SeenV1 = true;
- else
- SeenV2 = true;
-
- // Only accept consecutive elements from the same vector
- if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
- return false;
- }
-
- OpNum = SeenV1 ? 0 : 1;
- return true;
-}
-
-/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
-/// logical right shift of a vector.
-static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
- bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- unsigned NumElems =
- SVOp->getSimpleValueType(0).getVectorNumElements();
- unsigned NumZeros = getNumOfConsecutiveZeros(
- SVOp, NumElems, false /* check zeros from right */, DAG,
- SVOp->getMaskElt(0));
- unsigned OpSrc;
-
- if (!NumZeros)
- return false;
-
- // Considering the elements in the mask that are not consecutive zeros,
- // check if they consecutively come from only one of the source vectors.
- //
- // V1 = {X, A, B, C} 0
- // \ \ \ /
- // vector_shuffle V1, V2 <1, 2, 3, X>
- //
- if (!isShuffleMaskConsecutive(SVOp,
- 0, // Mask Start Index
- NumElems-NumZeros, // Mask End Index(exclusive)
- NumZeros, // Where to start looking in the src vector
- NumElems, // Number of elements in vector
- OpSrc)) // Which source operand ?
- return false;
-
- isLeft = false;
- ShAmt = NumZeros;
- ShVal = SVOp->getOperand(OpSrc);
- return true;
-}
-
-/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
-/// logical left shift of a vector.
-static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
- bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- unsigned NumElems =
- SVOp->getSimpleValueType(0).getVectorNumElements();
- unsigned NumZeros = getNumOfConsecutiveZeros(
- SVOp, NumElems, true /* check zeros from left */, DAG,
- NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
- unsigned OpSrc;
-
- if (!NumZeros)
- return false;
-
- // Considering the elements in the mask that are not consecutive zeros,
- // check if they consecutively come from only one of the source vectors.
- //
- // 0 { A, B, X, X } = V2
- // / \ / /
- // vector_shuffle V1, V2 <X, X, 4, 5>
- //
- if (!isShuffleMaskConsecutive(SVOp,
- NumZeros, // Mask Start Index
- NumElems, // Mask End Index(exclusive)
- 0, // Where to start looking in the src vector
- NumElems, // Number of elements in vector
- OpSrc)) // Which source operand ?
- return false;
-
- isLeft = true;
- ShAmt = NumZeros;
- ShVal = SVOp->getOperand(OpSrc);
- return true;
-}
-
-/// isVectorShift - Returns true if the shuffle can be implemented as a
-/// logical left or right shift of a vector.
-static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
- bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- // Although the logic below supports any bitwidth size, there are no
- // shift instructions which handle more than 128-bit vectors.
- if (!SVOp->getSimpleValueType(0).is128BitVector())
- return false;
-
- if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
- isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
- return true;
-
- return false;
-}
-
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
@@ -5744,19 +4488,19 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget *Subtarget,
const TargetLowering &TLI) {
// Find all zeroable elements.
- bool Zeroable[4];
+ std::bitset<4> Zeroable;
for (int i=0; i < 4; ++i) {
SDValue Elt = Op->getOperand(i);
Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
}
- assert(std::count_if(&Zeroable[0], &Zeroable[4],
- [](bool M) { return !M; }) > 1 &&
+ assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
// We only know how to deal with build_vector nodes where elements are either
// zeroable or extract_vector_elt with constant index.
SDValue FirstNonZero;
- for (int i=0; i < 4; ++i) {
+ unsigned FirstNonZeroIdx;
+ for (unsigned i=0; i < 4; ++i) {
if (Zeroable[i])
continue;
SDValue Elt = Op->getOperand(i);
@@ -5767,8 +4511,10 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
MVT VT = Elt.getOperand(0).getSimpleValueType();
if (!VT.is128BitVector())
return SDValue();
- if (!FirstNonZero.getNode())
+ if (!FirstNonZero.getNode()) {
FirstNonZero = Elt;
+ FirstNonZeroIdx = i;
+ }
}
assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
@@ -5807,14 +4553,14 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
return SDValue();
SDValue V2 = Elt.getOperand(0);
- if (Elt == FirstNonZero)
+ if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
V1 = SDValue();
bool CanFold = true;
for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
if (Zeroable[i])
continue;
-
+
SDValue Current = Op->getOperand(i);
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
@@ -5833,10 +4579,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
// Ok, we can emit an INSERTPS instruction.
- unsigned ZMask = 0;
- for (int i = 0; i < 4; ++i)
- if (Zeroable[i])
- ZMask |= 1 << i;
+ unsigned ZMask = Zeroable.to_ulong();
unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
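// A minimal worked sketch (hypothetical helper): INSERTPS packs the source
// element in bits 7:6, the destination slot in bits 5:4, and the zero mask in
// bits 3:0, exactly as composed above.
constexpr unsigned insertPSImm(unsigned Src, unsigned Dst, unsigned ZMask) {
  return (Src << 6) | (Dst << 4) | (ZMask & 0xF);
}
static_assert(insertPSImm(2, 1, 0x8) == 0x98, "dst[1] = src[2], zero slot 3");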
@@ -5845,19 +4588,19 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
}
-/// getVShift - Return a vector logical shift node.
-///
+/// Return a vector logical shift node.
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG,
const TargetLowering &TLI, SDLoc dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
- EVT ShVT = MVT::v2i64;
+ MVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
+ MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
+ assert(NumBits % 8 == 0 && "Only support byte sized shifts");
+ SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
return DAG.getNode(ISD::BITCAST, dl, VT,
- DAG.getNode(Opc, dl, ShVT, SrcOp,
- DAG.getConstant(NumBits,
- TLI.getScalarShiftAmountTy(SrcOp.getValueType()))));
+ DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
static SDValue
@@ -5924,9 +4667,7 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
LD->getPointerInfo().getWithOffset(StartOffset),
false, false, false, 0);
- SmallVector<int, 8> Mask;
- for (unsigned i = 0; i != NumElems; ++i)
- Mask.push_back(EltNo);
+ SmallVector<int, 8> Mask(NumElems, EltNo);
return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
}
@@ -5934,19 +4675,18 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
return SDValue();
}
-/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
-/// vector of type 'VT', see if the elements can be replaced by a single large
-/// load which has the same value as a build_vector whose operands are 'elts'.
+/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
+/// elements can be replaced by a single large load which has the same value as
+/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
-static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
+static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
SDLoc &DL, SelectionDAG &DAG,
bool isAfterLegalize) {
- EVT EltVT = VT.getVectorElementType();
unsigned NumElems = Elts.size();
LoadSDNode *LDBase = nullptr;
@@ -5957,7 +4697,9 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
// non-consecutive, bail out.
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Elts[i];
-
+ // Look through a bitcast.
+ if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
+ Elt = Elt.getOperand(0);
if (!Elt.getNode() ||
(Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
return SDValue();
@@ -5972,7 +4714,12 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
continue;
LoadSDNode *LD = cast<LoadSDNode>(Elt);
- if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
+ EVT LdVT = Elt.getValueType();
+ // Each loaded element must be the correct fractional portion of the
+ // requested vector load.
+ if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
+ return SDValue();
+ if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
return SDValue();
LastLoadedElt = i;
}
@@ -5981,6 +4728,12 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
// load of the entire vector width starting at the base pointer. If we found
// consecutive loads for the low half, generate a vzext_load node.
if (LastLoadedElt == NumElems - 1) {
+ assert(LDBase && "Did not find base load for merging consecutive loads");
+ EVT EltVT = LDBase->getValueType(0);
+ // Ensure that the input vector size for the merged loads matches the
+ // cumulative size of the input elements.
+ if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
+ return SDValue();
if (isAfterLegalize &&
!DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
@@ -5988,15 +4741,10 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
SDValue NewLd = SDValue();
- if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
- NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(),
- LDBase->isVolatile(), LDBase->isNonTemporal(),
- LDBase->isInvariant(), 0);
NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(),
- LDBase->isVolatile(), LDBase->isNonTemporal(),
- LDBase->isInvariant(), LDBase->getAlignment());
+ LDBase->getPointerInfo(), LDBase->isVolatile(),
+ LDBase->isNonTemporal(), LDBase->isInvariant(),
+ LDBase->getAlignment());
if (LDBase->hasAnyUseOfValue(1)) {
SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
@@ -6009,7 +4757,11 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
return NewLd;
}
- if (NumElems == 4 && LastLoadedElt == 1 &&
+
+  // TODO: The code below fires only for loading the low v2i32 / v2f32
+  // of a v4i32 / v4f32. It's probably worth generalizing.
+ EVT EltVT = VT.getVectorElementType();
+ if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
@@ -6134,8 +4886,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
const Function *F = DAG.getMachineFunction().getFunction();
- bool OptForSize = F->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+ bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
@@ -6183,7 +4934,8 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
if (!IsLoad)
return SDValue();
- if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64))
+ if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
+ (Subtarget->hasVLX() && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit so it doesn't match
@@ -6339,8 +5091,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
AllContants = false;
NonConstIdx = idx;
NumNonConsts++;
- }
- else {
+ } else {
NumConsts++;
if (cast<ConstantSDNode>(In)->getZExtValue())
Immediate |= (1ULL << idx);
@@ -6363,7 +5114,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
MVT::getIntegerVT(VT.getSizeInBits()));
DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
}
- else
+ else
DstVec = DAG.getUNDEF(VT);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(NonConstIdx),
@@ -6386,7 +5137,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
/// \brief Return true if \p N implements a horizontal binop and return the
/// operands for the horizontal binop into V0 and V1.
-///
+///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function checks that the build_vector \p N in input implements a
/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
@@ -6407,7 +5158,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
"Invalid Vector in input!");
-
+
bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
bool CanFold = true;
unsigned ExpectedVExtractIdx = BaseIdx;
@@ -6476,13 +5227,13 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
}
/// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
-/// a concat_vector.
+/// a concat_vector.
///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
-/// horizontal binary operations.
+/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
@@ -6566,7 +5317,7 @@ static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
bool AddFound = false;
bool SubFound = false;
- for (unsigned i = 0, e = NumElts; i != e; i++) {
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
SDValue Op = BV->getOperand(i);
// Skip 'undef' values.
@@ -6676,18 +5427,18 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
// Try to match an SSE3 float HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
-
+
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
} else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
// Try to match an SSSE3 integer HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
-
+
if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
}
-
+
if (!Subtarget->hasAVX())
return SDValue();
@@ -6738,7 +5489,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
// Do this only if the target has AVX2.
if (Subtarget->hasAVX2())
return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
-
+
// Do not try to expand this build_vector into a pair of horizontal
// add/sub if we can emit a pair of scalar add/sub.
if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
@@ -6863,32 +5614,14 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Handle SSE only.
assert(VT == MVT::v2i64 && "Expected an SSE value type!");
EVT VecVT = MVT::v4i32;
- unsigned VecElts = 4;
// Truncate the value (which may itself be a constant) to i32, and
// convert it to a vector with movd (S2V+shuffle to zero extend).
Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
-
- // If using the new shuffle lowering, just directly insert this.
- if (ExperimentalVectorShuffleLowering)
- return DAG.getNode(
- ISD::BITCAST, dl, VT,
- getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
-
- Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
-
- // Now we have our 32-bit value zero extended in the low element of
- // a vector. If Idx != 0, swizzle it into place.
- if (Idx != 0) {
- SmallVector<int, 4> Mask;
- Mask.push_back(Idx);
- for (unsigned i = 1; i != VecElts; ++i)
- Mask.push_back(i);
- Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
- &Mask[0]);
- }
- return DAG.getNode(ISD::BITCAST, dl, VT, Item);
+ return DAG.getNode(
+ ISD::BITCAST, dl, VT,
+ getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
}
}
@@ -6948,17 +5681,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// place.
if (EVTBits == 32) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
-
- // If using the new shuffle lowering, just directly insert this.
- if (ExperimentalVectorShuffleLowering)
- return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
-
- // Turn it into a shuffle of zero and zero-extended scalar to vector.
- Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
- SmallVector<int, 8> MaskVec;
- for (unsigned i = 0; i != NumElems; ++i)
- MaskVec.push_back(i == Idx ? 0 : 1);
- return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
+ return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
}
}
@@ -6982,12 +5705,15 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (IsAllConstants)
return SDValue();
- // For AVX-length vectors, build the individual 128-bit pieces and use
+ // For AVX-length vectors, see if we can use a vector load to get all of the
+ // elements, otherwise build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.is256BitVector() || VT.is512BitVector()) {
- SmallVector<SDValue, 64> V;
- for (unsigned i = 0; i != NumElems; ++i)
- V.push_back(Op.getOperand(i));
+ SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
+
+ // Check for a build vector of consecutive loads.
+ if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
+ return LD;
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
@@ -7091,7 +5817,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return Sh;
// For SSE 4.1, use insertps to put the high elements into the low element.
- if (getSubtarget()->hasSSE41()) {
+ if (Subtarget->hasSSE41()) {
SDValue Result;
if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
@@ -7271,38 +5997,40 @@ is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
return true;
}
-// Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
-// 2013 will allow us to use it as a non-type template parameter.
-namespace {
-
-/// \brief Implementation of the \c isShuffleEquivalent variadic functor.
-///
-/// See its documentation for details.
-bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
- if (Mask.size() != Args.size())
- return false;
- for (int i = 0, e = Mask.size(); i < e; ++i) {
- assert(*Args[i] >= 0 && "Arguments must be positive integers!");
- if (Mask[i] != -1 && Mask[i] != *Args[i])
- return false;
- }
- return true;
-}
-
-} // namespace
-
/// \brief Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
-/// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
+/// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
-static const VariadicFunction1<
- bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
+static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ ArrayRef<int> ExpectedMask) {
+ if (Mask.size() != ExpectedMask.size())
+ return false;
+
+ int Size = Mask.size();
+
+ // If the values are build vectors, we can look through them to find
+ // equivalent inputs that make the shuffles equivalent.
+ auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
+ auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
+
+ for (int i = 0; i < Size; ++i)
+ if (Mask[i] != -1 && Mask[i] != ExpectedMask[i]) {
+ auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
+ auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
+ if (!MaskBV || !ExpectedBV ||
+ MaskBV->getOperand(Mask[i] % Size) !=
+ ExpectedBV->getOperand(ExpectedMask[i] % Size))
+ return false;
+ }
+
+ return true;
+}
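// Usage sketch (editorial): with the new signature the fixed pattern is an
// initializer list, and equivalent build_vector inputs are looked through:
//   if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
//     /* lower as an unpack-low pattern */;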
/// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
///
@@ -7328,6 +6056,37 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
return DAG.getConstant(Imm, MVT::i8);
}
+/// \brief Try to emit a blend instruction for a shuffle using bit math.
+///
+/// This is used as a fallback approach when first class blend instructions are
+/// unavailable. Currently it is only suitable for integer vectors, but could
+/// be generalized for floating point vectors if desirable.
+static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(VT.isInteger() && "Only supports integer vector types!");
+ MVT EltVT = VT.getScalarType();
+ int NumEltBits = EltVT.getSizeInBits();
+ SDValue Zero = DAG.getConstant(0, EltVT);
+ SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
+ SmallVector<SDValue, 16> MaskOps;
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
+ return SDValue(); // Shuffled input!
+ MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
+ }
+
+ SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
+ V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
+ // We have to cast V2 around.
+ MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
+ V2 = DAG.getNode(ISD::BITCAST, DL, VT,
+ DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
+ DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
+ DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
+ return DAG.getNode(ISD::OR, DL, VT, V1, V2);
+}
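// A scalar model of the AND/ANDNP/OR dance above (editorial sketch):
constexpr unsigned bitBlend(unsigned A, unsigned B, unsigned M) {
  return (A & M) | (B & ~M); // M selects A lanes, ~M selects B lanes.
}
static_assert(bitBlend(0xAAAA, 0x5555, 0xFF00) == 0xAA55, "");
// e.g. a v4i32 mask <0, 5, 2, 7> builds M = <-1, 0, -1, 0> element-wise.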
+
/// \brief Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
@@ -7338,7 +6097,6 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
-
unsigned BlendMask = 0;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] >= Size) {
@@ -7415,11 +6173,17 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
}
}
// FALLTHROUGH
+ case MVT::v16i8:
case MVT::v32i8: {
- assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
+ assert((VT.getSizeInBits() == 128 || Subtarget->hasAVX2()) &&
+ "256-bit byte-blends require AVX2 support!");
+
// Scale the blend by the number of bytes per element.
- int Scale = VT.getScalarSizeInBits() / 8;
- assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
+ int Scale = VT.getScalarSizeInBits() / 8;
+
+ // This form of blend is always done on bytes. Compute the byte vector
+ // type.
+ MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
// Compute the VSELECT mask. Note that VSELECT is really confusing in the
// mix of LLVM's code generator and the x86 backend. We tell the code
@@ -7432,19 +6196,19 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
// the LLVM model for boolean values in vector elements gets the relevant
// bit set, it is set backwards and over constrained relative to x86's
// actual model.
- SDValue VSELECTMask[32];
+ SmallVector<SDValue, 32> VSELECTMask;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
for (int j = 0; j < Scale; ++j)
- VSELECTMask[Scale * i + j] =
+ VSELECTMask.push_back(
Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
- : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
+ : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
- V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
- V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
+ V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
return DAG.getNode(
ISD::BITCAST, DL, VT,
- DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
+ DAG.getNode(ISD::VSELECT, DL, BlendVT,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
V1, V2));
}
@@ -7453,12 +6217,45 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
}
}
-/// \brief Generic routine to lower a shuffle and blend as a decomposed set of
-/// unblended shuffles followed by an unshuffled blend.
+/// \brief Try to lower as a blend of elements from two inputs followed by
+/// a single-input permutation.
+///
+/// This matches the pattern where we can blend elements from two inputs and
+/// then reduce the shuffle to a single-input permutation.
+static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2,
+ ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ // We build up the blend mask while checking whether a blend is a viable way
+ // to reduce the shuffle.
+ SmallVector<int, 32> BlendMask(Mask.size(), -1);
+ SmallVector<int, 32> PermuteMask(Mask.size(), -1);
+
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ if (Mask[i] < 0)
+ continue;
+
+ assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
+
+ if (BlendMask[Mask[i] % Size] == -1)
+ BlendMask[Mask[i] % Size] = Mask[i];
+ else if (BlendMask[Mask[i] % Size] != Mask[i])
+ return SDValue(); // Can't blend in the needed input!
+
+ PermuteMask[i] = Mask[i] % Size;
+ }
+
+ SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
+ return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
+}
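// Worked example (editorial): the v4 mask <1, 4, 3, 6> decomposes into
//   BlendMask   = <4, 1, 6, 3>  (each element blended into its home slot)
//   PermuteMask = <1, 0, 3, 2>  (single-input swizzle of the blend result)
// A slot wanted by elements of both inputs makes the blend impossible.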
+
+/// \brief Generic routine to decompose a shuffle and blend into independent
+/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
-/// operations.
+/// operations. It will try to pick the best arrangement of shuffles and
+/// blends.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
SDValue V1,
SDValue V2,
@@ -7478,6 +6275,16 @@ static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
BlendMask[i] = i + Size;
}
+ // Try to lower with the simpler initial blend strategy unless one of the
+ // input shuffles would be a no-op. We prefer to shuffle inputs as the
+ // shuffle may be able to fold with a load or other benefit. However, when
+  // we would have to do twice as many shuffles to achieve this, blending
+  // first is the better strategy.
+ if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
+ if (SDValue BlendPerm =
+ lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
+ return BlendPerm;
+
V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
@@ -7492,15 +6299,13 @@ static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
-///
+///
/// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
-///
+///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
-///
-/// Note that this only handles 128-bit vector widths currently.
static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
SDValue V2,
ArrayRef<int> Mask,
@@ -7508,6 +6313,10 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
SelectionDAG &DAG) {
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
+ int NumElts = Mask.size();
+ int NumLanes = VT.getSizeInBits() / 128;
+ int NumLaneElts = NumElts / NumLanes;
+
// We need to detect various ways of spelling a rotation:
// [11, 12, 13, 14, 15, 0, 1, 2]
// [-1, 12, 13, 14, -1, -1, 1, -1]
@@ -7517,44 +6326,52 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
// [-1, 4, 5, 6, -1, -1, -1, -1]
int Rotation = 0;
SDValue Lo, Hi;
- for (int i = 0, Size = Mask.size(); i < Size; ++i) {
- if (Mask[i] == -1)
- continue;
- assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
+ for (int l = 0; l < NumElts; l += NumLaneElts) {
+ for (int i = 0; i < NumLaneElts; ++i) {
+ if (Mask[l + i] == -1)
+ continue;
+ assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
- // Based on the mod-Size value of this mask element determine where
- // a rotated vector would have started.
- int StartIdx = i - (Mask[i] % Size);
- if (StartIdx == 0)
- // The identity rotation isn't interesting, stop.
- return SDValue();
+ // Get the mod-Size index and lane correct it.
+ int LaneIdx = (Mask[l + i] % NumElts) - l;
+ // Make sure it was in this lane.
+ if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
+ return SDValue();
- // If we found the tail of a vector the rotation must be the missing
- // front. If we found the head of a vector, it must be how much of the head.
- int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
+ // Determine where a rotated vector would have started.
+ int StartIdx = i - LaneIdx;
+ if (StartIdx == 0)
+ // The identity rotation isn't interesting, stop.
+ return SDValue();
- if (Rotation == 0)
- Rotation = CandidateRotation;
- else if (Rotation != CandidateRotation)
- // The rotations don't match, so we can't match this mask.
- return SDValue();
+ // If we found the tail of a vector the rotation must be the missing
+ // front. If we found the head of a vector, it must be how much of the
+ // head.
+ int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
- // Compute which value this mask is pointing at.
- SDValue MaskV = Mask[i] < Size ? V1 : V2;
-
- // Compute which of the two target values this index should be assigned to.
- // This reflects whether the high elements are remaining or the low elements
- // are remaining.
- SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
-
- // Either set up this value if we've not encountered it before, or check
- // that it remains consistent.
- if (!TargetV)
- TargetV = MaskV;
- else if (TargetV != MaskV)
- // This may be a rotation, but it pulls from the inputs in some
- // unsupported interleaving.
- return SDValue();
+ if (Rotation == 0)
+ Rotation = CandidateRotation;
+ else if (Rotation != CandidateRotation)
+ // The rotations don't match, so we can't match this mask.
+ return SDValue();
+
+ // Compute which value this mask is pointing at.
+ SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
+
+ // Compute which of the two target values this index should be assigned
+ // to. This reflects whether the high elements are remaining or the low
+ // elements are remaining.
+ SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
+
+ // Either set up this value if we've not encountered it before, or check
+ // that it remains consistent.
+ if (!TargetV)
+ TargetV = MaskV;
+ else if (TargetV != MaskV)
+ // This may be a rotation, but it pulls from the inputs in some
+ // unsupported interleaving.
+ return SDValue();
+ }
}
// Check that we successfully analyzed the mask, and normalize the results.
@@ -7565,26 +6382,27 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
else if (!Hi)
Hi = Lo;
- assert(VT.getSizeInBits() == 128 &&
- "Rotate-based lowering only supports 128-bit lowering!");
- assert(Mask.size() <= 16 &&
- "Can shuffle at most 16 bytes in a 128-bit vector!");
-
// The actual rotate instruction rotates bytes, so we need to scale the
- // rotation based on how many bytes are in the vector.
- int Scale = 16 / Mask.size();
+ // rotation based on how many bytes are in the vector lane.
+ int Scale = 16 / NumLaneElts;
- // SSSE3 targets can use the palignr instruction
+ // SSSE3 targets can use the palignr instruction.
if (Subtarget->hasSSSE3()) {
- // Cast the inputs to v16i8 to match PALIGNR.
- Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
- Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
+ // Cast the inputs to i8 vector of correct length to match PALIGNR.
+ MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
+ Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
+ DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
DAG.getConstant(Rotation * Scale, MVT::i8)));
}
+ assert(VT.getSizeInBits() == 128 &&
+ "Rotate-based lowering only supports 128-bit lowering!");
+ assert(Mask.size() <= 16 &&
+ "Can shuffle at most 16 bytes in a 128-bit vector!");
+
// Default SSE2 implementation
int LoByteShift = 16 - Rotation * Scale;
int HiByteShift = Rotation * Scale;
@@ -7594,9 +6412,9 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
- DAG.getConstant(8 * LoByteShift, MVT::i8));
+ DAG.getConstant(LoByteShift, MVT::i8));
SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
- DAG.getConstant(8 * HiByteShift, MVT::i8));
+ DAG.getConstant(HiByteShift, MVT::i8));
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
}
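// Worked example (editorial): the doc-comment mask <11,12,13,14,15,0,1,2>
// yields Rotation == 3 with Hi == V2, Lo == V1; Scale == 2 bytes/element, so
// SSSE3 emits PALIGNR $6, and the SSE2 path builds
// (V2 srl 6 bytes) | (V1 shl 10 bytes) via PSRLDQ/PSLLDQ/POR.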
@@ -7613,6 +6431,11 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2) {
SmallBitVector Zeroable(Mask.size(), false);
+ while (V1.getOpcode() == ISD::BITCAST)
+ V1 = V1->getOperand(0);
+ while (V2.getOpcode() == ISD::BITCAST)
+ V2 = V2->getOperand(0);
+
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
@@ -7624,10 +6447,10 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
continue;
}
- // If this is an index into a build_vector node, dig out the input value and
- // use it.
+ // If this is an index into a build_vector node (which has the same number
+ // of elements), dig out the input value and use it.
SDValue V = M < Size ? V1 : V2;
- if (V.getOpcode() != ISD::BUILD_VECTOR)
+ if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
continue;
SDValue Input = V.getOperand(M % Size);
@@ -7640,85 +6463,133 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
return Zeroable;
}
-/// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
-///
-/// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
-/// byte-shift instructions. The mask must consist of a shifted sequential
-/// shuffle from one of the input vectors and zeroable elements for the
-/// remaining 'shifted in' elements.
+/// \brief Try to emit a bitmask instruction for a shuffle.
///
-/// Note that this only handles 128-bit vector widths currently.
-static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- SelectionDAG &DAG) {
- assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
+/// This handles cases where we can model a blend exactly as a bitmask due to
+/// one of the inputs being zeroable.
+static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ MVT EltVT = VT.getScalarType();
+ int NumEltBits = EltVT.getSizeInBits();
+ MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
+ SDValue Zero = DAG.getConstant(0, IntEltVT);
+ SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
+ if (EltVT.isFloatingPoint()) {
+ Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
+ AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
+ }
+ SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+ SDValue V;
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ if (Zeroable[i])
+ continue;
+ if (Mask[i] % Size != i)
+ return SDValue(); // Not a blend.
+ if (!V)
+ V = Mask[i] < Size ? V1 : V2;
+ else if (V != (Mask[i] < Size ? V1 : V2))
+ return SDValue(); // Can only let one input through the mask.
+
+ VMaskOps[i] = AllOnes;
+ }
+ if (!V)
+ return SDValue(); // No non-zeroable elements!
+
+ SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
+ V = DAG.getNode(VT.isFloatingPoint()
+ ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
+ DL, VT, V, VMask);
+ return V;
+}
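// Worked example (editorial): v4i32 with V2 known zero and mask <0, 5, 2, 7>:
// elements 1 and 3 are zeroable, 0 and 2 keep V1 in place, so the whole
// shuffle folds to V1 & <-1, 0, -1, 0> (PAND, or FAND for FP types).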
+/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
+///
+/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
+/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
+/// matches elements from one of the input vectors shuffled to the left or
+/// right with zeroable elements 'shifted in'. It handles both the strictly
+/// bit-wise element shifts and the byte shift across an entire 128-bit double
+/// quad word lane.
+///
+/// PSLL : (little-endian) left bit shift.
+/// [ zz, 0, zz, 2 ]
+/// [ -1, 4, zz, -1 ]
+/// PSRL : (little-endian) right bit shift.
+/// [ 1, zz, 3, zz]
+/// [ -1, -1, 7, zz]
+/// PSLLDQ : (little-endian) left byte shift
+/// [ zz, 0, 1, 2, 3, 4, 5, 6]
+/// [ zz, zz, -1, -1, 2, 3, 4, -1]
+/// [ zz, zz, zz, zz, zz, zz, -1, 1]
+/// PSRLDQ : (little-endian) right byte shift
+/// [ 5, 6, 7, zz, zz, zz, zz, zz]
+/// [ -1, 5, 6, 7, zz, zz, zz, zz]
+/// [ 1, 2, -1, -1, -1, -1, zz, zz]
+static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
int Size = Mask.size();
- int Scale = 16 / Size;
+ assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
+
+ auto CheckZeros = [&](int Shift, int Scale, bool Left) {
+ for (int i = 0; i < Size; i += Scale)
+ for (int j = 0; j < Shift; ++j)
+ if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
+ return false;
- auto isSequential = [](int Base, int StartIndex, int EndIndex, int MaskOffset,
- ArrayRef<int> Mask) {
- for (int i = StartIndex; i < EndIndex; i++) {
- if (Mask[i] < 0)
- continue;
- if (i + Base != Mask[i] - MaskOffset)
- return false;
- }
return true;
};
- for (int Shift = 1; Shift < Size; Shift++) {
- int ByteShift = Shift * Scale;
-
- // PSRLDQ : (little-endian) right byte shift
- // [ 5, 6, 7, zz, zz, zz, zz, zz]
- // [ -1, 5, 6, 7, zz, zz, zz, zz]
- // [ 1, 2, -1, -1, -1, -1, zz, zz]
- bool ZeroableRight = true;
- for (int i = Size - Shift; i < Size; i++) {
- ZeroableRight &= Zeroable[i];
- }
-
- if (ZeroableRight) {
- bool ValidShiftRight1 = isSequential(Shift, 0, Size - Shift, 0, Mask);
- bool ValidShiftRight2 = isSequential(Shift, 0, Size - Shift, Size, Mask);
-
- if (ValidShiftRight1 || ValidShiftRight2) {
- // Cast the inputs to v2i64 to match PSRLDQ.
- SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
- SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
- SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
- DAG.getConstant(ByteShift * 8, MVT::i8));
- return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
- }
+ auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
+ for (int i = 0; i != Size; i += Scale) {
+ unsigned Pos = Left ? i + Shift : i;
+ unsigned Low = Left ? i : i + Shift;
+ unsigned Len = Scale - Shift;
+ if (!isSequentialOrUndefInRange(Mask, Pos, Len,
+ Low + (V == V1 ? 0 : Size)))
+ return SDValue();
}
- // PSLLDQ : (little-endian) left byte shift
- // [ zz, 0, 1, 2, 3, 4, 5, 6]
- // [ zz, zz, -1, -1, 2, 3, 4, -1]
- // [ zz, zz, zz, zz, zz, zz, -1, 1]
- bool ZeroableLeft = true;
- for (int i = 0; i < Shift; i++) {
- ZeroableLeft &= Zeroable[i];
- }
+ int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
+ bool ByteShift = ShiftEltBits > 64;
+ unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
+ : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
+ int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
- if (ZeroableLeft) {
- bool ValidShiftLeft1 = isSequential(-Shift, Shift, Size, 0, Mask);
- bool ValidShiftLeft2 = isSequential(-Shift, Shift, Size, Size, Mask);
+ // Normalize the scale for byte shifts to still produce an i64 element
+ // type.
+ Scale = ByteShift ? Scale / 2 : Scale;
- if (ValidShiftLeft1 || ValidShiftLeft2) {
- // Cast the inputs to v2i64 to match PSLLDQ.
- SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
- SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
- SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
- DAG.getConstant(ByteShift * 8, MVT::i8));
- return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
- }
- }
- }
+ // We need to round trip through the appropriate type for the shift.
+ MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
+ MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
+ assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
+ "Illegal integer vector type");
+ V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
+ V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
+ return DAG.getNode(ISD::BITCAST, DL, VT, V);
+ };
+
+ // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
+ // keep doubling the size of the integer elements up to that. We can
+ // then shift the elements of the integer vector by whole multiples of
+ // their width within the elements of the larger integer vector. Test each
+ // multiple to see if we can find a match with the moved element indices
+ // and that the shifted in elements are all zeroable.
+ for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
+ for (int Shift = 1; Shift != Scale; ++Shift)
+ for (bool Left : {true, false})
+ if (CheckZeros(Shift, Scale, Left))
+ for (SDValue V : {V1, V2})
+ if (SDValue Match = MatchShift(Shift, Scale, Left, V))
+ return Match;
+
+  // No match found.
return SDValue();
}
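// Worked example (editorial): a v8i16 mask <zz,0,zz,2,zz,4,zz,6> matches
// Scale == 2, Shift == 1, Left == true: CheckZeros sees every even slot
// zeroable, MatchShift sees <0,2,4,6> in the odd slots, and the lowering is
// a v4i32 VSHLI by 16 bits (PSLLD $16) on the bitcast input.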
@@ -7728,10 +6599,11 @@ static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget.
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
- SDLoc DL, MVT VT, int NumElements, int Scale, bool AnyExt, SDValue InputV,
+ SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
const X86Subtarget *Subtarget, SelectionDAG &DAG) {
assert(Scale > 1 && "Need a scale to extend.");
- int EltBits = VT.getSizeInBits() / NumElements;
+ int NumElements = VT.getVectorNumElements();
+ int EltBits = VT.getScalarSizeInBits();
assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
"Only 8, 16, and 32 bit elements can be extended.");
assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
@@ -7739,10 +6611,8 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
// Found a valid zext mask! Try various lowering strategies based on the
// input type and available ISA extensions.
if (Subtarget->hasSSE41()) {
- MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
NumElements / Scale);
- InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
}
@@ -7800,7 +6670,7 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}
-/// \brief Try to lower a vector shuffle as a zero extension on any micrarch.
+/// \brief Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
@@ -7818,7 +6688,10 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
int Bits = VT.getSizeInBits();
- int NumElements = Mask.size();
+ int NumElements = VT.getVectorNumElements();
+ assert(VT.getScalarSizeInBits() <= 32 &&
+ "Exceeds 32-bit integer zero extension limit");
+ assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
// Define a helper function to check a particular ext-scale and lower to it if
// valid.
@@ -7829,11 +6702,11 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
if (Mask[i] == -1)
continue; // Valid anywhere but doesn't tell us anything.
if (i % Scale != 0) {
- // Each of the extend elements needs to be zeroable.
+        // Each of the extended elements needs to be zeroable.
if (!Zeroable[i])
return SDValue();
- // We no lorger are in the anyext case.
+ // We no longer are in the anyext case.
AnyExt = false;
continue;
}
@@ -7847,7 +6720,7 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
return SDValue(); // Flip-flopping inputs.
if (Mask[i] % NumElements != i / Scale)
- return SDValue(); // Non-consecutive strided elemenst.
+ return SDValue(); // Non-consecutive strided elements.
}
// If we fail to find an input, we have a zero-shuffle which should always
@@ -7857,7 +6730,7 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
return SDValue();
return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
- DL, VT, NumElements, Scale, AnyExt, InputV, Subtarget, DAG);
+ DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
};
// The widest scale possible for extending is to a 64-bit integer.
@@ -7869,11 +6742,34 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
// many elements.
for (; NumExtElements < NumElements; NumExtElements *= 2) {
assert(NumElements % NumExtElements == 0 &&
- "The input vector size must be divisble by the extended size.");
+ "The input vector size must be divisible by the extended size.");
if (SDValue V = Lower(NumElements / NumExtElements))
return V;
}
+ // General extends failed, but 128-bit vectors may be able to use MOVQ.
+ if (Bits != 128)
+ return SDValue();
+
+ // Returns one of the source operands if the shuffle can be reduced to a
+  // MOVQ, copying the lower 64 bits and zero-filling the upper 64 bits.
+ auto CanZExtLowHalf = [&]() {
+ for (int i = NumElements / 2; i != NumElements; ++i)
+ if (!Zeroable[i])
+ return SDValue();
+ if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
+ return V1;
+ if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
+ return V2;
+ return SDValue();
+ };
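+  // MOVQ copies the low 64 bits of its source and zero-fills the upper 64
+  // bits, so e.g. a v4i32 shuffle with mask <0,1,Z,Z> (upper lanes zeroable)
+  // reduces to a single MOVQ of V1.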
+
+ if (SDValue V = CanZExtLowHalf()) {
+ V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
+ V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
+ return DAG.getNode(ISD::BITCAST, DL, VT, V);
+ }
+
// No viable ext lowering found.
return SDValue();
}
@@ -7916,7 +6812,7 @@ static bool isShuffleFoldableLoad(SDValue V) {
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
- MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget *Subtarget, SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
MVT ExtVT = VT;
@@ -7983,6 +6879,10 @@ static SDValue lowerVectorShuffleAsElementInsertion(
ExtVT, V1, V2);
}
+  // For floating-point vectors, this lowering only works for the low element.
+ if (VT.isFloatingPoint() && V2Index != 0)
+ return SDValue();
+
V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
if (ExtVT != VT)
V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
@@ -8001,7 +6901,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
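+      // Note: VSHLDQ (PSLLDQ) takes its shift amount in bytes, so the element
+      // offset must be converted from bits to bytes; e.g. inserting into lane
+      // 2 of a v4i32 shifts by 2 * 32 / 8 = 8 bytes.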
V2 = DAG.getNode(
X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
DAG.getConstant(
- V2Index * EltVT.getSizeInBits(),
+ V2Index * EltVT.getSizeInBits()/8,
DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
}
@@ -8014,7 +6914,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
-static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
+static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
ArrayRef<int> Mask,
const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
@@ -8086,6 +6986,199 @@ static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
+// Check whether we can use INSERTPS to perform the shuffle. We only use
+// INSERTPS when the V1 elements are already in the correct locations,
+// because otherwise we can always use two SHUFPS instructions, which
+// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
+// perform INSERTPS if a single V1 element is out of place and all V2
+// elements are zeroable.
+static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
+
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+
+ unsigned ZMask = 0;
+ int V1DstIndex = -1;
+ int V2DstIndex = -1;
+ bool V1UsedInPlace = false;
+
+ for (int i = 0; i < 4; ++i) {
+ // Synthesize a zero mask from the zeroable elements (includes undefs).
+ if (Zeroable[i]) {
+ ZMask |= 1 << i;
+ continue;
+ }
+
+ // Flag if we use any V1 inputs in place.
+ if (i == Mask[i]) {
+ V1UsedInPlace = true;
+ continue;
+ }
+
+ // We can only insert a single non-zeroable element.
+ if (V1DstIndex != -1 || V2DstIndex != -1)
+ return SDValue();
+
+ if (Mask[i] < 4) {
+ // V1 input out of place for insertion.
+ V1DstIndex = i;
+ } else {
+ // V2 input for insertion.
+ V2DstIndex = i;
+ }
+ }
+
+ // Don't bother if we have no (non-zeroable) element for insertion.
+ if (V1DstIndex == -1 && V2DstIndex == -1)
+ return SDValue();
+
+ // Determine element insertion src/dst indices. The src index is from the
+ // start of the inserted vector, not the start of the concatenated vector.
+ unsigned V2SrcIndex = 0;
+ if (V1DstIndex != -1) {
+    // If we have a V1 input out of place, use V1 as the source of the element
+    // insertion and don't use the original V2 at all.
+ V2SrcIndex = Mask[V1DstIndex];
+ V2DstIndex = V1DstIndex;
+ V2 = V1;
+ } else {
+ V2SrcIndex = Mask[V2DstIndex] - 4;
+ }
+
+  // If no V1 inputs are used in place, then the result is created only from
+  // the zero mask and the V2 insertion, so drop the dependency on V1.
+ if (!V1UsedInPlace)
+ V1 = DAG.getUNDEF(MVT::v4f32);
+
+ unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
+ assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
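+  // The immediate encodes, from high bits to low: [7:6] = source element in
+  // V2, [5:4] = destination slot, [3:0] = lanes to zero. For example (values
+  // purely illustrative), V2SrcIndex = 1, V2DstIndex = 2 and ZMask = 0b1000
+  // give (1 << 6) | (2 << 4) | 0x8 = 0x68.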
+
+ // Insert the V2 element into the desired position.
+ SDLoc DL(Op);
+ return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
+ DAG.getConstant(InsertPSMask, MVT::i8));
+}
+
+/// \brief Try to lower a shuffle as a permute of the inputs followed by an
+/// UNPCK instruction.
+///
+/// This specifically targets cases where we end up alternating between
+/// the two inputs, and so can permute them into something that feeds a single
+/// UNPCK instruction. Note that this routine only targets integer vectors
+/// because for floating point vectors we have a generalized SHUFPS lowering
+/// strategy that handles everything that doesn't *exactly* match an unpack,
+/// making this clever lowering unnecessary.
+static SDValue lowerVectorShuffleAsUnpack(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(!VT.isFloatingPoint() &&
+ "This routine only supports integer vectors.");
+ assert(!isSingleInputShuffleMask(Mask) &&
+ "This routine should only be used when blending two inputs.");
+ assert(Mask.size() >= 2 && "Single element masks are invalid.");
+
+ int Size = Mask.size();
+
+ int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
+ return M >= 0 && M % Size < Size / 2;
+ });
+ int NumHiInputs = std::count_if(
+ Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
+
+ bool UnpackLo = NumLoInputs >= NumHiInputs;
+
+ auto TryUnpack = [&](MVT UnpackVT, int Scale) {
+ SmallVector<int, 32> V1Mask(Mask.size(), -1);
+ SmallVector<int, 32> V2Mask(Mask.size(), -1);
+
+ for (int i = 0; i < Size; ++i) {
+ if (Mask[i] < 0)
+ continue;
+
+ // Each element of the unpack contains Scale elements from this mask.
+ int UnpackIdx = i / Scale;
+
+ // We only handle the case where V1 feeds the first slots of the unpack.
+ // We rely on canonicalization to ensure this is the case.
+ if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
+ return SDValue();
+
+      // Set up the mask for this input. The indexing is tricky as we have to
+ // handle the unpack stride.
+ SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
+ VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
+ Mask[i] % Size;
+ }
+
+ // If we will have to shuffle both inputs to use the unpack, check whether
+ // we can just unpack first and shuffle the result. If so, skip this unpack.
+ if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
+ !isNoopShuffleMask(V2Mask))
+ return SDValue();
+
+ // Shuffle the inputs into place.
+ V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
+ V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
+
+ // Cast the inputs to the type we will use to unpack them.
+ V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
+
+ // Unpack the inputs and cast the result back to the desired type.
+ return DAG.getNode(ISD::BITCAST, DL, VT,
+ DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
+ DL, UnpackVT, V1, V2));
+ };
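+  // For example, a v8i16 mask <0,8,2,10,4,12,6,14> is not a direct unpack,
+  // but after permuting V1 to <0,2,4,6,u,u,u,u> and V2 to <0,2,4,6,u,u,u,u>
+  // (u = undef), a single UNPCKLWD produces exactly the desired interleave.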
+
+  // Try each unpack width from the largest to the smallest to find one that
+  // fits this mask.
+ int OrigNumElements = VT.getVectorNumElements();
+ int OrigScalarSize = VT.getScalarSizeInBits();
+ for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
+ int Scale = ScalarSize / OrigScalarSize;
+ int NumElements = OrigNumElements / Scale;
+ MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
+ if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
+ return Unpack;
+ }
+
+ // If none of the unpack-rooted lowerings worked (or were profitable) try an
+ // initial unpack.
+ if (NumLoInputs == 0 || NumHiInputs == 0) {
+ assert((NumLoInputs > 0 || NumHiInputs > 0) &&
+ "We have to have *some* inputs!");
+ int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
+
+ // FIXME: We could consider the total complexity of the permute of each
+ // possible unpacking. Or at the least we should consider how many
+ // half-crossings are created.
+ // FIXME: We could consider commuting the unpacks.
+
+ SmallVector<int, 32> PermMask;
+ PermMask.assign(Size, -1);
+ for (int i = 0; i < Size; ++i) {
+ if (Mask[i] < 0)
+ continue;
+
+ assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
+
+ PermMask[i] =
+ 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
+ }
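+  // For example, with Size = 4 and all inputs from the low halves, the
+  // initial UNPCKL yields <V1[0],V2[0],V1[1],V2[1]>, so an original mask
+  // entry of 5 (element 1 of V2) becomes PermMask[i] = 2 * 1 + 1 = 3.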
+ return DAG.getVectorShuffle(
+ VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
+ DL, VT, V1, V2),
+ DAG.getUNDEF(VT), PermMask);
+ }
+
+ return SDValue();
+}
+
/// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
@@ -8105,6 +7198,11 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
if (isSingleInputShuffleMask(Mask)) {
+ // Use low duplicate instructions for masks that match their pattern.
+ if (Subtarget->hasSSE3())
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0}))
+ return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
+
// Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
@@ -8122,29 +7220,24 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
assert(Mask[1] >= 2 && "Non-canonicalized blend!");
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 2))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
- if (isShuffleEquivalent(Mask, 1, 3))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
-
// If we have a single input, insert that into V1 if we can do so cheaply.
if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
+ DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
return Insertion;
// Try inverting the insertion since for v2 masks it is easy to do and we
// can't reliably sort the mask one way or the other.
int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
+ DL, MVT::v2f64, V2, V1, InverseMask, Subtarget, DAG))
return Insertion;
}
// Try to use one of the special instruction patterns to handle two common
// blend patterns if a zero-blend above didn't work.
- if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
+ isShuffleEquivalent(V1, V2, Mask, {1, 3}))
if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
// We can either use a special instruction to load over the low double or
// to move just the low double.
@@ -8158,6 +7251,12 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Subtarget, DAG))
return Blend;
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 2}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
+
unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
DAG.getConstant(SHUFPDMask, MVT::i8));
@@ -8182,7 +7281,7 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isSingleInputShuffleMask(Mask)) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v2i64, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -8198,37 +7297,60 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
}
+ assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
+ assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
+ assert(Mask[0] < 2 && "We sort V1 to be the first input.");
+ assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
+
+  // If we have a blend of two PACKUS operations and the blend aligns with the
+  // low and high halves, we can just merge the PACKUS operations. This is
+ // particularly important as it lets us merge shuffles that this routine itself
+ // creates.
+ auto GetPackNode = [](SDValue V) {
+ while (V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
- // If we have a single input from V2 insert that into V1 if we can do so
- // cheaply.
- if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
- if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
- return Insertion;
- // Try inverting the insertion since for v2 masks it is easy to do and we
- // can't reliably sort the mask one way or the other.
- int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
- Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
- if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
- return Insertion;
- }
-
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 2))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
- if (isShuffleEquivalent(Mask, 1, 3))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
+ return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
+ };
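+  // For example, if V1 = PACKUS(A, B) and V2 = PACKUS(C, D), then the mask
+  // <0, 2> selects the low half of each pack and merges to PACKUS(A, C).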
+ if (SDValue V1Pack = GetPackNode(V1))
+ if (SDValue V2Pack = GetPackNode(V2))
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
+ Mask[0] == 0 ? V1Pack.getOperand(0)
+ : V1Pack.getOperand(1),
+ Mask[1] == 2 ? V2Pack.getOperand(0)
+ : V2Pack.getOperand(1)));
+
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
+ return Shift;
- if (Subtarget->hasSSE41())
+ // When loading a scalar and then shuffling it into a vector we can often do
+ // the insertion cheaply.
+ if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+ DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
+ return Insertion;
+ // Try inverting the insertion since for v2 masks it is easy to do and we
+ // can't reliably sort the mask one way or the other.
+ int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
+ if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+ DL, MVT::v2i64, V2, V1, InverseMask, Subtarget, DAG))
+ return Insertion;
+
+ // We have different paths for blend lowering, but they all must use the
+ // *exact* same predicate.
+ bool IsBlendSupported = Subtarget->hasSSE41();
+ if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
- // Try to use byte shift instructions.
- if (SDValue Shift = lowerVectorShuffleAsByteShift(
- DL, MVT::v2i64, V1, V2, Mask, DAG))
- return Shift;
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 2}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
// Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
@@ -8237,6 +7359,12 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Rotate;
+ // If we have direct support for blends, we should lower by decomposing into
+ // a permute. That will be faster than the domain cross.
+ if (IsBlendSupported)
+ return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
+ Mask, DAG);
+
// We implement this with SHUFPD which is pretty lame because it will likely
// incur 2 cycles of stall for integer vectors on Nehalem and older chips.
// However, all the alternatives are still more cycles and newer chips don't
@@ -8247,6 +7375,24 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
+/// \brief Test whether this can be lowered with a single SHUFPS instruction.
+///
+/// This is used to disable more specialized lowerings when the shufps lowering
+/// will happen to be efficient.
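+///
+/// For example, <0,2,4,6> is a single SHUFPS (low half from V1, high half
+/// from V2), while <0,4,1,5> is not, since its low half would need elements
+/// from both inputs.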
+static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
+ // This routine only handles 128-bit shufps.
+ assert(Mask.size() == 4 && "Unsupported mask size!");
+
+ // To lower with a single SHUFPS we need to have the low half and high half
+ // each requiring a single input.
+ if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
+ return false;
+ if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
+ return false;
+
+ return true;
+}
+
/// \brief Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
@@ -8358,10 +7504,18 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f32, V1,
Mask, Subtarget, DAG))
return Broadcast;
+ // Use even/odd duplicate instructions for masks that match their pattern.
+ if (Subtarget->hasSSE3()) {
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
+ return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
+ return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
+ }
+
if (Subtarget->hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
@@ -8375,70 +7529,41 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
getV4X86ShuffleImm8ForMask(Mask, DAG));
}
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
- if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
-
// There are special ways we can lower some single-element blends. However, we
// have custom ways we can lower more complex single-element blends below that
// we defer to if both this and BLENDPS fail to match, so restrict this to
// when the V2 input is targeting element 0 of the mask -- that is the fast
// case here.
if (NumV2Elements == 1 && Mask[0] >= 4)
- if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
+ if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4f32, V1, V2,
Mask, Subtarget, DAG))
return V;
- if (Subtarget->hasSSE41())
+ if (Subtarget->hasSSE41()) {
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
- // Check for whether we can use INSERTPS to perform the blend. We only use
- // INSERTPS when the V1 elements are already in the correct locations
- // because otherwise we can just always use two SHUFPS instructions which
- // are much smaller to encode than a SHUFPS and an INSERTPS.
- if (NumV2Elements == 1 && Subtarget->hasSSE41()) {
- int V2Index =
- std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
- Mask.begin();
-
- // When using INSERTPS we can zero any lane of the destination. Collect
- // the zero inputs into a mask and drop them from the lanes of V1 which
- // actually need to be present as inputs to the INSERTPS.
- SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
-
- // Synthesize a shuffle mask for the non-zero and non-v2 inputs.
- bool InsertNeedsShuffle = false;
- unsigned ZMask = 0;
- for (int i = 0; i < 4; ++i)
- if (i != V2Index) {
- if (Zeroable[i]) {
- ZMask |= 1 << i;
- } else if (Mask[i] != i) {
- InsertNeedsShuffle = true;
- break;
- }
- }
-
- // We don't want to use INSERTPS or other insertion techniques if it will
- // require shuffling anyways.
- if (!InsertNeedsShuffle) {
- // If all of V1 is zeroable, replace it with undef.
- if ((ZMask | 1 << V2Index) == 0xF)
- V1 = DAG.getUNDEF(MVT::v4f32);
-
- unsigned InsertPSMask = (Mask[V2Index] - 4) << 6 | V2Index << 4 | ZMask;
- assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
+ // Use INSERTPS if we can complete the shuffle efficiently.
+ if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
+ return V;
- // Insert the V2 element into the desired position.
- return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
- DAG.getConstant(InsertPSMask, MVT::i8));
- }
+ if (!isSingleSHUFPSMask(Mask))
+ if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
+ DL, MVT::v4f32, V1, V2, Mask, DAG))
+ return BlendPerm;
}
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 4, 1, 5}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 6, 3, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {4, 0, 5, 1}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {6, 2, 7, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);
+
// Otherwise fall back to a SHUFPS lowering strategy.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
@@ -8470,7 +7595,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i32, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -8481,36 +7606,47 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// so prevents folding a load into this instruction or making a copy.
const int UnpackLoMask[] = {0, 0, 1, 1};
const int UnpackHiMask[] = {2, 2, 3, 3};
- if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
Mask = UnpackLoMask;
- else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
+ else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
Mask = UnpackHiMask;
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(Mask, DAG));
}
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
+ return Shift;
+
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
- if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
+ if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4i32, V1, V2,
Mask, Subtarget, DAG))
return V;
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
- if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
-
- if (Subtarget->hasSSE41())
+ // We have different paths for blend lowering, but they all must use the
+ // *exact* same predicate.
+ bool IsBlendSupported = Subtarget->hasSSE41();
+ if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
- // Try to use byte shift instructions.
- if (SDValue Shift = lowerVectorShuffleAsByteShift(
- DL, MVT::v4i32, V1, V2, Mask, DAG))
- return Shift;
+ if (SDValue Masked =
+ lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
+ return Masked;
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 4, 1, 5}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 6, 3, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {4, 0, 5, 1}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {6, 2, 7, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);
// Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
@@ -8519,6 +7655,17 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
+ // If we have direct support for blends, we should lower by decomposing into
+ // a permute. That will be faster than the domain cross.
+ if (IsBlendSupported)
+ return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
+ Mask, DAG);
+
+ // Try to lower by permuting the inputs into an unpack instruction.
+ if (SDValue Unpack =
+ lowerVectorShuffleAsUnpack(DL, MVT::v4i32, V1, V2, Mask, DAG))
+ return Unpack;
+
// We implement this with SHUFPS because it can blend from two vectors.
// Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
@@ -8542,7 +7689,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
-static SDValue lowerV8I16SingleInputVectorShuffle(
+static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
const X86Subtarget *Subtarget, SelectionDAG &DAG) {
assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
@@ -8570,27 +7717,6 @@ static SDValue lowerV8I16SingleInputVectorShuffle(
MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
- // Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
- Mask, Subtarget, DAG))
- return Broadcast;
-
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
- if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
-
- // Try to use byte shift instructions.
- if (SDValue Shift = lowerVectorShuffleAsByteShift(
- DL, MVT::v8i16, V, V, Mask, DAG))
- return Shift;
-
- // Try to use byte rotation instructions.
- if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
- DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
- return Rotate;
-
// Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
// such inputs we can swap two of the dwords across the half mark and end up
// with <=2 inputs to each half in each half. Once there, we can fall through
@@ -8993,158 +8119,56 @@ static SDValue lowerV8I16SingleInputVectorShuffle(
return V;
}
-/// \brief Detect whether the mask pattern should be lowered through
-/// interleaving.
-///
-/// This essentially tests whether viewing the mask as an interleaving of two
-/// sub-sequences reduces the cross-input traffic of a blend operation. If so,
-/// lowering it through interleaving is a significantly better strategy.
-static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
- int NumEvenInputs[2] = {0, 0};
- int NumOddInputs[2] = {0, 0};
- int NumLoInputs[2] = {0, 0};
- int NumHiInputs[2] = {0, 0};
- for (int i = 0, Size = Mask.size(); i < Size; ++i) {
- if (Mask[i] < 0)
- continue;
-
- int InputIdx = Mask[i] >= Size;
-
- if (i < Size / 2)
- ++NumLoInputs[InputIdx];
- else
- ++NumHiInputs[InputIdx];
-
- if ((i % 2) == 0)
- ++NumEvenInputs[InputIdx];
- else
- ++NumOddInputs[InputIdx];
- }
-
- // The minimum number of cross-input results for both the interleaved and
- // split cases. If interleaving results in fewer cross-input results, return
- // true.
- int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
- NumEvenInputs[0] + NumOddInputs[1]);
- int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
- NumLoInputs[0] + NumHiInputs[1]);
- return InterleavedCrosses < SplitCrosses;
-}
-
-/// \brief Blend two v8i16 vectors using a naive unpack strategy.
-///
-/// This strategy only works when the inputs from each vector fit into a single
-/// half of that vector, and generally there are not so many inputs as to leave
-/// the in-place shuffles required highly constrained (and thus expensive). It
-/// shifts all the inputs into a single side of both input vectors and then
-/// uses an unpack to interleave these inputs in a single vector. At that
-/// point, we will fall back on the generic single input shuffle lowering.
-static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
- SDValue V2,
- MutableArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
- assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
- SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
- for (int i = 0; i < 8; ++i)
- if (Mask[i] >= 0 && Mask[i] < 4)
- LoV1Inputs.push_back(i);
- else if (Mask[i] >= 4 && Mask[i] < 8)
- HiV1Inputs.push_back(i);
- else if (Mask[i] >= 8 && Mask[i] < 12)
- LoV2Inputs.push_back(i);
- else if (Mask[i] >= 12)
- HiV2Inputs.push_back(i);
-
- int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
- int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
- (void)NumV1Inputs;
- (void)NumV2Inputs;
- assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
- assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
- assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
-
- bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
- HiV1Inputs.size() + HiV2Inputs.size();
-
- auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
- ArrayRef<int> HiInputs, bool MoveToLo,
- int MaskOffset) {
- ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
- ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
- if (BadInputs.empty())
- return V;
-
- int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int MoveOffset = MoveToLo ? 0 : 4;
+/// \brief Helper to form a PSHUFB-based shuffle+blend.
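+///
+/// PSHUFB operates on bytes, so wider masks are scaled up to byte controls:
+/// e.g. for a v8i16 shuffle (Scale = 2) mask element M becomes the control
+/// bytes {2M, 2M+1}, and a control byte with the high bit set (0x80 here)
+/// zeroes that lane.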
+static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG, bool &V1InUse,
+ bool &V2InUse) {
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+ SDValue V1Mask[16];
+ SDValue V2Mask[16];
+ V1InUse = false;
+ V2InUse = false;
- if (GoodInputs.empty()) {
- for (int BadInput : BadInputs) {
- MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
- Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
- }
+ int Size = Mask.size();
+ int Scale = 16 / Size;
+ for (int i = 0; i < 16; ++i) {
+ if (Mask[i / Scale] == -1) {
+ V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
} else {
- if (GoodInputs.size() == 2) {
- // If the low inputs are spread across two dwords, pack them into
- // a single dword.
- MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
- MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
- Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
- Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
- } else {
- // Otherwise pin the good inputs.
- for (int GoodInput : GoodInputs)
- MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
- }
-
- if (BadInputs.size() == 2) {
- // If we have two bad inputs then there may be either one or two good
- // inputs fixed in place. Find a fixed input, and then find the *other*
- // two adjacent indices by using modular arithmetic.
- int GoodMaskIdx =
- std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
- [](int M) { return M >= 0; }) -
- std::begin(MoveMask);
- int MoveMaskIdx =
- ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
- assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
- assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
- MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
- MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
- Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
- Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
- } else {
- assert(BadInputs.size() == 1 && "All sizes handled");
- int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
- std::end(MoveMask), -1) -
- std::begin(MoveMask);
- MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
- Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
- }
- }
-
- return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
- MoveMask);
- };
- V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
- /*MaskOffset*/ 0);
- V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
- /*MaskOffset*/ 8);
-
- // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
- // cross-half traffic in the final shuffle.
+ const int ZeroMask = 0x80;
+ int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
+ : ZeroMask;
+ int V2Idx = Mask[i / Scale] < Size
+ ? ZeroMask
+ : (Mask[i / Scale] - Size) * Scale + i % Scale;
+ if (Zeroable[i / Scale])
+ V1Idx = V2Idx = ZeroMask;
+ V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
+ V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
+ V1InUse |= (ZeroMask != V1Idx);
+ V2InUse |= (ZeroMask != V2Idx);
+ }
+ }
+
+ if (V1InUse)
+ V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V1),
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
+ if (V2InUse)
+ V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V2),
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
- // Munge the mask to be a single-input mask after the unpack merges the
- // results.
- for (int &M : Mask)
- if (M != -1)
- M = 2 * (M % 4) + (M / 8);
+ // If we need shuffled inputs from both, blend the two.
+ SDValue V;
+ if (V1InUse && V2InUse)
+ V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
+ else
+ V = V1InUse ? V1 : V2;
- return DAG.getVectorShuffle(
- MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
- DL, MVT::v8i16, V1, V2),
- DAG.getUNDEF(MVT::v8i16), Mask);
+ // Cast the result back to the correct type.
+ return DAG.getNode(ISD::BITCAST, DL, VT, V);
}
/// \brief Generic lowering of 8-lane i16 shuffles.
@@ -9181,85 +8205,95 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
auto isV1 = [](int M) { return M >= 0 && M < 8; };
+ (void)isV1;
auto isV2 = [](int M) { return M >= 8; };
- int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
- if (NumV2Inputs == 0)
- return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
+ if (NumV2Inputs == 0) {
+ // Check for being able to broadcast a single element.
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i16, V1,
+ Mask, Subtarget, DAG))
+ return Broadcast;
+
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, DAG))
+ return Shift;
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V1, Mask, {0, 0, 1, 1, 2, 2, 3, 3}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V1);
+ if (isShuffleEquivalent(V1, V1, Mask, {4, 4, 5, 5, 6, 6, 7, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V1);
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
+ Mask, Subtarget, DAG))
+ return Rotate;
+
+ return lowerV8I16GeneralSingleInputVectorShuffle(DL, V1, Mask, Subtarget,
+ DAG);
+ }
- assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
- "to be V1-input shuffles.");
+ assert(std::any_of(Mask.begin(), Mask.end(), isV1) &&
+ "All single-input shuffles should be canonicalized to be V1-input "
+ "shuffles.");
+
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ return Shift;
// There are special ways we can lower some single-element blends.
if (NumV2Inputs == 1)
- if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
+ if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v8i16, V1, V2,
Mask, Subtarget, DAG))
return V;
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
- if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
-
- if (Subtarget->hasSSE41())
+ // We have different paths for blend lowering, but they all must use the
+ // *exact* same predicate.
+ bool IsBlendSupported = Subtarget->hasSSE41();
+ if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
Subtarget, DAG))
return Blend;
- // Try to use byte shift instructions.
- if (SDValue Shift = lowerVectorShuffleAsByteShift(
- DL, MVT::v8i16, V1, V2, Mask, DAG))
- return Shift;
+ if (SDValue Masked =
+ lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ return Masked;
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 2, 10, 3, 11}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {4, 12, 5, 13, 6, 14, 7, 15}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- if (NumV1Inputs + NumV2Inputs <= 4)
- return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
-
- // Check whether an interleaving lowering is likely to be more efficient.
- // This isn't perfect but it is a strong heuristic that tends to work well on
- // the kinds of shuffles that show up in practice.
- //
- // FIXME: Handle 1x, 2x, and 4x interleaving.
- if (shouldLowerAsInterleaving(Mask)) {
- // FIXME: Figure out whether we should pack these into the low or high
- // halves.
+ if (SDValue BitBlend =
+ lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ return BitBlend;
- int EMask[8], OMask[8];
- for (int i = 0; i < 4; ++i) {
- EMask[i] = Mask[2*i];
- OMask[i] = Mask[2*i + 1];
- EMask[i + 4] = -1;
- OMask[i + 4] = -1;
- }
+ if (SDValue Unpack =
+ lowerVectorShuffleAsUnpack(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ return Unpack;
- SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
- SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
-
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
- }
-
- int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
-
- for (int i = 0; i < 4; ++i) {
- LoBlendMask[i] = Mask[i];
- HiBlendMask[i] = Mask[i + 4];
+ // If we can't directly blend but can use PSHUFB, that will be better as it
+ // can both shuffle and set up the inefficient blend.
+ if (!IsBlendSupported && Subtarget->hasSSSE3()) {
+ bool V1InUse, V2InUse;
+ return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
+ V1InUse, V2InUse);
}
- SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
- SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
- LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
- HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
-
- return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
- DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
+  // We can always bit-blend if we have to, so the fallback strategy is to
+ // decompose into single-input permutes and blends.
+ return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
+ Mask, DAG);
}
/// \brief Check whether a compaction lowering can be done by dropping even
@@ -9345,40 +8379,31 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> OrigMask = SVOp->getMask();
- assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+ ArrayRef<int> Mask = SVOp->getMask();
+ assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- // Try to use byte shift instructions.
- if (SDValue Shift = lowerVectorShuffleAsByteShift(
- DL, MVT::v16i8, V1, V2, OrigMask, DAG))
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
- DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
+ DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to use a zext lowering.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
- DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
+ DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return ZExt;
- int MaskStorage[16] = {
- OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
- OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
- OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
- OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
- MutableArrayRef<int> Mask(MaskStorage);
- MutableArrayRef<int> LoMask = Mask.slice(0, 8);
- MutableArrayRef<int> HiMask = Mask.slice(8, 8);
-
int NumV2Elements =
std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
// For single-input shuffles, there are some nicer lowering tricks we can use.
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i8, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -9475,36 +8500,17 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
}
- // Check whether an interleaving lowering is likely to be more efficient.
- // This isn't perfect but it is a strong heuristic that tends to work well on
- // the kinds of shuffles that show up in practice.
- //
- // FIXME: We need to handle other interleaving widths (i16, i32, ...).
- if (shouldLowerAsInterleaving(Mask)) {
- int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
- return (M >= 0 && M < 8) || (M >= 16 && M < 24);
- });
- int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
- return (M >= 8 && M < 16) || M >= 24;
- });
- int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1};
- int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1};
- bool UnpackLo = NumLoHalf >= NumHiHalf;
- MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
- MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
- for (int i = 0; i < 8; ++i) {
- TargetEMask[i] = Mask[2 * i];
- TargetOMask[i] = Mask[2 * i + 1];
- }
-
- SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
- SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
-
- return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
- MVT::v16i8, Evens, Odds);
- }
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {// Low half.
+ 0, 16, 1, 17, 2, 18, 3, 19,
+ // High half.
+ 4, 20, 5, 21, 6, 22, 7, 23}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {// Low half.
+ 8, 24, 9, 25, 10, 26, 11, 27,
+ // High half.
+ 12, 28, 13, 29, 14, 30, 15, 31}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V1, V2);
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
// with PSHUFB. It is important to do this before we attempt to generate any
@@ -9520,33 +8526,47 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// interleavings with direct instructions supporting them. We currently don't
// handle those well here.
if (Subtarget->hasSSSE3()) {
- SDValue V1Mask[16];
- SDValue V2Mask[16];
- for (int i = 0; i < 16; ++i)
- if (Mask[i] == -1) {
- V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
- } else {
- V1Mask[i] = DAG.getConstant(Mask[i] < 16 ? Mask[i] : 0x80, MVT::i8);
- V2Mask[i] =
- DAG.getConstant(Mask[i] < 16 ? 0x80 : Mask[i] - 16, MVT::i8);
- }
- V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
- if (isSingleInputShuffleMask(Mask))
- return V1; // Single inputs are easy.
+ bool V1InUse = false;
+ bool V2InUse = false;
- // Otherwise, blend the two.
- V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
- return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
+ SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
+ DAG, V1InUse, V2InUse);
+
+ // If both V1 and V2 are in use and we can use a direct blend or an unpack,
+ // do so. This avoids using them to handle blends-with-zero which is
+ // important as a single pshufb is significantly faster for that.
+ if (V1InUse && V2InUse) {
+ if (Subtarget->hasSSE41())
+ if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
+ Mask, Subtarget, DAG))
+ return Blend;
+
+ // We can use an unpack to do the blending rather than an or in some
+ // cases. Even though the or may be (very minorly) more efficient, we
+      // prefer this lowering because there are common cases where part of
+ // the complexity of the shuffles goes away when we do the final blend as
+ // an unpack.
+ // FIXME: It might be worth trying to detect if the unpack-feeding
+ // shuffles will both be pshufb, in which case we shouldn't bother with
+ // this.
+ if (SDValue Unpack =
+ lowerVectorShuffleAsUnpack(DL, MVT::v16i8, V1, V2, Mask, DAG))
+ return Unpack;
+ }
+
+ return PSHUFB;
}
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
- if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
+ if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v16i8, V1, V2,
Mask, Subtarget, DAG))
return V;
+ if (SDValue BitBlend =
+ lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
+ return BitBlend;
+
// Check whether a compaction lowering can be done. This handles shuffles
// which take every Nth element for some even N. See the helper function for
// details.
@@ -9585,72 +8605,58 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Result;
}
- int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ // Handle multi-input cases by blending single-input shuffles.
+ if (NumV2Elements > 0)
+ return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
+ Mask, DAG);
- auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
- MutableArrayRef<int> V1HalfBlendMask,
- MutableArrayRef<int> V2HalfBlendMask) {
- for (int i = 0; i < 8; ++i)
- if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
- V1HalfBlendMask[i] = HalfMask[i];
- HalfMask[i] = i;
- } else if (HalfMask[i] >= 16) {
- V2HalfBlendMask[i] = HalfMask[i] - 16;
- HalfMask[i] = i + 8;
- }
- };
- buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
- buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
+ // The fallback path for single-input shuffles widens this into two v8i16
+ // vectors with unpacks, shuffles those, and then pulls them back together
+ // with a pack.
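+  // For example, UNPCKL/UNPCKH against zero place each source byte in the low
+  // byte of an i16 lane; once the two v8i16 shuffles have selected the right
+  // lanes, PACKUS truncates every i16 back down to its low byte.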
+ SDValue V = V1;
- SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
+ int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ for (int i = 0; i < 16; ++i)
+ if (Mask[i] >= 0)
+ (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
- auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
- MutableArrayRef<int> HiBlendMask) {
- SDValue V1, V2;
- // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
- // them out and avoid using UNPCK{L,H} to extract the elements of V as
- // i16s.
- if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
- [](int M) { return M >= 0 && M % 2 == 1; }) &&
- std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
- [](int M) { return M >= 0 && M % 2 == 1; })) {
- // Use a mask to drop the high bytes.
- V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
- V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
- DAG.getConstant(0x00FF, MVT::v8i16));
-
- // This will be a single vector shuffle instead of a blend so nuke V2.
- V2 = DAG.getUNDEF(MVT::v8i16);
-
- // Squash the masks to point directly into V1.
- for (int &M : LoBlendMask)
- if (M >= 0)
- M /= 2;
- for (int &M : HiBlendMask)
- if (M >= 0)
- M /= 2;
- } else {
- // Otherwise just unpack the low half of V into V1 and the high half into
- // V2 so that we can blend them as i16s.
- V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
- DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
- V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
- DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
- }
+ SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
- SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
- SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
- return std::make_pair(BlendedLo, BlendedHi);
- };
- SDValue V1Lo, V1Hi, V2Lo, V2Hi;
- std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
- std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
+ SDValue VLoHalf, VHiHalf;
+ // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
+ // them out and avoid using UNPCK{L,H} to extract the elements of V as
+ // i16s.
+ if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
+ [](int M) { return M >= 0 && M % 2 == 1; }) &&
+ std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
+ [](int M) { return M >= 0 && M % 2 == 1; })) {
+ // Use a mask to drop the high bytes.
+ VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
+ VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
+ DAG.getConstant(0x00FF, MVT::v8i16));
+
+ // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
+ VHiHalf = DAG.getUNDEF(MVT::v8i16);
+
+ // Squash the masks to point directly into VLoHalf.
+ for (int &M : LoBlendMask)
+ if (M >= 0)
+ M /= 2;
+ for (int &M : HiBlendMask)
+ if (M >= 0)
+ M /= 2;
+ } else {
+ // Otherwise just unpack the low half of V into VLoHalf and the high half into
+ // VHiHalf so that we can blend them as i16s.
+ VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
+ VHiHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
+ }
- SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
- SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
+ SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
+ SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
@@ -9736,7 +8742,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
return true;
}
-/// \brief Generic routine to split ector shuffle into half-sized shuffles.
+/// \brief Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
@@ -9757,14 +8763,43 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
MVT ScalarVT = VT.getScalarType();
MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
- SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
- DAG.getIntPtrConstant(0));
- SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
- DAG.getIntPtrConstant(SplitNumElements));
- SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
- DAG.getIntPtrConstant(0));
- SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
- DAG.getIntPtrConstant(SplitNumElements));
+ // Rather than splitting build-vectors, just build two narrower build
+ // vectors. This helps shuffling with splats and zeros.
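+  // For example, if V is a v8i32 build_vector, each half becomes its own
+  // v4i32 build_vector, which can then fold to a constant-pool load, splat,
+  // or zero idiom rather than an extract from the wider value.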
+ auto SplitVector = [&](SDValue V) {
+ while (V.getOpcode() == ISD::BITCAST)
+ V = V->getOperand(0);
+
+ MVT OrigVT = V.getSimpleValueType();
+ int OrigNumElements = OrigVT.getVectorNumElements();
+ int OrigSplitNumElements = OrigNumElements / 2;
+ MVT OrigScalarVT = OrigVT.getScalarType();
+ MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
+
+ SDValue LoV, HiV;
+
+ auto *BV = dyn_cast<BuildVectorSDNode>(V);
+ if (!BV) {
+ LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
+ DAG.getIntPtrConstant(0));
+ HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
+ DAG.getIntPtrConstant(OrigSplitNumElements));
+ } else {
+
+ SmallVector<SDValue, 16> LoOps, HiOps;
+ for (int i = 0; i < OrigSplitNumElements; ++i) {
+ LoOps.push_back(BV->getOperand(i));
+ HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
+ }
+ LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
+ HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
+ }
+ return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
+ DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
+ };
+
+ SDValue LoV1, HiV1, LoV2, HiV2;
+ std::tie(LoV1, HiV1) = SplitVector(V1);
+ std::tie(LoV2, HiV2) = SplitVector(V2);
// Now create two 4-way blends of these half-width vectors.
auto HalfBlend = [&](ArrayRef<int> HalfMask) {
@@ -9960,15 +8995,15 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
VT.getVectorNumElements() / 2);
// Check for patterns which can be matched with a single insert of a 128-bit
// subvector.
- if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
- isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}) ||
+ isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0));
SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
}
- if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 6, 7})) {
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0));
SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
@@ -9983,6 +9018,104 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
DAG.getConstant(PermMask, MVT::i8));
}
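
The PermMask fed to this node follows the documented VPERM2F128/VPERM2I128 immediate layout: each 128-bit half of the result is chosen by a 2-bit selector (0/1 = low/high half of V1, 2/3 = low/high half of V2), with the low result half in imm[1:0] and the high result half in imm[5:4]. A hedged sketch of the encoding (the helper name is invented):

// Mask entries index 64-bit elements, so dividing by 2 names the 128-bit
// half that each result half reads from.
static unsigned perm2X128Imm(const int Mask[4]) {
  return unsigned(Mask[0] / 2) | (unsigned(Mask[2] / 2) << 4);
}
// e.g. Mask {2, 3, 4, 5} encodes as 0x21: { high half of V1, low half of V2 }.
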
+/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
+/// shuffling each lane.
+///
+/// This will only succeed when the result of fixing the 128-bit lanes results
+/// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
+/// each 128-bit lane. This handles many cases where we can quickly blend away
+/// the lane crosses early and then use simpler shuffles within each lane.
+///
+/// FIXME: It might be worthwhile at some point to support this without
+/// requiring the 128-bit lane-relative shuffles to be repeating, but currently
+/// in x86 only floating point has interesting non-repeating shuffles, and even
+/// those are still *marginally* more expensive.
+static SDValue lowerVectorShuffleByMerging128BitLanes(
+ SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ assert(!isSingleInputShuffleMask(Mask) &&
+ "This is only useful with multiple inputs.");
+
+ int Size = Mask.size();
+ int LaneSize = 128 / VT.getScalarSizeInBits();
+ int NumLanes = Size / LaneSize;
+ assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
+
+ // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
+ // check whether the in-128-bit lane shuffles share a repeating pattern.
+ SmallVector<int, 4> Lanes;
+ Lanes.resize(NumLanes, -1);
+ SmallVector<int, 4> InLaneMask;
+ InLaneMask.resize(LaneSize, -1);
+ for (int i = 0; i < Size; ++i) {
+ if (Mask[i] < 0)
+ continue;
+
+ int j = i / LaneSize;
+
+ if (Lanes[j] < 0) {
+ // First entry we've seen for this lane.
+ Lanes[j] = Mask[i] / LaneSize;
+ } else if (Lanes[j] != Mask[i] / LaneSize) {
+ // This doesn't match the lane selected previously!
+ return SDValue();
+ }
+
+ // Check that within each lane we have a consistent shuffle mask.
+ int k = i % LaneSize;
+ if (InLaneMask[k] < 0) {
+ InLaneMask[k] = Mask[i] % LaneSize;
+ } else if (InLaneMask[k] != Mask[i] % LaneSize) {
+ // This doesn't fit a repeating in-lane mask.
+ return SDValue();
+ }
+ }
+
+ // First shuffle the lanes into place.
+ MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
+ VT.getSizeInBits() / 64);
+ SmallVector<int, 8> LaneMask;
+ LaneMask.resize(NumLanes * 2, -1);
+ for (int i = 0; i < NumLanes; ++i)
+ if (Lanes[i] >= 0) {
+ LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
+ LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
+ }
+
+ V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
+ SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
+
+ // Cast it back to the type we actually want.
+ LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
+
+ // Now do a simple shuffle that isn't lane crossing.
+ SmallVector<int, 8> NewMask;
+ NewMask.resize(Size, -1);
+ for (int i = 0; i < Size; ++i)
+ if (Mask[i] >= 0)
+ NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
+ assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
+ "Must not introduce lane crosses at this point!");
+
+ return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
+}
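
To see exactly what the two bail-outs above reject, here is a standalone replay of the same scan over a plain mask (hypothetical names, no DAG):

#include <vector>

// Fails when two different source lanes feed one result lane, or when the
// per-lane element pattern is not the same in every lane; these are the two
// SDValue() returns in the loop above.
static bool decomposeLaneMerge(const std::vector<int> &Mask, int LaneSize,
                               std::vector<int> &Lanes,
                               std::vector<int> &InLaneMask) {
  int Size = int(Mask.size());
  Lanes.assign(Size / LaneSize, -1);
  InLaneMask.assign(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    int &Lane = Lanes[i / LaneSize];
    if (Lane < 0)
      Lane = Mask[i] / LaneSize;
    else if (Lane != Mask[i] / LaneSize)
      return false; // result lane draws from two source lanes
    int &Elt = InLaneMask[i % LaneSize];
    if (Elt < 0)
      Elt = Mask[i] % LaneSize;
    else if (Elt != Mask[i] % LaneSize)
      return false; // in-lane pattern does not repeat across lanes
  }
  return true;
}

// e.g. the v8f32 mask {8,9,10,11, 0,1,2,3} (V2's low lane, then V1's low
// lane) decomposes into Lanes {2, 0} with the identity in-lane mask
// {0, 1, 2, 3}, so only the cross-lane step needs a two-input shuffle.
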
+
+/// \brief Test whether the specified input (0 or 1) is in-place blended by the
+/// given mask.
+///
+/// This returns true if the elements from a particular input are already in the
+/// slots required by the given mask and require no permutation.
+static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
+ assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
+ int Size = Mask.size();
+ for (int i = 0; i < Size; ++i)
+ if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
+ return false;
+
+ return true;
+}
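
For a concrete feel of the predicate, a check one could drop inside a unit test body:

#include <cassert>
// For the mask {0, 5, 2, 7}: V1 supplies slots 0 and 2, V2 supplies slots
// 1 and 3 (its own elements 1 and 3), all already where the mask wants them.
assert(isShuffleMaskInputInPlace(0, {0, 5, 2, 7}));
assert(isShuffleMaskInputInPlace(1, {0, 5, 2, 7}));
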
+
/// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
@@ -10004,10 +9137,14 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isSingleInputShuffleMask(Mask)) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f64, V1,
Mask, Subtarget, DAG))
return Broadcast;
+ // Use low duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
+ return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
+
if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
@@ -10029,10 +9166,14 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
- if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 4, 2, 6}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
- if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 5, 3, 7}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {4, 0, 6, 2}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {5, 1, 7, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
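
The two commuted patterns follow mechanically from operand swapping: renumbering V1 and V2 maps each concatenation index x to x < 4 ? x + 4 : x - 4, which turns the UNPCKL pattern {0, 4, 2, 6} into {4, 0, 6, 2}, and likewise for UNPCKH. A tiny self-check:

#include <cassert>

int main() {
  int Unpckl[4] = {0, 4, 2, 6};  // UNPCKL(V1, V2) element sources
  int Swapped[4];
  for (int i = 0; i < 4; ++i)
    Swapped[i] = Unpckl[i] < 4 ? Unpckl[i] + 4 : Unpckl[i] - 4;
  int Expected[4] = {4, 0, 6, 2}; // the commuted pattern matched above
  for (int i = 0; i < 4; ++i)
    assert(Swapped[i] == Expected[i]);
}
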
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
@@ -10040,7 +9181,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
if (NumV2Elements == 1 && Mask[0] >= 4)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Insertion;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
@@ -10067,6 +9208,16 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getConstant(SHUFPDMask, MVT::i8));
}
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+  // shuffle. However, if we have AVX2 and either input is already in place,
+  // we will be able to shuffle the other input even across lanes in a single
+  // instruction, so skip this pattern.
+ if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ isShuffleMaskInputInPlace(1, Mask))))
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
  // If we have AVX2 then we always want to lower with a blend because at v4 we
// can fully permute the elements.
if (Subtarget->hasAVX2())
@@ -10102,7 +9253,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10123,12 +9274,6 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
}
-
- // Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
- return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
- if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
- return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
}
// AVX2 provides a direct instruction for permuting a single input across
@@ -10137,6 +9282,31 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DAG));
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
+ return Shift;
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 4, 2, 6}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 5, 3, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {4, 0, 6, 2}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {5, 1, 7, 3}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
+
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+  // shuffle. However, if we have AVX2 and either input is already in place,
+  // we will be able to shuffle the other input even across lanes in a single
+  // instruction, so skip this pattern.
+ if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ isShuffleMaskInputInPlace(1, Mask))))
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
Mask, DAG);
@@ -10161,7 +9331,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10171,15 +9341,26 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 &&
"Repeated masks must be half the mask width!");
+
+ // Use even/odd duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
+ return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3, 5, 5, 7, 7}))
+ return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
+
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
- if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 10, 3, 11, 6, 14, 7, 15}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {8, 0, 9, 1, 12, 4, 13, 5}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {10, 2, 11, 3, 14, 6, 15, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
// Otherwise, fall back to a SHUFPS sequence. Here it is important that we
// have already handled any direct blends. We also need to squash the
@@ -10214,6 +9395,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG);
}
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+ // shuffle.
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
// If we have AVX2 then we always want to lower with a blend because at v8 we
// can fully permute the elements.
if (Subtarget->hasAVX2())
@@ -10239,12 +9426,19 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
+ // Whenever we can lower this as a zext, that instruction is strictly faster
+ // than any alternative. It also allows us to fold memory operands into the
+ // shuffle in many cases.
+ if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
+ Mask, Subtarget, DAG))
+ return ZExt;
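
A simplified sketch of what such a zext match looks for, assuming a fixed stride of two (the real lowering tries several scales and both any- and zero-extension, so treat this as a model only):

// Even result slots must walk the low elements of the input in order;
// odd slots must be provably zero (or undef) to stand in for the extension.
static bool matchZExtStride2(ArrayRef<int> Mask, ArrayRef<bool> Zeroable) {
  int Size = int(Mask.size());
  for (int i = 0; i < Size; ++i) {
    if (i % 2 == 0) {
      if (Mask[i] >= 0 && Mask[i] != i / 2)
        return false; // not reading element i/2 (undef is fine: any-extend)
    } else if (!Zeroable[i]) {
      return false;   // inserted high part would not be zero
    }
  }
  return true;
}
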
+
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10259,12 +9453,25 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
- if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 10, 3, 11, 6, 14, 7, 15}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {8, 0, 9, 1, 12, 4, 13, 5}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
+ if (isShuffleEquivalent(V1, V2, Mask, {10, 2, 11, 3, 14, 6, 15, 7}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
}
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
+ return Shift;
+
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
// If the shuffle patterns aren't repeated but it is a single input, directly
// generate a cross-lane VPERMD instruction.
if (isSingleInputShuffleMask(Mask)) {
@@ -10277,6 +9484,12 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
}
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+ // shuffle.
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
Mask, DAG);
@@ -10297,36 +9510,53 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
+ // Whenever we can lower this as a zext, that instruction is strictly faster
+ // than any alternative. It also allows us to fold memory operands into the
+ // shuffle in many cases.
+ if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
+ Mask, Subtarget, DAG))
+ return ZExt;
+
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1,
Mask, Subtarget, DAG))
return Broadcast;
- // There are no generalized cross-lane shuffle operations available on i16
- // element types.
- if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
- return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
- Mask, DAG);
-
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
- if (isShuffleEquivalent(Mask,
- // First 128-bit lane:
- 0, 16, 1, 17, 2, 18, 3, 19,
- // Second 128-bit lane:
- 8, 24, 9, 25, 10, 26, 11, 27))
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane:
+ 0, 16, 1, 17, 2, 18, 3, 19,
+ // Second 128-bit lane:
+ 8, 24, 9, 25, 10, 26, 11, 27}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
- if (isShuffleEquivalent(Mask,
- // First 128-bit lane:
- 4, 20, 5, 21, 6, 22, 7, 23,
- // Second 128-bit lane:
- 12, 28, 13, 29, 14, 30, 15, 31))
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane:
+ 4, 20, 5, 21, 6, 22, 7, 23,
+ // Second 128-bit lane:
+ 12, 28, 13, 29, 14, 30, 15, 31}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
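
A hedged, single-input sketch of the rotation match (the actual helper also handles two inputs and works at byte granularity so it can emit PALIGNR):

// Every defined mask entry must agree on one rotation amount, i.e.
// Mask[i] == (i + Amount) % Size; undef entries impose no constraint.
static int matchRotationAmount(ArrayRef<int> Mask) {
  int Size = int(Mask.size()), Amount = -1;
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    int A = (Mask[i] - i + Size) % Size;
    if (Amount < 0)
      Amount = A;
    else if (A != Amount)
      return -1; // inconsistent amounts: not a rotation
  }
  return Amount; // also -1 for an all-undef mask
}
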
+
if (isSingleInputShuffleMask(Mask)) {
+ // There are no generalized cross-lane shuffle operations available on i16
+ // element types.
+ if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
+ return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
+ Mask, DAG);
+
SDValue PSHUFBMask[32];
for (int i = 0; i < 16; ++i) {
if (Mask[i] == -1) {
@@ -10347,6 +9577,12 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
}
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+ // shuffle.
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
}
@@ -10366,17 +9602,18 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
+ // Whenever we can lower this as a zext, that instruction is strictly faster
+ // than any alternative. It also allows us to fold memory operands into the
+ // shuffle in many cases.
+ if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
+ Mask, Subtarget, DAG))
+ return ZExt;
+
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1,
Mask, Subtarget, DAG))
return Broadcast;
- // There are no generalized cross-lane shuffle operations available on i8
- // element types.
- if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
- return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
- Mask, DAG);
-
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
Subtarget, DAG))
return Blend;
@@ -10385,21 +9622,37 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Note that these are repeated 128-bit lane unpacks, not unpacks across all
// 256-bit lanes.
if (isShuffleEquivalent(
- Mask,
- // First 128-bit lane:
- 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
- // Second 128-bit lane:
- 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
+ V1, V2, Mask,
+ {// First 128-bit lane:
+ 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
+ // Second 128-bit lane:
+ 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
if (isShuffleEquivalent(
- Mask,
- // First 128-bit lane:
- 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
- // Second 128-bit lane:
- 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
+ V1, V2, Mask,
+ {// First 128-bit lane:
+ 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
+ // Second 128-bit lane:
+ 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
+ // Try to use shift instructions.
+ if (SDValue Shift =
+ lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
if (isSingleInputShuffleMask(Mask)) {
+ // There are no generalized cross-lane shuffle operations available on i8
+ // element types.
+ if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
+ return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
+ Mask, DAG);
+
SDValue PSHUFBMask[32];
for (int i = 0; i < 32; ++i)
PSHUFBMask[i] =
@@ -10412,6 +9665,12 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
}
+ // Try to simplify this by merging 128-bit lanes to enable a lane-based
+ // shuffle.
+ if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
+ DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
+ return Result;
+
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
}
@@ -10478,6 +9737,13 @@ static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+ // X86 has dedicated unpack instructions that can handle specific blend
+ // operations: UNPCKH and UNPCKL.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 2, 10, 4, 12, 6, 14}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 9, 3, 11, 5, 13, 7, 15}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
+
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
}
@@ -10493,6 +9759,20 @@ static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane.
+ 0, 16, 1, 17, 4, 20, 5, 21,
+ // Second 128-bit lane.
+ 8, 24, 9, 25, 12, 28, 13, 29}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane.
+ 2, 18, 3, 19, 6, 22, 7, 23,
+ // Second 128-bit lane.
+ 10, 26, 11, 27, 14, 30, 15, 31}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
+
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
}
@@ -10508,6 +9788,13 @@ static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+ // X86 has dedicated unpack instructions that can handle specific blend
+ // operations: UNPCKH and UNPCKL.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 2, 10, 4, 12, 6, 14}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {1, 9, 3, 11, 5, 13, 7, 15}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
+
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
}
@@ -10523,6 +9810,20 @@ static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane.
+ 0, 16, 1, 17, 4, 20, 5, 21,
+ // Second 128-bit lane.
+ 8, 24, 9, 25, 12, 28, 13, 29}))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask,
+ {// First 128-bit lane.
+ 2, 18, 3, 19, 6, 22, 7, 23,
+ // Second 128-bit lane.
+ 10, 26, 11, 27, 14, 30, 15, 31}))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
+
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
}
@@ -10574,8 +9875,8 @@ static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
"Cannot lower 512-bit vectors w/ basic ISA!");
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast =
+ lowerVectorShuffleAsBroadcast(DL, VT, V1, Mask, Subtarget, DAG))
return Broadcast;
  // Dispatch to each element type for lowering. If we don't have support for
@@ -10651,6 +9952,13 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
}
+ // We actually see shuffles that are entirely re-arrangements of a set of
+ // zero inputs. This mostly happens while decomposing complex shuffles into
+ // simple ones. Directly lower these as a buildvector of zeros.
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+ if (Zeroable.all())
+ return getZeroVector(VT, Subtarget, DAG, dl);
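
A coarse model of that test; the real computeZeroableShuffleElements works per element, so build-vectors that are only partly zero contribute too:

// An element is "zeroable" if its mask entry is undef or selects from an
// all-zeros input; if every element is, the shuffle is a zero vector.
static bool allLanesZeroable(ArrayRef<int> Mask, bool V1IsZero, bool V2IsZero) {
  int Size = int(Mask.size());
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    if (Mask[i] < Size ? !V1IsZero : !V2IsZero)
      return false; // reads a potentially non-zero element
  }
  return true;
}
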
+
// Try to collapse shuffles into using a vector type with fewer elements but
// wider element types. We cap this to not form integers or floating point
// elements wider than 64 bits, but it might be interesting to form i128
@@ -10690,7 +9998,8 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// When the number of V1 and V2 elements are the same, try to minimize the
// number of uses of V2 in the low half of the vector. When that is tied,
// ensure that the sum of indices for V1 is equal to or lower than the sum
- // indices for V2.
+  // of indices for V2. When those are equal, try to ensure that the number of
+  // odd indices for V1 is lower than the number of odd indices for V2.
if (NumV1Elements == NumV2Elements) {
int LowV1Elements = 0, LowV2Elements = 0;
for (int M : SVOp->getMask().slice(0, NumElements / 2))
@@ -10707,8 +10016,18 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
SumV2Indices += i;
else if (SVOp->getMask()[i] >= 0)
SumV1Indices += i;
- if (SumV2Indices < SumV1Indices)
+ if (SumV2Indices < SumV1Indices) {
return DAG.getCommutedVectorShuffle(*SVOp);
+ } else if (SumV2Indices == SumV1Indices) {
+ int NumV1OddIndices = 0, NumV2OddIndices = 0;
+ for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
+ if (SVOp->getMask()[i] >= NumElements)
+ NumV2OddIndices += i % 2;
+ else if (SVOp->getMask()[i] >= 0)
+ NumV1OddIndices += i % 2;
+ if (NumV2OddIndices < NumV1OddIndices)
+ return DAG.getCommutedVectorShuffle(*SVOp);
+ }
}
}
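
Replaying the full three-level tie-break on a small example, as a self-contained check:

#include <cassert>

int main() {
  // Mask {4, 1, 6, 3} over 4-element inputs: V2 (indices >= 4) feeds slots
  // 0 and 2, V1 feeds slots 1 and 3.
  int Mask[4] = {4, 1, 6, 3}, NumElements = 4;
  int SumV1 = 0, SumV2 = 0, OddV1 = 0, OddV2 = 0;
  for (int i = 0; i < 4; ++i) {
    if (Mask[i] >= NumElements) { SumV2 += i; OddV2 += i % 2; }
    else if (Mask[i] >= 0)      { SumV1 += i; OddV1 += i % 2; }
  }
  // Low-half uses tie (one element each), so the index sums decide:
  // 0 + 2 < 1 + 3, hence this shuffle gets commuted; the odd-index count
  // would only be consulted on an exact sum tie.
  assert(SumV2 == 2 && SumV1 == 4 && OddV2 == 0 && OddV1 == 2);
}
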
@@ -10727,1586 +10046,6 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
llvm_unreachable("Unimplemented!");
}
-
-//===----------------------------------------------------------------------===//
-// Legacy vector shuffle lowering
-//
-// This code is the legacy code handling vector shuffles until the above
-// replaces its functionality and performance.
-//===----------------------------------------------------------------------===//
-
-static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
- bool hasInt256, unsigned *MaskOut = nullptr) {
- MVT EltVT = VT.getVectorElementType();
-
- // There is no blend with immediate in AVX-512.
- if (VT.is512BitVector())
- return false;
-
- if (!hasSSE41 || EltVT == MVT::i8)
- return false;
- if (!hasInt256 && VT == MVT::v16i16)
- return false;
-
- unsigned MaskValue = 0;
- unsigned NumElems = VT.getVectorNumElements();
- // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
- unsigned NumLanes = (NumElems - 1) / 8 + 1;
- unsigned NumElemsInLane = NumElems / NumLanes;
-
- // Blend for v16i16 should be symetric for the both lanes.
- for (unsigned i = 0; i < NumElemsInLane; ++i) {
-
- int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
- int EltIdx = MaskVals[i];
-
- if ((EltIdx < 0 || EltIdx == (int)i) &&
- (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
- continue;
-
- if (((unsigned)EltIdx == (i + NumElems)) &&
- (SndLaneEltIdx < 0 ||
- (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
- MaskValue |= (1 << i);
- else
- return false;
- }
-
- if (MaskOut)
- *MaskOut = MaskValue;
- return true;
-}
-
-// Try to lower a shuffle node into a simple blend instruction.
-// This function assumes isBlendMask returns true for this
-// SuffleVectorSDNode
-static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
- unsigned MaskValue,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- MVT VT = SVOp->getSimpleValueType(0);
- MVT EltVT = VT.getVectorElementType();
- assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
- Subtarget->hasInt256() && "Trying to lower a "
- "VECTOR_SHUFFLE to a Blend but "
- "with the wrong mask"));
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- unsigned NumElems = VT.getVectorNumElements();
-
- // Convert i32 vectors to floating point if it is not AVX2.
- // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
- MVT BlendVT = VT;
- if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
- BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
- NumElems);
- V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1);
- V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2);
- }
-
- SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
- DAG.getConstant(MaskValue, MVT::i32));
- return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
-}
-
-/// In vector type \p VT, return true if the element at index \p InputIdx
-/// falls on a different 128-bit lane than \p OutputIdx.
-static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
- unsigned OutputIdx) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
-}
-
-/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
-/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
-/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
-/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
-/// zero.
-static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
- SelectionDAG &DAG) {
- MVT VT = V1.getSimpleValueType();
- assert(VT.is128BitVector() || VT.is256BitVector());
-
- MVT EltVT = VT.getVectorElementType();
- unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
- unsigned NumElts = VT.getVectorNumElements();
-
- SmallVector<SDValue, 32> PshufbMask;
- for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
- int InputIdx = MaskVals[OutputIdx];
- unsigned InputByteIdx;
-
- if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
- InputByteIdx = 0x80;
- else {
- // Cross lane is not allowed.
- if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
- return SDValue();
- InputByteIdx = InputIdx * EltSizeInBytes;
- // Index is an byte offset within the 128-bit lane.
- InputByteIdx &= 0xf;
- }
-
- for (unsigned j = 0; j < EltSizeInBytes; ++j) {
- PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
- if (InputByteIdx != 0x80)
- ++InputByteIdx;
- }
- }
-
- MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
- if (ShufVT != VT)
- V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
- return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
- DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
-}
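
For reference while reading the legacy helper above, a scalar model of the PSHUFB semantics it encodes, one 128-bit lane at a time:

#include <cstdint>

// A control byte with its top bit set writes zero; otherwise its low four
// bits pick a byte from the same 128-bit lane of the source.
static void pshufbLane(const uint8_t Src[16], const uint8_t Ctl[16],
                       uint8_t Dst[16]) {
  for (int i = 0; i < 16; ++i)
    Dst[i] = (Ctl[i] & 0x80) ? 0 : Src[Ctl[i] & 0x0F];
}
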
-
-// v8i16 shuffles - Prefer shuffles in the following order:
-// 1. [all] pshuflw, pshufhw, optional move
-// 2. [ssse3] 1 x pshufb
-// 3. [ssse3] 2 x pshufb + 1 x por
-// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
-static SDValue
-LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- SmallVector<int, 8> MaskVals;
-
- // Determine if more than 1 of the words in each of the low and high quadwords
- // of the result come from the same quadword of one of the two inputs. Undef
- // mask values count as coming from any quadword, for better codegen.
- //
- // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
- // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
- unsigned LoQuad[] = { 0, 0, 0, 0 };
- unsigned HiQuad[] = { 0, 0, 0, 0 };
- // Indices of quads used.
- std::bitset<4> InputQuads;
- for (unsigned i = 0; i < 8; ++i) {
- unsigned *Quad = i < 4 ? LoQuad : HiQuad;
- int EltIdx = SVOp->getMaskElt(i);
- MaskVals.push_back(EltIdx);
- if (EltIdx < 0) {
- ++Quad[0];
- ++Quad[1];
- ++Quad[2];
- ++Quad[3];
- continue;
- }
- ++Quad[EltIdx / 4];
- InputQuads.set(EltIdx / 4);
- }
-
- int BestLoQuad = -1;
- unsigned MaxQuad = 1;
- for (unsigned i = 0; i < 4; ++i) {
- if (LoQuad[i] > MaxQuad) {
- BestLoQuad = i;
- MaxQuad = LoQuad[i];
- }
- }
-
- int BestHiQuad = -1;
- MaxQuad = 1;
- for (unsigned i = 0; i < 4; ++i) {
- if (HiQuad[i] > MaxQuad) {
- BestHiQuad = i;
- MaxQuad = HiQuad[i];
- }
- }
-
- // For SSSE3, If all 8 words of the result come from only 1 quadword of each
- // of the two input vectors, shuffle them into one input vector so only a
- // single pshufb instruction is necessary. If there are more than 2 input
- // quads, disable the next transformation since it does not help SSSE3.
- bool V1Used = InputQuads[0] || InputQuads[1];
- bool V2Used = InputQuads[2] || InputQuads[3];
- if (Subtarget->hasSSSE3()) {
- if (InputQuads.count() == 2 && V1Used && V2Used) {
- BestLoQuad = InputQuads[0] ? 0 : 1;
- BestHiQuad = InputQuads[2] ? 2 : 3;
- }
- if (InputQuads.count() > 2) {
- BestLoQuad = -1;
- BestHiQuad = -1;
- }
- }
-
- // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
- // the shuffle mask. If a quad is scored as -1, that means that it contains
- // words from all 4 input quadwords.
- SDValue NewV;
- if (BestLoQuad >= 0 || BestHiQuad >= 0) {
- int MaskV[] = {
- BestLoQuad < 0 ? 0 : BestLoQuad,
- BestHiQuad < 0 ? 1 : BestHiQuad
- };
- NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
- DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
- DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
- NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
-
- // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
- // source words for the shuffle, to aid later transformations.
- bool AllWordsInNewV = true;
- bool InOrder[2] = { true, true };
- for (unsigned i = 0; i != 8; ++i) {
- int idx = MaskVals[i];
- if (idx != (int)i)
- InOrder[i/4] = false;
- if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
- continue;
- AllWordsInNewV = false;
- break;
- }
-
- bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
- if (AllWordsInNewV) {
- for (int i = 0; i != 8; ++i) {
- int idx = MaskVals[i];
- if (idx < 0)
- continue;
- idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
- if ((idx != i) && idx < 4)
- pshufhw = false;
- if ((idx != i) && idx > 3)
- pshuflw = false;
- }
- V1 = NewV;
- V2Used = false;
- BestLoQuad = 0;
- BestHiQuad = 1;
- }
-
- // If we've eliminated the use of V2, and the new mask is a pshuflw or
- // pshufhw, that's as cheap as it gets. Return the new shuffle.
- if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
- unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
- unsigned TargetMask = 0;
- NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
- DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
- TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
- getShufflePSHUFLWImmediate(SVOp);
- V1 = NewV.getOperand(0);
- return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
- }
- }
-
- // Promote splats to a larger type which usually leads to more efficient code.
- // FIXME: Is this true if pshufb is available?
- if (SVOp->isSplat())
- return PromoteSplat(SVOp, DAG);
-
- // If we have SSSE3, and all words of the result are from 1 input vector,
- // case 2 is generated, otherwise case 3 is generated. If no SSSE3
- // is present, fall back to case 4.
- if (Subtarget->hasSSSE3()) {
- SmallVector<SDValue,16> pshufbMask;
-
- // If we have elements from both input vectors, set the high bit of the
- // shuffle mask element to zero out elements that come from V2 in the V1
- // mask, and elements that come from V1 in the V2 mask, so that the two
- // results can be OR'd together.
- bool TwoInputs = V1Used && V2Used;
- V1 = getPSHUFB(MaskVals, V1, dl, DAG);
- if (!TwoInputs)
- return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
-
- // Calculate the shuffle mask for the second input, shuffle it, and
- // OR it with the first shuffled input.
- CommuteVectorShuffleMask(MaskVals, 8);
- V2 = getPSHUFB(MaskVals, V2, dl, DAG);
- V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
- return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
- }
-
- // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
- // and update MaskVals with new element order.
- std::bitset<8> InOrder;
- if (BestLoQuad >= 0) {
- int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
- for (int i = 0; i != 4; ++i) {
- int idx = MaskVals[i];
- if (idx < 0) {
- InOrder.set(i);
- } else if ((idx / 4) == BestLoQuad) {
- MaskV[i] = idx & 3;
- InOrder.set(i);
- }
- }
- NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
- &MaskV[0]);
-
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
- NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
- NewV.getOperand(0),
- getShufflePSHUFLWImmediate(SVOp), DAG);
- }
- }
-
- // If BestHi >= 0, generate a pshufhw to put the high elements in order,
- // and update MaskVals with the new element order.
- if (BestHiQuad >= 0) {
- int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
- for (unsigned i = 4; i != 8; ++i) {
- int idx = MaskVals[i];
- if (idx < 0) {
- InOrder.set(i);
- } else if ((idx / 4) == BestHiQuad) {
- MaskV[i] = (idx & 3) + 4;
- InOrder.set(i);
- }
- }
- NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
- &MaskV[0]);
-
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
- NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
- NewV.getOperand(0),
- getShufflePSHUFHWImmediate(SVOp), DAG);
- }
- }
-
- // In case BestHi & BestLo were both -1, which means each quadword has a word
- // from each of the four input quadwords, calculate the InOrder bitvector now
- // before falling through to the insert/extract cleanup.
- if (BestLoQuad == -1 && BestHiQuad == -1) {
- NewV = V1;
- for (int i = 0; i != 8; ++i)
- if (MaskVals[i] < 0 || MaskVals[i] == i)
- InOrder.set(i);
- }
-
- // The other elements are put in the right place using pextrw and pinsrw.
- for (unsigned i = 0; i != 8; ++i) {
- if (InOrder[i])
- continue;
- int EltIdx = MaskVals[i];
- if (EltIdx < 0)
- continue;
- SDValue ExtOp = (EltIdx < 8) ?
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
- DAG.getIntPtrConstant(EltIdx)) :
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
- DAG.getIntPtrConstant(EltIdx - 8));
- NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
- DAG.getIntPtrConstant(i));
- }
- return NewV;
-}
-
-/// \brief v16i16 shuffles
-///
-/// FIXME: We only support generation of a single pshufb currently. We can
-/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
-/// well (e.g 2 x pshufb + 1 x por).
-static SDValue
-LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
-
- if (V2.getOpcode() != ISD::UNDEF)
- return SDValue();
-
- SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
- return getPSHUFB(MaskVals, V1, dl, DAG);
-}
-
-// v16i8 shuffles - Prefer shuffles in the following order:
-// 1. [ssse3] 1 x pshufb
-// 2. [ssse3] 2 x pshufb + 1 x por
-// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
-static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
- const X86Subtarget* Subtarget,
- SelectionDAG &DAG) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- ArrayRef<int> MaskVals = SVOp->getMask();
-
- // Promote splats to a larger type which usually leads to more efficient code.
- // FIXME: Is this true if pshufb is available?
- if (SVOp->isSplat())
- return PromoteSplat(SVOp, DAG);
-
- // If we have SSSE3, case 1 is generated when all result bytes come from
- // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
- // present, fall back to case 3.
-
- // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
- if (Subtarget->hasSSSE3()) {
- SmallVector<SDValue,16> pshufbMask;
-
- // If all result elements are from one input vector, then only translate
- // undef mask values to 0x80 (zero out result) in the pshufb mask.
- //
- // Otherwise, we have elements from both input vectors, and must zero out
- // elements that come from V2 in the first mask, and V1 in the second mask
- // so that we can OR them together.
- for (unsigned i = 0; i != 16; ++i) {
- int EltIdx = MaskVals[i];
- if (EltIdx < 0 || EltIdx >= 16)
- EltIdx = 0x80;
- pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
- }
- V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, pshufbMask));
-
- // As PSHUFB will zero elements with negative indices, it's safe to ignore
- // the 2nd operand if it's undefined or zero.
- if (V2.getOpcode() == ISD::UNDEF ||
- ISD::isBuildVectorAllZeros(V2.getNode()))
- return V1;
-
- // Calculate the shuffle mask for the second input, shuffle it, and
- // OR it with the first shuffled input.
- pshufbMask.clear();
- for (unsigned i = 0; i != 16; ++i) {
- int EltIdx = MaskVals[i];
- EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
- pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
- }
- V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
- DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, pshufbMask));
- return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
- }
-
- // No SSSE3 - Calculate in place words and then fix all out of place words
- // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
- // the 16 different words that comprise the two doublequadword input vectors.
- V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
- V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
- SDValue NewV = V1;
- for (int i = 0; i != 8; ++i) {
- int Elt0 = MaskVals[i*2];
- int Elt1 = MaskVals[i*2+1];
-
- // This word of the result is all undef, skip it.
- if (Elt0 < 0 && Elt1 < 0)
- continue;
-
- // This word of the result is already in the correct place, skip it.
- if ((Elt0 == i*2) && (Elt1 == i*2+1))
- continue;
-
- SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
- SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
- SDValue InsElt;
-
- // If Elt0 and Elt1 are defined, are consecutive, and can be load
- // using a single extract together, load it and store it.
- if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
- InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
- DAG.getIntPtrConstant(Elt1 / 2));
- NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
- DAG.getIntPtrConstant(i));
- continue;
- }
-
- // If Elt1 is defined, extract it from the appropriate source. If the
- // source byte is not also odd, shift the extracted word left 8 bits
- // otherwise clear the bottom 8 bits if we need to do an or.
- if (Elt1 >= 0) {
- InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
- DAG.getIntPtrConstant(Elt1 / 2));
- if ((Elt1 & 1) == 0)
- InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
- DAG.getConstant(8,
- TLI.getShiftAmountTy(InsElt.getValueType())));
- else if (Elt0 >= 0)
- InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
- DAG.getConstant(0xFF00, MVT::i16));
- }
- // If Elt0 is defined, extract it from the appropriate source. If the
- // source byte is not also even, shift the extracted word right 8 bits. If
- // Elt1 was also defined, OR the extracted values together before
- // inserting them in the result.
- if (Elt0 >= 0) {
- SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
- Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
- if ((Elt0 & 1) != 0)
- InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
- DAG.getConstant(8,
- TLI.getShiftAmountTy(InsElt0.getValueType())));
- else if (Elt1 >= 0)
- InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
- DAG.getConstant(0x00FF, MVT::i16));
- InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
- : InsElt0;
- }
- NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
- DAG.getIntPtrConstant(i));
- }
- return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
-}
-
-// v32i8 shuffles - Translate to VPSHUFB if possible.
-static
-SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- MVT VT = SVOp->getSimpleValueType(0);
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
-
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
- bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
- bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
-
- // VPSHUFB may be generated if
- // (1) one of input vector is undefined or zeroinitializer.
- // The mask value 0x80 puts 0 in the corresponding slot of the vector.
- // And (2) the mask indexes don't cross the 128-bit lane.
- if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
- (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
- return SDValue();
-
- if (V1IsAllZero && !V2IsAllZero) {
- CommuteVectorShuffleMask(MaskVals, 32);
- V1 = V2;
- }
- return getPSHUFB(MaskVals, V1, dl, DAG);
-}
-
-/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
-/// done when every pair / quad of shuffle mask elements point to elements in
-/// the right sequence. e.g.
-/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
-static
-SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG) {
- MVT VT = SVOp->getSimpleValueType(0);
- SDLoc dl(SVOp);
- unsigned NumElems = VT.getVectorNumElements();
- MVT NewVT;
- unsigned Scale;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected!");
- case MVT::v2i64:
- case MVT::v2f64:
- return SDValue(SVOp, 0);
- case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
- case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
- case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
- case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
- case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
- case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
- }
-
- SmallVector<int, 8> MaskVec;
- for (unsigned i = 0; i != NumElems; i += Scale) {
- int StartIdx = -1;
- for (unsigned j = 0; j != Scale; ++j) {
- int EltIdx = SVOp->getMaskElt(i+j);
- if (EltIdx < 0)
- continue;
- if (StartIdx < 0)
- StartIdx = (EltIdx / Scale);
- if (EltIdx != (int)(StartIdx*Scale + j))
- return SDValue();
- }
- MaskVec.push_back(StartIdx);
- }
-
- SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
- SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
- return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
-}
-
-/// getVZextMovL - Return a zero-extending vector move low node.
-///
-static SDValue getVZextMovL(MVT VT, MVT OpVT,
- SDValue SrcOp, SelectionDAG &DAG,
- const X86Subtarget *Subtarget, SDLoc dl) {
- if (VT == MVT::v2f64 || VT == MVT::v4f32) {
- LoadSDNode *LD = nullptr;
- if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
- LD = dyn_cast<LoadSDNode>(SrcOp);
- if (!LD) {
- // movssrr and movsdrr do not clear top bits. Try to use movd, movq
- // instead.
- MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
- if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
- SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
- SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
- SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
- // PR2108
- OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
- return DAG.getNode(ISD::BITCAST, dl, VT,
- DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
- OpVT,
- SrcOp.getOperand(0)
- .getOperand(0))));
- }
- }
- }
-
- return DAG.getNode(ISD::BITCAST, dl, VT,
- DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
- DAG.getNode(ISD::BITCAST, dl,
- OpVT, SrcOp)));
-}
-
-/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles
-/// which could not be matched by any known target speficic shuffle
-static SDValue
-LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
-
- SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
- if (NewOp.getNode())
- return NewOp;
-
- MVT VT = SVOp->getSimpleValueType(0);
-
- unsigned NumElems = VT.getVectorNumElements();
- unsigned NumLaneElems = NumElems / 2;
-
- SDLoc dl(SVOp);
- MVT EltVT = VT.getVectorElementType();
- MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
- SDValue Output[2];
-
- SmallVector<int, 16> Mask;
- for (unsigned l = 0; l < 2; ++l) {
- // Build a shuffle mask for the output, discovering on the fly which
- // input vectors to use as shuffle operands (recorded in InputUsed).
- // If building a suitable shuffle vector proves too hard, then bail
- // out with UseBuildVector set.
- bool UseBuildVector = false;
- int InputUsed[2] = { -1, -1 }; // Not yet discovered.
- unsigned LaneStart = l * NumLaneElems;
- for (unsigned i = 0; i != NumLaneElems; ++i) {
- // The mask element. This indexes into the input.
- int Idx = SVOp->getMaskElt(i+LaneStart);
- if (Idx < 0) {
- // the mask element does not index into any input vector.
- Mask.push_back(-1);
- continue;
- }
-
- // The input vector this mask element indexes into.
- int Input = Idx / NumLaneElems;
-
- // Turn the index into an offset from the start of the input vector.
- Idx -= Input * NumLaneElems;
-
- // Find or create a shuffle vector operand to hold this input.
- unsigned OpNo;
- for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
- if (InputUsed[OpNo] == Input)
- // This input vector is already an operand.
- break;
- if (InputUsed[OpNo] < 0) {
- // Create a new operand for this input vector.
- InputUsed[OpNo] = Input;
- break;
- }
- }
-
- if (OpNo >= array_lengthof(InputUsed)) {
- // More than two input vectors used! Give up on trying to create a
- // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
- UseBuildVector = true;
- break;
- }
-
- // Add the mask index for the new shuffle vector.
- Mask.push_back(Idx + OpNo * NumLaneElems);
- }
-
- if (UseBuildVector) {
- SmallVector<SDValue, 16> SVOps;
- for (unsigned i = 0; i != NumLaneElems; ++i) {
- // The mask element. This indexes into the input.
- int Idx = SVOp->getMaskElt(i+LaneStart);
- if (Idx < 0) {
- SVOps.push_back(DAG.getUNDEF(EltVT));
- continue;
- }
-
- // The input vector this mask element indexes into.
- int Input = Idx / NumElems;
-
- // Turn the index into an offset from the start of the input vector.
- Idx -= Input * NumElems;
-
- // Extract the vector element by hand.
- SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- SVOp->getOperand(Input),
- DAG.getIntPtrConstant(Idx)));
- }
-
- // Construct the output using a BUILD_VECTOR.
- Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
- } else if (InputUsed[0] < 0) {
- // No input vectors were used! The result is undefined.
- Output[l] = DAG.getUNDEF(NVT);
- } else {
- SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
- (InputUsed[0] % 2) * NumLaneElems,
- DAG, dl);
- // If only one input was used, use an undefined vector for the other.
- SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
- Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
- (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
- // At least one input vector was used. Create a new shuffle vector.
- Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
- }
-
- Mask.clear();
- }
-
- // Concatenate the result back
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
-}
-
-/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
-/// 4 elements, and match them with several different shuffle types.
-static SDValue
-LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- MVT VT = SVOp->getSimpleValueType(0);
-
- assert(VT.is128BitVector() && "Unsupported vector size");
-
- std::pair<int, int> Locs[4];
- int Mask1[] = { -1, -1, -1, -1 };
- SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
-
- unsigned NumHi = 0;
- unsigned NumLo = 0;
- for (unsigned i = 0; i != 4; ++i) {
- int Idx = PermMask[i];
- if (Idx < 0) {
- Locs[i] = std::make_pair(-1, -1);
- } else {
- assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
- if (Idx < 4) {
- Locs[i] = std::make_pair(0, NumLo);
- Mask1[NumLo] = Idx;
- NumLo++;
- } else {
- Locs[i] = std::make_pair(1, NumHi);
- if (2+NumHi < 4)
- Mask1[2+NumHi] = Idx;
- NumHi++;
- }
- }
- }
-
- if (NumLo <= 2 && NumHi <= 2) {
- // If no more than two elements come from either vector. This can be
- // implemented with two shuffles. First shuffle gather the elements.
- // The second shuffle, which takes the first shuffle as both of its
- // vector operands, put the elements into the right order.
- V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
-
- int Mask2[] = { -1, -1, -1, -1 };
-
- for (unsigned i = 0; i != 4; ++i)
- if (Locs[i].first != -1) {
- unsigned Idx = (i < 2) ? 0 : 4;
- Idx += Locs[i].first * 2 + Locs[i].second;
- Mask2[i] = Idx;
- }
-
- return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
- }
-
- if (NumLo == 3 || NumHi == 3) {
- // Otherwise, we must have three elements from one vector, call it X, and
- // one element from the other, call it Y. First, use a shufps to build an
- // intermediate vector with the one element from Y and the element from X
- // that will be in the same half in the final destination (the indexes don't
- // matter). Then, use a shufps to build the final vector, taking the half
- // containing the element from Y from the intermediate, and the other half
- // from X.
- if (NumHi == 3) {
- // Normalize it so the 3 elements come from V1.
- CommuteVectorShuffleMask(PermMask, 4);
- std::swap(V1, V2);
- }
-
- // Find the element from V2.
- unsigned HiIndex;
- for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
- int Val = PermMask[HiIndex];
- if (Val < 0)
- continue;
- if (Val >= 4)
- break;
- }
-
- Mask1[0] = PermMask[HiIndex];
- Mask1[1] = -1;
- Mask1[2] = PermMask[HiIndex^1];
- Mask1[3] = -1;
- V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
-
- if (HiIndex >= 2) {
- Mask1[0] = PermMask[0];
- Mask1[1] = PermMask[1];
- Mask1[2] = HiIndex & 1 ? 6 : 4;
- Mask1[3] = HiIndex & 1 ? 4 : 6;
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
- }
-
- Mask1[0] = HiIndex & 1 ? 2 : 0;
- Mask1[1] = HiIndex & 1 ? 0 : 2;
- Mask1[2] = PermMask[2];
- Mask1[3] = PermMask[3];
- if (Mask1[2] >= 0)
- Mask1[2] += 4;
- if (Mask1[3] >= 0)
- Mask1[3] += 4;
- return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
- }
-
- // Break it into (shuffle shuffle_hi, shuffle_lo).
- int LoMask[] = { -1, -1, -1, -1 };
- int HiMask[] = { -1, -1, -1, -1 };
-
- int *MaskPtr = LoMask;
- unsigned MaskIdx = 0;
- unsigned LoIdx = 0;
- unsigned HiIdx = 2;
- for (unsigned i = 0; i != 4; ++i) {
- if (i == 2) {
- MaskPtr = HiMask;
- MaskIdx = 1;
- LoIdx = 0;
- HiIdx = 2;
- }
- int Idx = PermMask[i];
- if (Idx < 0) {
- Locs[i] = std::make_pair(-1, -1);
- } else if (Idx < 4) {
- Locs[i] = std::make_pair(MaskIdx, LoIdx);
- MaskPtr[LoIdx] = Idx;
- LoIdx++;
- } else {
- Locs[i] = std::make_pair(MaskIdx, HiIdx);
- MaskPtr[HiIdx] = Idx;
- HiIdx++;
- }
- }
-
- SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
- SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
- int MaskOps[] = { -1, -1, -1, -1 };
- for (unsigned i = 0; i != 4; ++i)
- if (Locs[i].first != -1)
- MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
- return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
-}
-
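
As a standalone sketch of the two-shuffle decomposition used above (plain ints
stand in for shuffle masks; the names mirror the LLVM code but this is an
illustration, not LLVM API):

#include <cstdio>
#include <utility>

int main() {
  int Perm[4]  = {4, 1, 6, 2};             // two elements from each input
  int Mask1[4] = {-1, -1, -1, -1};         // first shuffle: gather
  int Mask2[4] = {-1, -1, -1, -1};         // second shuffle: reorder
  std::pair<int, int> Locs[4];
  unsigned NumLo = 0, NumHi = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = Perm[i];
    if (Idx < 0)
      Locs[i] = {-1, -1};
    else if (Idx < 4) {
      Locs[i] = {0, (int)NumLo};           // lands in the low half
      Mask1[NumLo++] = Idx;
    } else {
      Locs[i] = {1, (int)NumHi};           // lands in the high half
      Mask1[2 + NumHi++] = Idx;
    }
  }
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      Mask2[i] = (i < 2 ? 0 : 4) + Locs[i].first * 2 + Locs[i].second;
  printf("Mask1: %d %d %d %d\n", Mask1[0], Mask1[1], Mask1[2], Mask1[3]);
  printf("Mask2: %d %d %d %d\n", Mask2[0], Mask2[1], Mask2[2], Mask2[3]);
  // For <4,1,6,2> this prints Mask1: 1 2 4 6 and Mask2: 2 0 7 5.
}
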
-static bool MayFoldVectorLoad(SDValue V) {
- while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
- if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
- V = V.getOperand(0);
- if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
- V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
- // BUILD_VECTOR (load), undef
- V = V.getOperand(0);
-
- return MayFoldLoad(V);
-}
-
-static
-SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
- MVT VT = Op.getSimpleValueType();
-
- // Canonicalize to v2f64.
- V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
- return DAG.getNode(ISD::BITCAST, dl, VT,
- getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
- V1, DAG));
-}
-
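
For reference, MOVDDUP simply duplicates the low f64 lane; a minimal intrinsic
sketch (assumes an SSE3-capable host and -msse3, not part of the patch):

#include <pmmintrin.h>  // SSE3
#include <cstdio>

int main() {
  __m128d v = _mm_set_pd(2.0, 1.0);   // lanes (low first): <1.0, 2.0>
  __m128d d = _mm_movedup_pd(v);      // movddup: <1.0, 1.0>
  double out[2];
  _mm_storeu_pd(out, d);
  printf("%g %g\n", out[0], out[1]);  // 1 1
}
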
-static
-SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
- bool HasSSE2) {
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getSimpleValueType();
-
- assert(VT != MVT::v2i64 && "unsupported shuffle type");
-
- if (HasSSE2 && VT == MVT::v2f64)
- return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
-
- // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
- return DAG.getNode(ISD::BITCAST, dl, VT,
- getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
- DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
- DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
-}
-
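
The MOVLHPS node emitted above concatenates the low halves of its operands; a
minimal intrinsic sketch (assumes an SSE-capable host, not part of the patch):

#include <xmmintrin.h>  // SSE1
#include <cstdio>

int main() {
  __m128 a = _mm_setr_ps(1, 2, 3, 4);
  __m128 b = _mm_setr_ps(5, 6, 7, 8);
  __m128 r = _mm_movelh_ps(a, b);     // <a0, a1, b0, b1> = <1, 2, 5, 6>
  float out[4];
  _mm_storeu_ps(out, r);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
}
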
-static
-SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getSimpleValueType();
-
- assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
- "unsupported shuffle type");
-
- if (V2.getOpcode() == ISD::UNDEF)
- V2 = V1;
-
- // v4i32 or v4f32
- return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
-}
-
-static
-SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getSimpleValueType();
- unsigned NumElems = VT.getVectorNumElements();
-
- // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
- // operand of these instructions is only memory, so check if there's a
- // potential load-folding opportunity here; otherwise use SHUFPS or MOVSD to
- // match the same masks.
- bool CanFoldLoad = false;
-
- // Trivial case, when V2 comes from a load.
- if (MayFoldVectorLoad(V2))
- CanFoldLoad = true;
-
- // When V1 is a load, it can be folded later into a store in isel, example:
- // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
- // turns into:
- // (MOVLPSmr addr:$src1, VR128:$src2)
- // So, recognize this potential and also use MOVLPS or MOVLPD
- else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
- CanFoldLoad = true;
-
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- if (CanFoldLoad) {
- if (HasSSE2 && NumElems == 2)
- return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
-
- if (NumElems == 4)
- // If we don't care about the second element, proceed to use movss.
- if (SVOp->getMaskElt(1) != -1)
- return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
- }
-
- // movl and movlp will both match v2i64, but v2i64 is never matched by
- // movl earlier because we make it strict to avoid messing with the movlp load
- // folding logic (see the code above the getMOVLP call). Match it here
- // instead; this is horrible, but it will stay like this until we move all
- // shuffle matching to x86-specific nodes. Note that for the 1st condition
- // all types are matched with movsd.
- if (HasSSE2) {
- // FIXME: isMOVLMask should be checked and matched before getMOVLP,
- // as to remove this logic from here, as much as possible
- if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
- return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
- return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
- }
-
- assert(VT != MVT::v4i32 && "unsupported shuffle type");
-
- // Invert the operand order and use SHUFPS to match it.
- return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
- getShuffleSHUFImmediate(SVOp), DAG);
-}
-
-static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
- SelectionDAG &DAG) {
- SDLoc dl(Load);
- MVT VT = Load->getSimpleValueType(0);
- MVT EVT = VT.getVectorElementType();
- SDValue Addr = Load->getOperand(1);
- SDValue NewAddr = DAG.getNode(
- ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
- DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
-
- SDValue NewLoad =
- DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
- DAG.getMachineFunction().getMachineMemOperand(
- Load->getMemOperand(), 0, EVT.getStoreSize()));
- return NewLoad;
-}
-
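
The address arithmetic above in scalar form: the narrowed load reads from the
original address plus Index times the element store size (a sketch, not the
DAG code):

#include <cstdio>

int main() {
  float Vec[4] = {10.f, 11.f, 12.f, 13.f}; // stand-in for a v4f32 load
  unsigned Index = 2;
  const float *NewAddr = Vec + Index;      // Addr + Index * StoreSize(4)
  printf("%g\n", *NewAddr);                // 12: the narrowed scalar load
}
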
-// It is only safe to call this function if isINSERTPSMask is true for
-// this shufflevector mask.
-static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
- SelectionDAG &DAG) {
- // Generate an insertps instruction when inserting an f32 from memory onto a
- // v4f32 or when copying an element from one v4f32 to another.
- // We also use it for transferring i32 from one register to another,
- // since it simply copies the same bits.
- // If we're transferring an i32 from memory to a specific element in a
- // register, we output a generic DAG that will match the PINSRD
- // instruction.
- MVT VT = SVOp->getSimpleValueType(0);
- MVT EVT = VT.getVectorElementType();
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- auto Mask = SVOp->getMask();
- assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
- "unsupported vector type for insertps/pinsrd");
-
- auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
- auto FromV2Predicate = [](const int &i) { return i >= 4; };
- int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
-
- SDValue From;
- SDValue To;
- unsigned DestIndex;
- if (FromV1 == 1) {
- From = V1;
- To = V2;
- DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
- Mask.begin();
-
- // If we have 1 element from each vector, we have to check if we're
- // changing V1's element's place. If so, we're done. Otherwise, we
- // should assume we're changing V2's element's place and behave
- // accordingly.
- int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
- assert(DestIndex <= INT32_MAX && "truncated destination index");
- if (FromV1 == FromV2 &&
- static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
- From = V2;
- To = V1;
- DestIndex =
- std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
- }
- } else {
- assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
- "More than one element from V1 and from V2, or no elements from one "
- "of the vectors. This case should not have returned true from "
- "isINSERTPSMask");
- From = V2;
- To = V1;
- DestIndex =
- std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
- }
-
- // Get an index into the source vector in the range [0,4) (the mask is
- // in the range [0,8) because it can address V1 and V2)
- unsigned SrcIndex = Mask[DestIndex] % 4;
- if (MayFoldLoad(From)) {
- // Trivial case: From comes from a load and is only used by the
- // shuffle. Narrow the load to the single element we need and emit
- // insertps from it.
- SDValue NewLoad =
- NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
- if (!NewLoad.getNode())
- return SDValue();
-
- if (EVT == MVT::f32) {
- // Create this as a scalar to vector to match the instruction pattern.
- SDValue LoadScalarToVector =
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
- SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
- return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
- InsertpsMask);
- } else { // EVT == MVT::i32
- // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
- // instruction, to match the PINSRD instruction, which loads an i32 to a
- // certain vector element.
- return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
- DAG.getConstant(DestIndex, MVT::i32));
- }
- }
-
- // Vector-element-to-vector
- SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
- return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
-}
-
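
The INSERTPS immediate built above packs the destination lane into bits [5:4]
and the source lane into bits [7:6] (bits [3:0] are the zero mask); a small
check of the encoding:

#include <cstdio>

int main() {
  unsigned SrcIndex = 1, DestIndex = 3;
  unsigned Imm = DestIndex << 4 | SrcIndex << 6;  // same expression as above
  printf("0x%02X\n", Imm);  // 0x70: read lane 1 of From, write lane 3 of To
}
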
-// Reduce a vector shuffle to zext.
-static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- // PMOVZX is only available from SSE41.
- if (!Subtarget->hasSSE41())
- return SDValue();
-
- MVT VT = Op.getSimpleValueType();
-
- // Only AVX2 supports 256-bit vector integer extension.
- if (!Subtarget->hasInt256() && VT.is256BitVector())
- return SDValue();
-
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- SDLoc DL(Op);
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extending is a unary operation (V2 must be undef), and the element type of
- // the source vector must be smaller than i64.
- if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
- VT.getVectorElementType() == MVT::i64)
- return SDValue();
-
- // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
- unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
- while ((1U << Shift) < NumElems) {
- if (SVOp->getMaskElt(1U << Shift) == 1)
- break;
- Shift += 1;
- // The maximal ratio is 8, i.e. from i8 to i64.
- if (Shift > 3)
- return SDValue();
- }
-
- // Check the shuffle mask.
- unsigned Mask = (1U << Shift) - 1;
- for (unsigned i = 0; i != NumElems; ++i) {
- int EltIdx = SVOp->getMaskElt(i);
- if ((i & Mask) != 0 && EltIdx != -1)
- return SDValue();
- if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
- return SDValue();
- }
-
- unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
- MVT NeVT = MVT::getIntegerVT(NBits);
- MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
-
- if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
- return SDValue();
-
- return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
-}
-
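
The ratio detection above, restated standalone: a shuffle is an extend when
element i of the mask is i >> Shift on each stride boundary and undef
everywhere else (a sketch with plain ints, not LLVM code):

#include <cstdio>

int main() {
  int Mask[8] = {0, -1, 1, -1, 2, -1, 3, -1};  // v8i16 extended by ratio 2
  unsigned NumElems = 8, Shift = 1;
  while ((1u << Shift) < NumElems && Mask[1u << Shift] != 1)
    ++Shift;
  unsigned Ratio = 1u << Shift;                // 2, i.e. i16 -> i32
  bool Matches = true;
  for (unsigned i = 0; i != NumElems; ++i) {
    if ((i & (Ratio - 1)) != 0)
      Matches = Matches && Mask[i] == -1;
    else
      Matches = Matches && (unsigned)Mask[i] == (i >> Shift);
  }
  printf("ratio %u, matches: %d\n", Ratio, Matches);  // ratio 2, matches: 1
}
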
-static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- MVT VT = Op.getSimpleValueType();
- SDLoc dl(Op);
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
-
- if (isZeroShuffle(SVOp))
- return getZeroVector(VT, Subtarget, DAG, dl);
-
- // Handle splat operations
- if (SVOp->isSplat()) {
- // Use vbroadcast whenever the splat comes from a foldable load
- SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
- if (Broadcast.getNode())
- return Broadcast;
- }
-
- // Check integer expanding shuffles.
- SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
- if (NewOp.getNode())
- return NewOp;
-
- // If the shuffle can be profitably rewritten as a narrower shuffle, then
- // do it!
- if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
- VT == MVT::v32i8) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
- if (NewOp.getNode())
- return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
- } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
- // FIXME: Figure out a cleaner way to do this.
- if (ISD::isBuildVectorAllZeros(V2.getNode())) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
- if (NewOp.getNode()) {
- MVT NewVT = NewOp.getSimpleValueType();
- if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
- NewVT, true, false))
- return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
- dl);
- }
- } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
- if (NewOp.getNode()) {
- MVT NewVT = NewOp.getSimpleValueType();
- if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
- return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
- dl);
- }
- }
- }
- return SDValue();
-}
-
-SDValue
-X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getSimpleValueType();
- SDLoc dl(Op);
- unsigned NumElems = VT.getVectorNumElements();
- bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
- bool V1IsSplat = false;
- bool V2IsSplat = false;
- bool HasSSE2 = Subtarget->hasSSE2();
- bool HasFp256 = Subtarget->hasFp256();
- bool HasInt256 = Subtarget->hasInt256();
- MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
-
- // Check if we should use the experimental vector shuffle lowering. If so,
- // delegate completely to that code path.
- if (ExperimentalVectorShuffleLowering)
- return lowerVectorShuffle(Op, Subtarget, DAG);
-
- assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
-
- if (V1IsUndef && V2IsUndef)
- return DAG.getUNDEF(VT);
-
- // When we create a shuffle node we put the UNDEF node in the second operand,
- // but in some cases the first operand may be transformed to UNDEF.
- // In this case we should just commute the node.
- if (V1IsUndef)
- return DAG.getCommutedVectorShuffle(*SVOp);
-
- // Vector shuffle lowering takes 3 steps:
- //
- // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
- // narrowing and commutation of operands should be handled.
- // 2) Matching of shuffles with known shuffle masks to x86 target specific
- // shuffle nodes.
- // 3) Rewriting of unmatched masks into new generic shuffle operations,
- // so the shuffle can be broken into other shuffles and the legalizer can
- // try the lowering again.
- //
- // The general idea is that no vector_shuffle operation should be left to
- // be matched during isel, all of them must be converted to a target specific
- // node here.
-
- // Normalize the input vectors. Here splats, zeroed vectors, profitable
- // narrowing and commutation of operands should be handled. The actual code
- // doesn't include all of those, work in progress...
- SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
- if (NewOp.getNode())
- return NewOp;
-
- SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
-
- // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
- // unpckh_undef). Only use pshufd if speed is more important than size.
- if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
- if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
-
- if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
- V2IsUndef && MayFoldVectorLoad(V1))
- return getMOVDDup(Op, dl, V1, DAG);
-
- if (isMOVHLPS_v_undef_Mask(M, VT))
- return getMOVHighToLow(Op, dl, DAG);
-
- // Used to match splats
- if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
- (VT == MVT::v2f64 || VT == MVT::v2i64))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
-
- if (isPSHUFDMask(M, VT)) {
- // The actual implementation will match the mask in the if above, and then
- // during isel it can match several different instructions, not only pshufd
- // as its name suggests. Sad but true; emulate the behavior for now...
- if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
- return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
-
- unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
-
- if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
- return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
-
- if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
- return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
- DAG);
-
- return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
- TargetMask, DAG);
- }
-
- if (isPALIGNRMask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
- getShufflePALIGNRImmediate(SVOp),
- DAG);
-
- if (isVALIGNMask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
- getShuffleVALIGNImmediate(SVOp),
- DAG);
-
- // Check if this can be converted into a logical shift.
- bool isLeft = false;
- unsigned ShAmt = 0;
- SDValue ShVal;
- bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
- if (isShift && ShVal.hasOneUse()) {
- // If the shifted value has multiple uses, it may be cheaper to use
- // v_set0 + movlhps or movhlps, etc.
- MVT EltVT = VT.getVectorElementType();
- ShAmt *= EltVT.getSizeInBits();
- return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
- }
-
- if (isMOVLMask(M, VT)) {
- if (ISD::isBuildVectorAllZeros(V1.getNode()))
- return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
- if (!isMOVLPMask(M, VT)) {
- if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
- return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
-
- if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
- }
- }
-
- // FIXME: fold these into legal mask.
- if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
- return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
-
- if (isMOVHLPSMask(M, VT))
- return getMOVHighToLow(Op, dl, DAG);
-
- if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
-
- if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
-
- if (isMOVLPMask(M, VT))
- return getMOVLP(Op, dl, DAG, HasSSE2);
-
- if (ShouldXformToMOVHLPS(M, VT) ||
- ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
- return DAG.getCommutedVectorShuffle(*SVOp);
-
- if (isShift) {
- // No better options. Use a vshldq / vsrldq.
- MVT EltVT = VT.getVectorElementType();
- ShAmt *= EltVT.getSizeInBits();
- return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
- }
-
- bool Commuted = false;
- // FIXME: This should also accept a bitcast of a splat? Be careful, not
- // 1,1,1,1 -> v8i16 though.
- BitVector UndefElements;
- if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
- if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
- V1IsSplat = true;
- if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
- if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
- V2IsSplat = true;
-
- // Canonicalize the splat or undef, if present, to be on the RHS.
- if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
- CommuteVectorShuffleMask(M, NumElems);
- std::swap(V1, V2);
- std::swap(V1IsSplat, V2IsSplat);
- Commuted = true;
- }
-
- if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
- // Shuffling low element of v1 into undef, just return v1.
- if (V2IsUndef)
- return V1;
- // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
- // the instruction selector will not match, so get a canonical MOVL with
- // swapped operands to undo the commute.
- return getMOVL(DAG, dl, VT, V2, V1);
- }
-
- if (isUNPCKLMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
-
- if (isUNPCKHMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
-
- if (V2IsSplat) {
- // Normalize the mask so all entries that point to V2 point to its first
- // element, then try to match unpck{h|l} again. If it matches, return a
- // new vector_shuffle with the corrected mask.
- SmallVector<int, 8> NewMask(M.begin(), M.end());
- NormalizeMask(NewMask, NumElems);
- if (isUNPCKLMask(NewMask, VT, HasInt256, true))
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
- if (isUNPCKHMask(NewMask, VT, HasInt256, true))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
- }
-
- if (Commuted) {
- // Commute it back and try unpck* again.
- // FIXME: this seems wrong.
- CommuteVectorShuffleMask(M, NumElems);
- std::swap(V1, V2);
- std::swap(V1IsSplat, V2IsSplat);
-
- if (isUNPCKLMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
-
- if (isUNPCKHMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
- }
-
- // Normalize the node to match x86 shuffle ops if needed
- if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
- return DAG.getCommutedVectorShuffle(*SVOp);
-
- // The checks below are all present in isShuffleMaskLegal, but they are
- // inlined here right now to let us directly emit target-specific nodes;
- // we'll remove them one by one until they no longer return Op.
-
- if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
- SVOp->getSplatIndex() == 0 && V2IsUndef) {
- if (VT == MVT::v2f64 || VT == MVT::v2i64)
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
- }
-
- if (isPSHUFHWMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
- getShufflePSHUFHWImmediate(SVOp),
- DAG);
-
- if (isPSHUFLWMask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
- getShufflePSHUFLWImmediate(SVOp),
- DAG);
-
- unsigned MaskValue;
- if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
- &MaskValue))
- return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
-
- if (isSHUFPMask(M, VT))
- return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
- getShuffleSHUFImmediate(SVOp), DAG);
-
- if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
- if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
- return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
-
- //===--------------------------------------------------------------------===//
- // Generate target-specific nodes for 128- or 256-bit shuffles that are only
- // supported in the AVX instruction set.
- //
-
- // Handle VMOVDDUPY permutations
- if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
- return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
-
- // Handle VPERMILPS/D* permutations
- if (isVPERMILPMask(M, VT)) {
- if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
- return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
- getShuffleSHUFImmediate(SVOp), DAG);
- return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
- getShuffleSHUFImmediate(SVOp), DAG);
- }
-
- unsigned Idx;
- if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
- return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
- Idx*(NumElems/2), DAG, dl);
-
- // Handle VPERM2F128/VPERM2I128 permutations
- if (isVPERM2X128Mask(M, VT, HasFp256))
- return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
- V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
-
- if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
- return getINSERTPS(SVOp, dl, DAG);
-
- unsigned Imm8;
- if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
- return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
-
- if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
- VT.is512BitVector()) {
- MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
- MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
- SmallVector<SDValue, 16> permclMask;
- for (unsigned i = 0; i != NumElems; ++i) {
- permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
- }
-
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
- if (V2IsUndef)
- // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
- return DAG.getNode(X86ISD::VPERMV, dl, VT,
- DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
- return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
- DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
- }
-
- //===--------------------------------------------------------------------===//
- // Since no target specific shuffle was selected for this generic one,
- // lower it into other known shuffles. FIXME: this isn't true yet, but
- // this is the plan.
- //
-
- // Handle v8i16 specifically since SSE can do byte extraction and insertion.
- if (VT == MVT::v8i16) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
- if (NewOp.getNode())
- return NewOp;
- }
-
- if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
- if (NewOp.getNode())
- return NewOp;
- }
-
- if (VT == MVT::v16i8) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
- if (NewOp.getNode())
- return NewOp;
- }
-
- if (VT == MVT::v32i8) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
- if (NewOp.getNode())
- return NewOp;
- }
-
- // Handle all 128-bit wide vectors with 4 elements, and match them with
- // several different shuffle types.
- if (NumElems == 4 && VT.is128BitVector())
- return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
-
- // Handle general 256-bit shuffles
- if (VT.is256BitVector())
- return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
-
- return SDValue();
-}
-
// This function assumes its argument is a BUILD_VECTOR of constants or
// undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
// true.
@@ -12344,48 +10083,29 @@ static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
return true;
}
-/// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
-/// instruction.
-static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+/// \brief Try to lower a VSELECT instruction to a vector shuffle.
+static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
- MVT EltVT = VT.getVectorElementType();
- unsigned NumElems = VT.getVectorNumElements();
-
- // There is no blend with immediate in AVX-512.
- if (VT.is512BitVector())
- return SDValue();
-
- if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
- return SDValue();
- if (!Subtarget->hasInt256() && VT == MVT::v16i16)
- return SDValue();
if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
+ auto *CondBV = cast<BuildVectorSDNode>(Cond);
- // Check the mask for BLEND and build the value.
- unsigned MaskValue = 0;
- if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
- return SDValue();
-
- // Convert i32 vectors to floating point if it is not AVX2.
- // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
- MVT BlendVT = VT;
- if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
- BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
- NumElems);
- LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
- RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
+ // Only non-legal VSELECTs reach this lowering; convert those into generic
+ // shuffles and reuse the shuffle lowering path for blends.
+ SmallVector<int, 32> Mask;
+ for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
+ SDValue CondElt = CondBV->getOperand(i);
+ Mask.push_back(
+ isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
}
-
- SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
- DAG.getConstant(MaskValue, MVT::i32));
- return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
+ return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
}
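
The mask construction above in miniature: a non-zero condition lane selects
LHS element i, a zero lane selects RHS element i + Size, and a non-constant
lane would become -1 (undef). A sketch:

#include <cstdio>

int main() {
  int Cond[4] = {-1, 0, -1, 0};  // all-ones / all-zeros condition lanes
  const int Size = 4;
  int Mask[4];
  for (int i = 0; i < Size; ++i)
    Mask[i] = i + (Cond[i] == 0 ? Size : 0);
  printf("%d %d %d %d\n", Mask[0], Mask[1], Mask[2], Mask[3]);  // 0 5 2 7
}
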
SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -12396,28 +10116,41 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
return SDValue();
- SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
+ // Try to lower this to a blend-style vector shuffle. This can handle all
+ // constant condition cases.
+ SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
if (BlendOp.getNode())
return BlendOp;
- // Some types for vselect were previously set to Expand, not Legal or
- // Custom. Return an empty SDValue so we fall-through to Expand, after
- // the Custom lowering phase.
- MVT VT = Op.getSimpleValueType();
- switch (VT.SimpleTy) {
+ // Variable blends are only legal from SSE4.1 onward.
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ // Only some types will be legal on some subtargets. If we can emit a legal
+ // VSELECT-matching blend, return Op; if we need to expand, return a null
+ // value.
+ switch (Op.getSimpleValueType().SimpleTy) {
default:
- break;
+ // Most of the vector types have blends past SSE4.1.
+ return Op;
+
+ case MVT::v32i8:
+ // The byte blends for AVX vectors were introduced only in AVX2.
+ if (Subtarget->hasAVX2())
+ return Op;
+
+ return SDValue();
+
case MVT::v8i16:
case MVT::v16i16:
+ // AVX-512 BWI and VLX features support VSELECT with i16 elements.
if (Subtarget->hasBWI() && Subtarget->hasVLX())
- break;
+ return Op;
+
+ // FIXME: We should custom lower this by fixing the condition and using i8
+ // blends.
return SDValue();
}
-
- // We couldn't create a "Blend with immediate" node.
- // This node should still be legal, but we'll have to emit a blendv*
- // instruction.
- return Op;
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
@@ -12493,6 +10226,8 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
MVT EltVT = Op.getSimpleValueType();
assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
+ assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
+ "Unexpected vector type in ExtractBitFromMaskVector");
// A variable index can't be handled in mask registers,
// so extend the vector to VR512.
@@ -12506,6 +10241,8 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
const TargetRegisterClass* rc = getRegClassFor(VecVT);
+ if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
+ rc = getRegClassFor(MVT::v16i1);
unsigned MaxSift = rc->getSize()*8 - 1;
Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
DAG.getConstant(MaxSift - IdxVal, MVT::i8));
@@ -12631,7 +10368,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
/// Insert one bit into a mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
-SDValue
+SDValue
X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
@@ -12644,7 +10381,7 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
// insert element and then truncate the result.
MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
- SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
+ SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
@@ -12815,27 +10552,47 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- if (Subtarget->hasFp256()) {
- SDLoc dl(Op.getNode());
- SDValue Vec = Op.getNode()->getOperand(0);
- SDValue SubVec = Op.getNode()->getOperand(1);
- SDValue Idx = Op.getNode()->getOperand(2);
-
- if ((Op.getNode()->getSimpleValueType(0).is256BitVector() ||
- Op.getNode()->getSimpleValueType(0).is512BitVector()) &&
- SubVec.getNode()->getSimpleValueType(0).is128BitVector() &&
- isa<ConstantSDNode>(Idx)) {
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
- }
+ if (!Subtarget->hasAVX())
+ return SDValue();
- if (Op.getNode()->getSimpleValueType(0).is512BitVector() &&
- SubVec.getNode()->getSimpleValueType(0).is256BitVector() &&
- isa<ConstantSDNode>(Idx)) {
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ SDLoc dl(Op);
+ SDValue Vec = Op.getOperand(0);
+ SDValue SubVec = Op.getOperand(1);
+ SDValue Idx = Op.getOperand(2);
+
+ if (!isa<ConstantSDNode>(Idx))
+ return SDValue();
+
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ MVT OpVT = Op.getSimpleValueType();
+ MVT SubVecVT = SubVec.getSimpleValueType();
+
+ // Fold two 16-byte subvector loads into one 32-byte load:
+ // (insert_subvector (insert_subvector undef, (load addr), 0),
+ // (load addr + 16), Elts/2)
+ // --> load32 addr
+ if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
+ Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
+ !Subtarget->isUnalignedMem32Slow()) {
+ SDValue SubVec2 = Vec.getOperand(1);
+ if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
+ if (Idx2->getZExtValue() == 0) {
+ SDValue Ops[] = { SubVec2, SubVec };
+ SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
+ if (LD.getNode())
+ return LD;
+ }
}
}
+
+ if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
+ SubVecVT.is128BitVector())
+ return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
+
+ if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
+ return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
+
return SDValue();
}
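
The load fold above rests on a simple byte-level equivalence: two adjacent
16-byte loads cover exactly the bytes of one 32-byte load from the lower
address. Demonstrated with memcpy (a sketch, not the DAG transform):

#include <cstdio>
#include <cstring>

int main() {
  unsigned char Buf[32];
  for (int i = 0; i < 32; ++i) Buf[i] = (unsigned char)i;
  unsigned char Lo[16], Hi[16], Wide[32];
  std::memcpy(Lo, Buf, 16);       // (load addr)
  std::memcpy(Hi, Buf + 16, 16);  // (load addr + 16)
  std::memcpy(Wide, Buf, 32);     // --> load32 addr
  printf("%d\n", std::memcmp(Wide, Lo, 16) == 0 &&
                 std::memcmp(Wide + 16, Hi, 16) == 0);  // 1
}
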
@@ -13392,7 +11149,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
}
return SDValue();
}
-
+
assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
@@ -14039,7 +11796,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
}
-
+
SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, getPointerTy());
@@ -14233,7 +11990,7 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
EltVT = VT.getVectorElementType();
NumElts = VT.getVectorNumElements();
}
-
+
unsigned EltBits = EltVT.getSizeInBits();
LLVMContext *Context = DAG.getContext();
// For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
@@ -14260,7 +12017,7 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
}
-
+
// If not vector, then scalar.
unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
@@ -14290,19 +12047,17 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
// At this point the operands and the result should have the same
// type, and that won't be f80 since that is not custom lowered.
- // First get the sign bit of second operand.
- SmallVector<Constant*,4> CV;
- if (SrcVT == MVT::f64) {
- const fltSemantics &Sem = APFloat::IEEEdouble;
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 1ULL << 63))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0))));
- } else {
- const fltSemantics &Sem = APFloat::IEEEsingle;
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 1U << 31))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
- }
+ const fltSemantics &Sem =
+ VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
+ const unsigned SizeInBits = VT.getSizeInBits();
+
+ SmallVector<Constant *, 4> CV(
+ VT == MVT::f64 ? 2 : 4,
+ ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
+
+ // First, clear all bits but the sign bit from the second operand (sign).
+ CV[0] = ConstantFP::get(*Context,
+ APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
@@ -14310,40 +12065,30 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
false, false, false, 16);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
- // Shift sign bit right or left if the two operands have different types.
- if (SrcVT.bitsGT(VT)) {
- // Op0 is MVT::f32, Op1 is MVT::f64.
- SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
- SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
- DAG.getConstant(32, MVT::i32));
- SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
- SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
- DAG.getIntPtrConstant(0));
- }
-
- // Clear first operand sign bit.
- CV.clear();
- if (VT == MVT::f64) {
- const fltSemantics &Sem = APFloat::IEEEdouble;
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem,
- APInt(64, ~(1ULL << 63)))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0))));
+ // Next, clear the sign bit from the first operand (magnitude).
+ // If it's a constant, we can clear it here.
+ if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
+ APFloat APF = Op0CN->getValueAPF();
+ // If the magnitude is a positive zero, the sign bit alone is enough.
+ if (APF.isPosZero())
+ return SignBit;
+ APF.clearSign();
+ CV[0] = ConstantFP::get(*Context, APF);
} else {
- const fltSemantics &Sem = APFloat::IEEEsingle;
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem,
- APInt(32, ~(1U << 31)))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
- CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
+ CV[0] = ConstantFP::get(
+ *Context,
+ APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
}
C = ConstantVector::get(CV);
CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
- SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(),
- false, false, false, 16);
- SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);
-
- // Or the value with the sign bit.
+ SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, 16);
+ // If the magnitude operand wasn't a constant, we need to AND out the sign.
+ if (!isa<ConstantFPSDNode>(Op0))
+ Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
+
+ // OR the magnitude value with the sign bit.
return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}
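
The FAND/FOR sequence above is ordinary bit-level copysign: keep every
magnitude bit except the sign, keep only the sign bit of the sign operand,
then OR the two. A scalar f64 sketch:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double Mag = 1.5, Sign = -0.0, Res;
  uint64_t M, S;
  std::memcpy(&M, &Mag, 8);
  std::memcpy(&S, &Sign, 8);
  uint64_t R = (M & ~(1ULL << 63)) | (S & (1ULL << 63));
  std::memcpy(&Res, &R, 8);
  printf("%g\n", Res);  // -1.5
}
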
@@ -14473,11 +12218,11 @@ static bool hasNonFlagsUse(SDValue Op) {
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const {
- if (Op.getValueType() == MVT::i1)
- // KORTEST instruction should be selected
- return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
- DAG.getConstant(0, Op.getValueType()));
-
+ if (Op.getValueType() == MVT::i1) {
+ SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
+ DAG.getConstant(0, MVT::i8));
+ }
// CF and OF aren't always set the way we want. Determine which
// of these we need.
bool NeedCF = false;
@@ -14697,9 +12442,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
DAG.getConstant(0, Op.getValueType()));
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0; i != NumOperands; ++i)
- Ops.push_back(Op.getOperand(i));
+ SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
DAG.ReplaceAllUsesWith(Op, New);
@@ -14717,16 +12460,16 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
if (Op0.getValueType() == MVT::i1)
llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
}
-
+
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
- // Do the comparison at i32 if it's smaller, besides the Atom case.
- // This avoids subregister aliasing issues. Keep the smaller reference
- // if we're optimizing for size, however, as that'll allow better folding
+ // Do the comparison at i32 if it's smaller, besides the Atom case.
+ // This avoids subregister aliasing issues. Keep the smaller reference
+ // if we're optimizing for size, however, as that'll allow better folding
// of memory operations.
if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
- !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::MinSize) &&
+ !DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::MinSize) &&
!Subtarget->isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -14780,7 +12523,7 @@ SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
return SDValue();
EVT VT = Op.getValueType();
-
+
// SSE1 has rsqrtss and rsqrtps.
// TODO: Add support for AVX512 (v16f32).
// It is likely not profitable to do this for f64 because a double-precision
@@ -14808,9 +12551,9 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
// significant digits in the divisor.
if (!Subtarget->useReciprocalEst())
return SDValue();
-
+
EVT VT = Op.getValueType();
-
+
// SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
// TODO: Add support for AVX512 (v16f32).
// It is likely not profitable to do this for f64 because a double-precision
@@ -15307,8 +13050,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
cast<ConstantSDNode>(Op1)->isNullValue() &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
- if (NewSetCC.getNode())
+ if (NewSetCC.getNode()) {
+ if (VT == MVT::i1)
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
return NewSetCC;
+ }
}
// Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
@@ -15629,11 +13375,11 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget
((Subtarget->hasDQI() && Subtarget->hasVLX() &&
VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
-
+
((Subtarget->hasDQI() && VT.is512BitVector() &&
VTElt.getSizeInBits() >= 32))))
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
-
+
unsigned int NumElts = VT.getVectorNumElements();
if (NumElts != 8 && NumElts != 16)
@@ -15718,6 +13464,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
+// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
@@ -15797,9 +13544,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
// Attempt to load the original value using scalar loads.
// Find the largest scalar type that divides the total loaded size.
MVT SclrLoadTy = MVT::i8;
- for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
- tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
- MVT Tp = (MVT::SimpleValueType)tp;
+ for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
SclrLoadTy = Tp;
}
@@ -16232,7 +13977,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool SplitStack = MF.shouldSplitStack();
- bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMacho()) ||
+ bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
SplitStack;
SDLoc dl(Op);
@@ -16258,7 +14003,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- const TargetFrameLowering &TFI = *DAG.getSubtarget().getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
unsigned StackAlign = TFI.getStackAlignment();
Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
@@ -16316,8 +14061,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
@@ -16427,21 +14171,16 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!DAG.getTarget().Options.UseSoftFloat &&
- !(DAG.getMachineFunction()
- .getFunction()->getAttributes()
- .hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoImplicitFloat)) &&
+ !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::NoImplicitFloat)) &&
Subtarget->hasSSE1());
}
// Insert VAARG_64 node into the DAG
// VAARG_64 returns two values: Variable Argument Address, Chain
- SmallVector<SDValue, 11> InstOps;
- InstOps.push_back(Chain);
- InstOps.push_back(SrcPtr);
- InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
- InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
- InstOps.push_back(DAG.getConstant(Align, MVT::i32));
+ SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32),
+ DAG.getConstant(ArgMode, MVT::i8),
+ DAG.getConstant(Align, MVT::i32)};
SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
VTs, InstOps, MVT::i64,
@@ -16558,7 +14297,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
SelectionDAG &DAG) {
- assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");
+ MVT SVT = ShAmt.getSimpleValueType();
+ assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
// Catch shift-by-constant.
if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
@@ -16573,13 +14313,28 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
}
- // Need to build a vector containing the shift amount.
- // The shift amount is 32 bits, but SSE instructions read 64 bits, so fill with 0.
- SDValue ShOps[4];
- ShOps[0] = ShAmt;
- ShOps[1] = DAG.getConstant(0, MVT::i32);
- ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32);
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, ShOps);
+ const X86Subtarget &Subtarget =
+ static_cast<const X86Subtarget &>(DAG.getSubtarget());
+ if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
+ ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
+ // Let the shuffle legalizer expand this shift amount node.
+ SDValue Op0 = ShAmt.getOperand(0);
+ Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
+ ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
+ } else {
+ // Need to build a vector containing the shift amount.
+ // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
+ SmallVector<SDValue, 4> ShOps;
+ ShOps.push_back(ShAmt);
+ if (SVT == MVT::i32) {
+ ShOps.push_back(DAG.getConstant(0, SVT));
+ ShOps.push_back(DAG.getUNDEF(SVT));
+ }
+ ShOps.push_back(DAG.getUNDEF(SVT));
+
+ MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
+ }
// The return type has to be a 128-bit type with the same element
// type as the input type.
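
The shift-amount vector built above has the shape the SSE scalar-count shifts
expect: the count in the low 64 bits of a 128-bit register. An intrinsic-level
sketch (assumes SSE2; the DAG version may leave the upper lanes undef):

#include <emmintrin.h>  // SSE2
#include <cstdio>

int main() {
  __m128i v   = _mm_set_epi32(8, 4, 2, 1);
  __m128i amt = _mm_cvtsi32_si128(5);   // <5, 0, 0, 0>
  __m128i r   = _mm_sll_epi32(v, amt);  // every lane shifted by the low count
  int out[4];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 32 64 128 256
}
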
@@ -16628,52 +14383,28 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
}
-static unsigned getOpcodeForFMAIntrinsic(unsigned IntNo) {
- switch (IntNo) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_fma_vfmadd_ps:
- case Intrinsic::x86_fma_vfmadd_pd:
- case Intrinsic::x86_fma_vfmadd_ps_256:
- case Intrinsic::x86_fma_vfmadd_pd_256:
- case Intrinsic::x86_fma_mask_vfmadd_ps_512:
- case Intrinsic::x86_fma_mask_vfmadd_pd_512:
- return X86ISD::FMADD;
- case Intrinsic::x86_fma_vfmsub_ps:
- case Intrinsic::x86_fma_vfmsub_pd:
- case Intrinsic::x86_fma_vfmsub_ps_256:
- case Intrinsic::x86_fma_vfmsub_pd_256:
- case Intrinsic::x86_fma_mask_vfmsub_ps_512:
- case Intrinsic::x86_fma_mask_vfmsub_pd_512:
- return X86ISD::FMSUB;
- case Intrinsic::x86_fma_vfnmadd_ps:
- case Intrinsic::x86_fma_vfnmadd_pd:
- case Intrinsic::x86_fma_vfnmadd_ps_256:
- case Intrinsic::x86_fma_vfnmadd_pd_256:
- case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
- case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
- return X86ISD::FNMADD;
- case Intrinsic::x86_fma_vfnmsub_ps:
- case Intrinsic::x86_fma_vfnmsub_pd:
- case Intrinsic::x86_fma_vfnmsub_ps_256:
- case Intrinsic::x86_fma_vfnmsub_pd_256:
- case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
- case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
- return X86ISD::FNMSUB;
- case Intrinsic::x86_fma_vfmaddsub_ps:
- case Intrinsic::x86_fma_vfmaddsub_pd:
- case Intrinsic::x86_fma_vfmaddsub_ps_256:
- case Intrinsic::x86_fma_vfmaddsub_pd_256:
- case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
- case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
- return X86ISD::FMADDSUB;
- case Intrinsic::x86_fma_vfmsubadd_ps:
- case Intrinsic::x86_fma_vfmsubadd_pd:
- case Intrinsic::x86_fma_vfmsubadd_ps_256:
- case Intrinsic::x86_fma_vfmsubadd_pd_256:
- case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
- case Intrinsic::x86_fma_mask_vfmsubadd_pd_512:
- return X86ISD::FMSUBADD;
- }
+/// \brief Creates an SDNode for a predicated scalar operation.
+/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
+/// The mask is coming in as MVT::i8 and it should be truncated
+/// to MVT::i1 while lowering masking intrinsics.
+/// The main difference between ScalarMaskingNode and VectorMaskingNode is
+/// that this one uses "X86select" instead of "vselect": we just can't create
+/// the "vselect" node for a scalar instruction.
+static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
+ SDValue PreservedSrc,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ if (isAllOnes(Mask))
+ return Op;
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ // The mask should be of type MVT::i1
+ SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
+
+ if (PreservedSrc.getOpcode() == ISD::UNDEF)
+ PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
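
Select-style masking, as getVectorMaskingNode and getScalarMaskingNode both
implement it: lane i keeps the computed value when mask bit i is set, and
falls back to PreservedSrc otherwise (zero when the source was undef). A
scalar-loop sketch of the semantics:

#include <cstdio>

int main() {
  int Op[4]  = {10, 11, 12, 13};  // the computed result
  int Src[4] = {0, 1, 2, 3};      // PreservedSrc
  unsigned Mask = 0x5;            // k-register style: bit i drives lane i
  int Res[4];
  for (int i = 0; i < 4; ++i)
    Res[i] = ((Mask >> i) & 1) ? Op[i] : Src[i];
  printf("%d %d %d %d\n", Res[0], Res[1], Res[2], Res[3]);  // 10 1 12 3
}
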
static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
@@ -16701,7 +14432,73 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
RoundingMode),
Mask, Src0, Subtarget, DAG);
}
-
+ case INTR_TYPE_SCALAR_MASK_RM: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src0 = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+ // There are 2 kinds of intrinsics in this group:
+ // (1) With suppress-all-exceptions (SAE) - 6 operands
+ // (2) With rounding mode and SAE - 7 operands.
+ if (Op.getNumOperands() == 6) {
+ SDValue Sae = Op.getOperand(5);
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
+ Sae),
+ Mask, Src0, Subtarget, DAG);
+ }
+ assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
+ SDValue RoundingMode = Op.getOperand(5);
+ SDValue Sae = Op.getOperand(6);
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
+ RoundingMode, Sae),
+ Mask, Src0, Subtarget, DAG);
+ }
+ case INTR_TYPE_2OP_MASK: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue PassThru = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+ // We specify 2 possible opcodes for intrinsics with rounding modes.
+ // First, we check if the intrinsic may have non-default rounding mode,
+ // (IntrData->Opc1 != 0), then we check the rounding mode operand.
+ unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
+ if (IntrWithRoundingModeOpcode != 0) {
+ SDValue Rnd = Op.getOperand(5);
+ unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
+ if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
+ return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
+ dl, Op.getValueType(),
+ Src1, Src2, Rnd),
+ Mask, PassThru, Subtarget, DAG);
+ }
+ }
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
+ Src1,Src2),
+ Mask, PassThru, Subtarget, DAG);
+ }
+ case FMA_OP_MASK: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src3 = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+ // We specify 2 possible opcodes for intrinsics with rounding modes.
+ // First, we check if the intrinsic may have non-default rounding mode,
+ // (IntrData->Opc1 != 0), then we check the rounding mode operand.
+ unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
+ if (IntrWithRoundingModeOpcode != 0) {
+ SDValue Rnd = Op.getOperand(5);
+ if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
+ X86::STATIC_ROUNDING::CUR_DIRECTION)
+ return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
+ dl, Op.getValueType(),
+ Src1, Src2, Src3, Rnd),
+ Mask, Src1, Subtarget, DAG);
+ }
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
+ dl, Op.getValueType(),
+ Src1, Src2, Src3),
+ Mask, Src1, Subtarget, DAG);
+ }
case CMP_MASK:
case CMP_MASK_CC: {
// Comparison intrinsics with masks.
@@ -16751,9 +14548,45 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), DAG);
case VSHIFT_MASK:
- return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
- Op.getOperand(1), Op.getOperand(2), DAG),
- Op.getOperand(4), Op.getOperand(3), Subtarget, DAG);;
+ return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
+ Op.getSimpleValueType(),
+ Op.getOperand(1),
+ Op.getOperand(2), DAG),
+ Op.getOperand(4), Op.getOperand(3), Subtarget,
+ DAG);
+ case COMPRESS_EXPAND_IN_REG: {
+ SDValue Mask = Op.getOperand(3);
+ SDValue DataToCompress = Op.getOperand(1);
+ SDValue PassThru = Op.getOperand(2);
+ if (isAllOnes(Mask)) // return data as is
+ return Op.getOperand(1);
+ EVT VT = Op.getValueType();
+ EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ VT.getVectorNumElements());
+ EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ Mask.getValueType().getSizeInBits());
+ SDLoc dl(Op);
+ SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
+ DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
+ DAG.getIntPtrConstant(0));
+
+ return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
+ PassThru);
+ }
+ case BLEND: {
+ SDValue Mask = Op.getOperand(3);
+ EVT VT = Op.getValueType();
+ EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ VT.getVectorNumElements());
+ EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ Mask.getValueType().getSizeInBits());
+ SDLoc dl(Op);
+ SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
+ DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
+ DAG.getIntPtrConstant(0));
+ return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
+ Op.getOperand(2));
+ }
default:
break;
}
@@ -16762,138 +14595,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
- // Arithmetic intrinsics.
- case Intrinsic::x86_sse2_pmulu_dq:
- case Intrinsic::x86_avx2_pmulu_dq:
- return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse41_pmuldq:
- case Intrinsic::x86_avx2_pmul_dq:
- return DAG.getNode(X86ISD::PMULDQ, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_pmulhu_w:
- case Intrinsic::x86_avx2_pmulhu_w:
- return DAG.getNode(ISD::MULHU, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_pmulh_w:
- case Intrinsic::x86_avx2_pmulh_w:
- return DAG.getNode(ISD::MULHS, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- // SSE/SSE2/AVX floating point max/min intrinsics.
- case Intrinsic::x86_sse_max_ps:
- case Intrinsic::x86_sse2_max_pd:
- case Intrinsic::x86_avx_max_ps_256:
- case Intrinsic::x86_avx_max_pd_256:
- case Intrinsic::x86_sse_min_ps:
- case Intrinsic::x86_sse2_min_pd:
- case Intrinsic::x86_avx_min_ps_256:
- case Intrinsic::x86_avx_min_pd_256: {
- unsigned Opcode;
- switch (IntNo) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_sse_max_ps:
- case Intrinsic::x86_sse2_max_pd:
- case Intrinsic::x86_avx_max_ps_256:
- case Intrinsic::x86_avx_max_pd_256:
- Opcode = X86ISD::FMAX;
- break;
- case Intrinsic::x86_sse_min_ps:
- case Intrinsic::x86_sse2_min_pd:
- case Intrinsic::x86_avx_min_ps_256:
- case Intrinsic::x86_avx_min_pd_256:
- Opcode = X86ISD::FMIN;
- break;
- }
- return DAG.getNode(Opcode, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- }
-
- // AVX2 variable shift intrinsics
- case Intrinsic::x86_avx2_psllv_d:
- case Intrinsic::x86_avx2_psllv_q:
- case Intrinsic::x86_avx2_psllv_d_256:
- case Intrinsic::x86_avx2_psllv_q_256:
- case Intrinsic::x86_avx2_psrlv_d:
- case Intrinsic::x86_avx2_psrlv_q:
- case Intrinsic::x86_avx2_psrlv_d_256:
- case Intrinsic::x86_avx2_psrlv_q_256:
- case Intrinsic::x86_avx2_psrav_d:
- case Intrinsic::x86_avx2_psrav_d_256: {
- unsigned Opcode;
- switch (IntNo) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_avx2_psllv_d:
- case Intrinsic::x86_avx2_psllv_q:
- case Intrinsic::x86_avx2_psllv_d_256:
- case Intrinsic::x86_avx2_psllv_q_256:
- Opcode = ISD::SHL;
- break;
- case Intrinsic::x86_avx2_psrlv_d:
- case Intrinsic::x86_avx2_psrlv_q:
- case Intrinsic::x86_avx2_psrlv_d_256:
- case Intrinsic::x86_avx2_psrlv_q_256:
- Opcode = ISD::SRL;
- break;
- case Intrinsic::x86_avx2_psrav_d:
- case Intrinsic::x86_avx2_psrav_d_256:
- Opcode = ISD::SRA;
- break;
- }
- return DAG.getNode(Opcode, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- }
-
- case Intrinsic::x86_sse2_packssdw_128:
- case Intrinsic::x86_sse2_packsswb_128:
- case Intrinsic::x86_avx2_packssdw:
- case Intrinsic::x86_avx2_packsswb:
- return DAG.getNode(X86ISD::PACKSS, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_packuswb_128:
- case Intrinsic::x86_sse41_packusdw:
- case Intrinsic::x86_avx2_packuswb:
- case Intrinsic::x86_avx2_packusdw:
- return DAG.getNode(X86ISD::PACKUS, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_ssse3_pshuf_b_128:
- case Intrinsic::x86_avx2_pshuf_b:
- return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_pshuf_d:
- return DAG.getNode(X86ISD::PSHUFD, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_pshufl_w:
- return DAG.getNode(X86ISD::PSHUFLW, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_sse2_pshufh_w:
- return DAG.getNode(X86ISD::PSHUFHW, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_ssse3_psign_b_128:
- case Intrinsic::x86_ssse3_psign_w_128:
- case Intrinsic::x86_ssse3_psign_d_128:
- case Intrinsic::x86_avx2_psign_b:
- case Intrinsic::x86_avx2_psign_w:
- case Intrinsic::x86_avx2_psign_d:
- return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::x86_avx2_permd:
- case Intrinsic::x86_avx2_permps:
- // Operands intentionally swapped. Mask is last operand to intrinsic,
- // but second operand for node/instruction.
- return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(1));
-
case Intrinsic::x86_avx512_mask_valign_q_512:
case Intrinsic::x86_avx512_mask_valign_d_512:
// Vector source operands are swapped.
@@ -17056,58 +14757,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps);
}
-
- case Intrinsic::x86_fma_mask_vfmadd_ps_512:
- case Intrinsic::x86_fma_mask_vfmadd_pd_512:
- case Intrinsic::x86_fma_mask_vfmsub_ps_512:
- case Intrinsic::x86_fma_mask_vfmsub_pd_512:
- case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
- case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
- case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
- case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
- case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
- case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
- case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
- case Intrinsic::x86_fma_mask_vfmsubadd_pd_512: {
- auto *SAE = cast<ConstantSDNode>(Op.getOperand(5));
- if (SAE->getZExtValue() == X86::STATIC_ROUNDING::CUR_DIRECTION)
- return getVectorMaskingNode(DAG.getNode(getOpcodeForFMAIntrinsic(IntNo),
- dl, Op.getValueType(),
- Op.getOperand(1),
- Op.getOperand(2),
- Op.getOperand(3)),
- Op.getOperand(4), Op.getOperand(1),
- Subtarget, DAG);
- else
- return SDValue();
- }
-
- case Intrinsic::x86_fma_vfmadd_ps:
- case Intrinsic::x86_fma_vfmadd_pd:
- case Intrinsic::x86_fma_vfmsub_ps:
- case Intrinsic::x86_fma_vfmsub_pd:
- case Intrinsic::x86_fma_vfnmadd_ps:
- case Intrinsic::x86_fma_vfnmadd_pd:
- case Intrinsic::x86_fma_vfnmsub_ps:
- case Intrinsic::x86_fma_vfnmsub_pd:
- case Intrinsic::x86_fma_vfmaddsub_ps:
- case Intrinsic::x86_fma_vfmaddsub_pd:
- case Intrinsic::x86_fma_vfmsubadd_ps:
- case Intrinsic::x86_fma_vfmsubadd_pd:
- case Intrinsic::x86_fma_vfmadd_ps_256:
- case Intrinsic::x86_fma_vfmadd_pd_256:
- case Intrinsic::x86_fma_vfmsub_ps_256:
- case Intrinsic::x86_fma_vfmsub_pd_256:
- case Intrinsic::x86_fma_vfnmadd_ps_256:
- case Intrinsic::x86_fma_vfnmadd_pd_256:
- case Intrinsic::x86_fma_vfnmsub_ps_256:
- case Intrinsic::x86_fma_vfnmsub_pd_256:
- case Intrinsic::x86_fma_vfmaddsub_ps_256:
- case Intrinsic::x86_fma_vfmaddsub_pd_256:
- case Intrinsic::x86_fma_vfmsubadd_ps_256:
- case Intrinsic::x86_fma_vfmsubadd_pd_256:
- return DAG.getNode(getOpcodeForFMAIntrinsic(IntNo), dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
}
}
@@ -17305,7 +14954,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
switch(IntrData->Type) {
default:
llvm_unreachable("Unknown Intrinsic Type");
- break;
+ break;
case RDSEED:
case RDRAND: {
// Emit the node with the right value type.
@@ -17403,6 +15052,58 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
Results.push_back(Store);
return DAG.getMergeValues(Results, dl);
}
+ case COMPRESS_TO_MEM: {
+ SDLoc dl(Op);
+ SDValue Mask = Op.getOperand(4);
+ SDValue DataToCompress = Op.getOperand(3);
+ SDValue Addr = Op.getOperand(2);
+ SDValue Chain = Op.getOperand(0);
+
+ if (isAllOnes(Mask)) // return just a store
+ return DAG.getStore(Chain, dl, DataToCompress, Addr,
+ MachinePointerInfo(), false, false, 0);
+
+ EVT VT = DataToCompress.getValueType();
+ EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ VT.getVectorNumElements());
+ EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ Mask.getValueType().getSizeInBits());
+ SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
+ DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
+ DAG.getIntPtrConstant(0));
+
+ SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
+ DataToCompress, DAG.getUNDEF(VT));
+ return DAG.getStore(Chain, dl, Compressed, Addr,
+ MachinePointerInfo(), false, false, 0);
+ }
+ case EXPAND_FROM_MEM: {
+ SDLoc dl(Op);
+ SDValue Mask = Op.getOperand(4);
+ SDValue PassThru = Op.getOperand(3);
+ SDValue Addr = Op.getOperand(2);
+ SDValue Chain = Op.getOperand(0);
+ EVT VT = Op.getValueType();
+
+ if (isAllOnes(Mask)) // return just a load
+ return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
+ false, 0);
+ EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ VT.getVectorNumElements());
+ EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ Mask.getValueType().getSizeInBits());
+ SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
+ DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
+ DAG.getIntPtrConstant(0));
+
+ SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
+ false, false, false, 0);
+
+ SDValue Results[] = {
+ DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand, PassThru),
+ Chain};
+ return DAG.getMergeValues(Results, dl);
+ }
}
}
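The all-ones-mask fast paths above (a plain store for COMPRESS_TO_MEM, a plain load for EXPAND_FROM_MEM) follow from the compress/expand semantics, which can be modeled on scalars roughly as follows (a sketch of the architectural behavior, not code from the patch):

    // Scalar model for an n-lane vector of int with an n-bit mask.
    void compress_to_mem(const int *v, int *mem, unsigned n, unsigned mask) {
      unsigned j = 0;
      for (unsigned i = 0; i < n; ++i)
        if (mask & (1u << i))
          mem[j++] = v[i];     // selected lanes are packed contiguously
    }
    void expand_from_mem(const int *mem, int *v, const int *passthru,
                         unsigned n, unsigned mask) {
      unsigned j = 0;
      for (unsigned i = 0; i < n; ++i)  // consecutive memory elements fill
        v[i] = (mask & (1u << i)) ? mem[j++] : passthru[i]; // selected lanes
    }

With an all-ones mask both loops degenerate to a contiguous copy, i.e. an ordinary store or load.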
@@ -17420,8 +15121,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT,
@@ -17436,15 +15136,33 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ EVT VT = Op.getValueType();
+
MFI->setFrameAddressIsTaken(true);
- EVT VT = Op.getValueType();
+ if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
+ // Depth > 0 makes no sense on targets which use Windows unwind codes. It
+ // is not possible to crawl up the stack without looking at the unwind codes
+ // simultaneously.
+ int FrameAddrIndex = FuncInfo->getFAIndex();
+ if (!FrameAddrIndex) {
+ // Set up a frame object for the return address.
+ unsigned SlotSize = RegInfo->getSlotSize();
+ FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
+ SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
+ FuncInfo->setFAIndex(FrameAddrIndex);
+ }
+ return DAG.getFrameIndex(FrameAddrIndex, VT);
+ }
+
+ unsigned FrameReg =
+ RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
- unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
"Invalid Frame Register!");
@@ -17471,8 +15189,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName,
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
@@ -17483,8 +15200,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl (Op);
EVT PtrVT = getPointerTy();
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- DAG.getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -17531,7 +15247,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
if (Subtarget->is64Bit()) {
SDValue OutChains[6];
@@ -17694,8 +15410,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
*/
MachineFunction &MF = DAG.getMachineFunction();
- const TargetMachine &TM = MF.getTarget();
- const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -18090,76 +15805,29 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
DAG);
}
- if (VT == MVT::v16i8) {
- if (Op.getOpcode() == ISD::SHL) {
- // Make a large shift.
- SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
- MVT::v8i16, R, ShiftAmt,
- DAG);
- SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
- // Zero out the rightmost bits.
- SmallVector<SDValue, 16> V(16,
- DAG.getConstant(uint8_t(-1U << ShiftAmt),
- MVT::i8));
- return DAG.getNode(ISD::AND, dl, VT, SHL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
- }
- if (Op.getOpcode() == ISD::SRL) {
- // Make a large shift.
- SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
- MVT::v8i16, R, ShiftAmt,
- DAG);
- SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
- // Zero out the leftmost bits.
- SmallVector<SDValue, 16> V(16,
- DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
- MVT::i8));
- return DAG.getNode(ISD::AND, dl, VT, SRL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
- }
- if (Op.getOpcode() == ISD::SRA) {
- if (ShiftAmt == 7) {
- // R s>> 7 === R s< 0
- SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
- return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
- }
-
- // R s>> a === ((R u>> a) ^ m) - m
- SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
- SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
- MVT::i8));
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
- Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
- Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
- return Res;
- }
- llvm_unreachable("Unknown shift opcode.");
- }
+ if (VT == MVT::v16i8 || (Subtarget->hasInt256() && VT == MVT::v32i8)) {
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
- if (Subtarget->hasInt256() && VT == MVT::v32i8) {
if (Op.getOpcode() == ISD::SHL) {
// Make a large shift.
- SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
- MVT::v16i16, R, ShiftAmt,
- DAG);
+ SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
+ R, ShiftAmt, DAG);
SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
// Zero out the rightmost bits.
- SmallVector<SDValue, 32> V(32,
- DAG.getConstant(uint8_t(-1U << ShiftAmt),
- MVT::i8));
+ SmallVector<SDValue, 32> V(
+ NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SHL,
DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
- SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
- MVT::v16i16, R, ShiftAmt,
- DAG);
+ SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT,
+ R, ShiftAmt, DAG);
SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
// Zero out the leftmost bits.
- SmallVector<SDValue, 32> V(32,
- DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
- MVT::i8));
+ SmallVector<SDValue, 32> V(
+ NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SRL,
DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
@@ -18172,8 +15840,8 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
// R s>> a === ((R u>> a) ^ m) - m
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
- SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
- MVT::i8));
+ SmallVector<SDValue, 32> V(NumElts,
+ DAG.getConstant(128 >> ShiftAmt, MVT::i8));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
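The identity in the comment can be spot-checked on a scalar lane: with m = 0x80 >> a (the sign bit after the logical shift), xor-then-subtract sign-extends the result. A sketch for one i8 lane:

    #include <cstdint>
    // R s>> a == ((R u>> a) ^ m) - m, with m = 0x80 >> a.
    int8_t sra_via_srl(uint8_t R, unsigned a) {
      uint8_t m = uint8_t(0x80u >> a);
      return int8_t(uint8_t((uint8_t(R >> a) ^ m) - m));
    }
    // E.g. R = 0xF0 (-16), a = 4: (0x0F ^ 0x08) - 0x08 = 0xFF, i.e. -1 == -16 >> 4.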
@@ -18249,55 +15917,43 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
SDValue BaseShAmt;
EVT EltVT = VT.getVectorElementType();
- if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
- unsigned NumElts = VT.getVectorNumElements();
- unsigned i, j;
- for (i = 0; i != NumElts; ++i) {
- if (Amt.getOperand(i).getOpcode() == ISD::UNDEF)
- continue;
- break;
- }
- for (j = i; j != NumElts; ++j) {
- SDValue Arg = Amt.getOperand(j);
- if (Arg.getOpcode() == ISD::UNDEF) continue;
- if (Arg != Amt.getOperand(i))
- break;
- }
- if (i != NumElts && j == NumElts)
- BaseShAmt = Amt.getOperand(i);
+ if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
+ // Check if this build_vector node is doing a splat.
+ // If so, then set BaseShAmt equal to the splat value.
+ BaseShAmt = BV->getSplatValue();
+ if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
+ BaseShAmt = SDValue();
} else {
if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
Amt = Amt.getOperand(0);
- if (Amt.getOpcode() == ISD::VECTOR_SHUFFLE &&
- cast<ShuffleVectorSDNode>(Amt)->isSplat()) {
+
+ ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
+ if (SVN && SVN->isSplat()) {
+ unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
SDValue InVec = Amt.getOperand(0);
if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
- unsigned NumElts = InVec.getValueType().getVectorNumElements();
- unsigned i = 0;
- for (; i != NumElts; ++i) {
- SDValue Arg = InVec.getOperand(i);
- if (Arg.getOpcode() == ISD::UNDEF) continue;
- BaseShAmt = Arg;
- break;
- }
+ assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
+ "Unexpected shuffle index found!");
+ BaseShAmt = InVec.getOperand(SplatIdx);
} else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
- unsigned SplatIdx =
- cast<ShuffleVectorSDNode>(Amt)->getSplatIndex();
if (C->getZExtValue() == SplatIdx)
BaseShAmt = InVec.getOperand(1);
}
}
- if (!BaseShAmt.getNode())
- BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt,
- DAG.getIntPtrConstant(0));
+
+ if (!BaseShAmt)
+ // Avoid introducing an extract element from a shuffle.
+ BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
+ DAG.getIntPtrConstant(SplatIdx));
}
}
if (BaseShAmt.getNode()) {
- if (EltVT.bitsGT(MVT::i32))
- BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt);
+ assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
+ if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
+ BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
else if (EltVT.bitsLT(MVT::i32))
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
@@ -18415,7 +16071,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
// Do this only if the vector shift count is a constant build_vector.
- if (Op.getOpcode() == ISD::SHL &&
+ if (Op.getOpcode() == ISD::SHL &&
(VT == MVT::v8i16 || VT == MVT::v4i32 ||
(Subtarget->hasInt256() && VT == MVT::v16i16)) &&
ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
@@ -18507,15 +16163,15 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
CanBeSimplified = Amt2 == Amt->getOperand(j);
}
}
-
+
if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
isa<ConstantSDNode>(Amt2)) {
// Replace this node with two shifts followed by a MOVSS/MOVSD.
EVT CastVT = MVT::v4i32;
- SDValue Splat1 =
+ SDValue Splat1 =
DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
- SDValue Splat2 =
+ SDValue Splat2 =
DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
if (TargetOpcode == X86ISD::MOVSD)
@@ -18704,81 +16360,17 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
-// Sign extension of the low part of vector elements. This may be used either
-// when sign extend instructions are not available or if the vector element
-// sizes already match the sign-extended size. If the vector elements are in
-// their pre-extended size and sign extend instructions are available, that will
-// be handled by LowerSIGN_EXTEND.
-SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc dl(Op);
- EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- MVT VT = Op.getSimpleValueType();
-
- if (!Subtarget->hasSSE2() || !VT.isVector())
- return SDValue();
-
- unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
- ExtraVT.getScalarType().getSizeInBits();
-
- switch (VT.SimpleTy) {
- default: return SDValue();
- case MVT::v8i32:
- case MVT::v16i16:
- if (!Subtarget->hasFp256())
- return SDValue();
- if (!Subtarget->hasInt256()) {
- // needs to be split
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract the LHS vectors
- SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
-
- MVT EltVT = VT.getVectorElementType();
- EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- EVT ExtraEltVT = ExtraVT.getVectorElementType();
- unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
- ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
- ExtraNumElems/2);
- SDValue Extra = DAG.getValueType(ExtraVT);
-
- LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
- LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
- }
- // fall through
- case MVT::v4i32:
- case MVT::v8i16: {
- SDValue Op0 = Op.getOperand(0);
-
- // This is a sign extension of some low part of vector elements without
- // changing the size of the vector elements themselves:
- // Shift-Left + Shift-Right-Algebraic.
- SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
- BitsDiff, DAG);
- return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
- DAG);
- }
- }
-}
-
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
- const X86Subtarget &Subtarget =
- getTargetMachine().getSubtarget<X86Subtarget>();
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
- return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
+ return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
else if (OpWidth == 128)
- return Subtarget.hasCmpxchg16b();
+ return Subtarget->hasCmpxchg16b();
else
return false;
}
@@ -18795,9 +16387,7 @@ bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
}
bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
- const X86Subtarget &Subtarget =
- getTargetMachine().getSubtarget<X86Subtarget>();
- unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
const Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
@@ -18840,9 +16430,7 @@ static bool hasMFENCE(const X86Subtarget& Subtarget) {
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
- const X86Subtarget &Subtarget =
- getTargetMachine().getSubtarget<X86Subtarget>();
- unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
const Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
@@ -18878,7 +16466,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
- } else if (hasMFENCE(Subtarget)) {
+ } else if (hasMFENCE(*Subtarget)) {
Function *MFence = llvm::Intrinsic::getDeclaration(M,
Intrinsic::x86_sse2_mfence);
Builder.CreateCall(MFence);
@@ -18997,9 +16585,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
DAG.getIntPtrConstant(i)));
// Explicitly mark the extra elements as Undef.
- SDValue Undef = DAG.getUNDEF(SVT);
- for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
- Elts.push_back(Undef);
+ Elts.append(NumElts, DAG.getUNDEF(SVT));
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
@@ -19025,6 +16611,139 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
return SDValue();
}
+static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+
+ Op = Op.getOperand(0);
+ EVT VT = Op.getValueType();
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "CTPOP lowering only implemented for 128/256-bit wide vector types");
+
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ unsigned Len = EltVT.getSizeInBits();
+
+ // This is the vectorized version of the "best" algorithm from
+ // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ // with a minor tweak to use a series of adds + shifts instead of vector
+ // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
+ //
+ // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
+ // v8i32 => Always profitable
+ //
+ // FIXME: There are a couple of possible improvements:
+ //
+ // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
+ // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
+ //
+ assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
+ "CTPOP not implemented for this vector element type.");
+
+ // X86 canonicalizes ANDs to vXi64, so generate the appropriate bitcasts to
+ // avoid extra legalization.
+ bool NeedsBitcast = EltVT == MVT::i32;
+ MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
+
+ SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
+ SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
+ SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
+
+ // v = v - ((v >> 1) & 0x55555555...)
+ SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
+ SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
+ SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
+ if (NeedsBitcast)
+ Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
+
+ SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
+ SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
+ if (NeedsBitcast)
+ M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
+
+ SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
+ if (VT != And.getValueType())
+ And = DAG.getNode(ISD::BITCAST, dl, VT, And);
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
+
+ // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
+ SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
+ SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
+ SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
+ SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
+
+ Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
+ if (NeedsBitcast) {
+ Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
+ M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
+ Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
+ }
+
+ SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
+ SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
+ if (VT != AndRHS.getValueType()) {
+ AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
+ AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
+ }
+ SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
+
+ // v = (v + (v >> 4)) & 0x0F0F0F0F...
+ SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
+ SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
+ Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
+ Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
+
+ SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
+ SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
+ if (NeedsBitcast) {
+ Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
+ M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
+ }
+ And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
+ if (VT != And.getValueType())
+ And = DAG.getNode(ISD::BITCAST, dl, VT, And);
+
+ // The algorithm mentioned above uses:
+ // v = (v * 0x01010101...) >> (Len - 8)
+ //
+ // Change it to use vector adds + vector shifts which yield faster results on
+ // Haswell than using vector integer multiplication.
+ //
+ // For i32 elements:
+ // v = v + (v >> 8)
+ // v = v + (v >> 16)
+ //
+ // For i64 elements:
+ // v = v + (v >> 8)
+ // v = v + (v >> 16)
+ // v = v + (v >> 32)
+ //
+ Add = And;
+ SmallVector<SDValue, 8> Csts;
+ for (unsigned i = 8; i <= Len/2; i *= 2) {
+ Csts.assign(NumElts, DAG.getConstant(i, EltVT));
+ SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
+ Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
+ Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
+ Csts.clear();
+ }
+
+ // The result fits in the least significant 6 bits for i32 and 7 bits for i64.
+ SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
+ SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
+ SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
+ if (NeedsBitcast) {
+ Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
+ M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
+ }
+ And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
+ if (VT != And.getValueType())
+ And = DAG.getNode(ISD::BITCAST, dl, VT, And);
+
+ return And;
+}
+
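Per lane, LowerCTPOP builds the classic bit-parallel population count with the final multiply replaced by the add/shift ladder described above. A scalar model for one i32 lane (a sketch, not code from the patch):

    #include <cstdint>
    uint32_t popcount32_model(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);                 // count bit pairs
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); // count nibbles
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;                 // count bytes
      // Instead of (v * 0x01010101) >> 24, sum the byte counts with adds+shifts:
      v = v + (v >> 8);
      v = v + (v >> 16);
      return v & 0x3Fu; // at most 32 set bits, so 6 result bits suffice
    }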
static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
SDLoc dl(Node);
@@ -19148,15 +16867,15 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
- case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return LowerCMP_SWAP(Op, Subtarget, DAG);
+ case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
case ISD::VSELECT: return LowerVSELECT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
@@ -19243,6 +16962,22 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
+ // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
+ case X86ISD::FMINC:
+ case X86ISD::FMIN:
+ case X86ISD::FMAXC:
+ case X86ISD::FMAX: {
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::v2f32)
+ llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
+ SDValue UNDEF = DAG.getUNDEF(VT);
+ SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
+ N->getOperand(0), UNDEF);
+ SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
+ N->getOperand(1), UNDEF);
+ Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
+ return;
+ }
case ISD::SIGN_EXTEND_INREG:
case ISD::ADDC:
case ISD::ADDE:
@@ -19599,6 +17334,16 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
case X86ISD::XTEST: return "X86ISD::XTEST";
+ case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
+ case X86ISD::EXPAND: return "X86ISD::EXPAND";
+ case X86ISD::SELECT: return "X86ISD::SELECT";
+ case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
+ case X86ISD::RCP28: return "X86ISD::RCP28";
+ case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
+ case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
+ case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
+ case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
+ case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
}
}
@@ -19747,6 +17492,8 @@ bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return false;
}
+bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
+
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
@@ -19783,68 +17530,20 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
if (!VT.isSimple())
return false;
- MVT SVT = VT.getSimpleVT();
-
// Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSizeInBits() == 64)
return false;
- // If this is a single-input shuffle with no 128 bit lane crossings we can
- // lower it into pshufb.
- if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
- (SVT.is256BitVector() && Subtarget->hasInt256())) {
- bool isLegal = true;
- for (unsigned I = 0, E = M.size(); I != E; ++I) {
- if (M[I] >= (int)SVT.getVectorNumElements() ||
- ShuffleCrosses128bitLane(SVT, I, M[I])) {
- isLegal = false;
- break;
- }
- }
- if (isLegal)
- return true;
- }
-
- // FIXME: blends, shifts.
- return (SVT.getVectorNumElements() == 2 ||
- ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
- isMOVLMask(M, SVT) ||
- isMOVHLPSMask(M, SVT) ||
- isSHUFPMask(M, SVT) ||
- isSHUFPMask(M, SVT, /* Commuted */ true) ||
- isPSHUFDMask(M, SVT) ||
- isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
- isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
- isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
- isPALIGNRMask(M, SVT, Subtarget) ||
- isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
- isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
- isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
- isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
- isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
- (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
+ // We only care that the types being shuffled are legal. The lowering can
+ // handle any possible shuffle mask that results.
+ return isTypeLegal(VT.getSimpleVT());
}
bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const {
- if (!VT.isSimple())
- return false;
-
- MVT SVT = VT.getSimpleVT();
- unsigned NumElts = SVT.getVectorNumElements();
- // FIXME: This collection of masks seems suspect.
- if (NumElts == 2)
- return true;
- if (NumElts == 4 && SVT.is128BitVector()) {
- return (isMOVLMask(Mask, SVT) ||
- isCommutedMOVLMask(Mask, SVT, true) ||
- isSHUFPMask(Mask, SVT) ||
- isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
- isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
- Subtarget->hasInt256()));
- }
- return false;
+ // Just delegate to the generic legality, clear masks aren't special.
+ return isShuffleMaskLegal(Mask, VT);
}
//===----------------------------------------------------------------------===//
@@ -19982,11 +17681,10 @@ static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
return BB;
}
-static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
- const TargetInstrInfo *TII,
- const X86Subtarget* Subtarget) {
+static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
+ const X86Subtarget *Subtarget) {
DebugLoc dl = MI->getDebugLoc();
-
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
@@ -20008,9 +17706,8 @@ static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
}
MachineBasicBlock *
-X86TargetLowering::EmitVAARG64WithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const {
+X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
// Emit va_arg instruction on X86-64.
// Operands to this pseudo-instruction:
@@ -20040,7 +17737,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
// Machine Information
- const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
@@ -20192,7 +17889,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
.setMemRefs(MMOBegin, MMOEnd);
// Jump to endMBB
- BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
+ BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
.addMBB(endMBB);
}
@@ -20296,7 +17993,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
- const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned CountReg = MI->getOperand(0).getReg();
@@ -20306,7 +18003,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
if (!Subtarget->isTargetWin64()) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
- BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
+ BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
MBB->addSuccessor(EndMBB);
}
@@ -20379,7 +18076,7 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
@@ -20405,8 +18102,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
- const TargetRegisterInfo *TRI =
- BB->getParent()->getSubtarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
if (!MI->killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
copy0MBB->addLiveIn(X86::EFLAGS);
@@ -20448,7 +18144,7 @@ MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -20510,7 +18206,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
.addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
.addReg(SPLimitVReg);
- BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB);
+ BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
// bumpMBB simply decreases the stack pointer, since we know the current
// stacklet has enough space.
@@ -20518,13 +18214,11 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
.addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
.addReg(SPLimitVReg);
- BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
+ BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Calls into a routine in libgcc to allocate more space from the heap.
- const uint32_t *RegMask = MF->getTarget()
- .getSubtargetImpl()
- ->getRegisterInfo()
- ->getCallPreservedMask(CallingConv::C);
+ const uint32_t *RegMask =
+ Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (IsLP64) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
@@ -20557,7 +18251,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
.addReg(IsLP64 ? X86::RAX : X86::EAX);
- BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
+ BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Set up the CFG correctly.
BB->addSuccessor(bumpMBB);
@@ -20581,52 +18275,11 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- assert(!Subtarget->isTargetMacho());
-
- // The lowering is pretty easy: we're just emitting the call to _alloca. The
- // non-trivial part is impdef of ESP.
-
- if (Subtarget->isTargetWin64()) {
- if (Subtarget->isTargetCygMing()) {
- // ___chkstk(Mingw64):
- // Clobbers R10, R11, RAX and EFLAGS.
- // Updates RSP.
- BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
- .addExternalSymbol("___chkstk")
- .addReg(X86::RAX, RegState::Implicit)
- .addReg(X86::RSP, RegState::Implicit)
- .addReg(X86::RAX, RegState::Define | RegState::Implicit)
- .addReg(X86::RSP, RegState::Define | RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- } else {
- // __chkstk(MSVCRT): does not update stack pointer.
- // Clobbers R10, R11 and EFLAGS.
- BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
- .addExternalSymbol("__chkstk")
- .addReg(X86::RAX, RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- // RAX has the offset to be subtracted from RSP.
- BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
- .addReg(X86::RSP)
- .addReg(X86::RAX);
- }
- } else {
- const char *StackProbeSymbol = (Subtarget->isTargetKnownWindowsMSVC() ||
- Subtarget->isTargetWindowsItanium())
- ? "_chkstk"
- : "_alloca";
+ assert(!Subtarget->isTargetMachO());
- BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
- .addExternalSymbol(StackProbeSymbol)
- .addReg(X86::EAX, RegState::Implicit)
- .addReg(X86::ESP, RegState::Implicit)
- .addReg(X86::EAX, RegState::Define | RegState::Implicit)
- .addReg(X86::ESP, RegState::Define | RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- }
+ X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
@@ -20640,8 +18293,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
MachineFunction *F = BB->getParent();
- const X86InstrInfo *TII =
- static_cast<const X86InstrInfo *>(F->getSubtarget().getInstrInfo());
+ const X86InstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
@@ -20650,10 +18302,8 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
// Get a register mask for the lowered call.
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
- const uint32_t *RegMask = F->getTarget()
- .getSubtargetImpl()
- ->getRegisterInfo()
- ->getCallPreservedMask(CallingConv::C);
+ const uint32_t *RegMask =
+ Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (Subtarget->is64Bit()) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV64rm), X86::RDI)
@@ -20698,7 +18348,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
@@ -20739,6 +18389,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// v = phi(main, restore)
//
// restoreMBB:
+ // if the base pointer is being used, load it from the frame
// v_restore = 1
MachineBasicBlock *thisMBB = MBB;
@@ -20804,8 +18455,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- MF->getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
@@ -20822,8 +18472,20 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
.addReg(restoreDstReg).addMBB(restoreMBB);
// restoreMBB:
+ if (RegInfo->hasBasePointer(*MF)) {
+ const bool Uses64BitFramePtr =
+ Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
+ X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
+ X86FI->setRestoreBasePointer(MF);
+ unsigned FramePtr = RegInfo->getFrameRegister(*MF);
+ unsigned BasePtr = RegInfo->getBaseRegister();
+ unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
+ addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
+ FramePtr, true, X86FI->getRestoreBasePointerOffset())
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
- BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB);
+ BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
restoreMBB->addSuccessor(sinkMBB);
MI->eraseFromParent();
@@ -20835,7 +18497,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
@@ -20850,8 +18512,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
- MF->getSubtarget().getRegisterInfo());
+ const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
@@ -20895,7 +18556,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
-// to remove extra copies in the loop.
+// to remove extra copies in the loop.
MachineBasicBlock *
X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
MachineBasicBlock *MBB) const {
@@ -20970,7 +18631,7 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
default: llvm_unreachable("Unrecognized FMA variant.");
}
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
MachineInstrBuilder MIB =
BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
.addOperand(MI->getOperand(0))
@@ -20993,6 +18654,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::TAILJMPd64:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
+ case X86::TAILJMPd64_REX:
+ case X86::TAILJMPr64_REX:
+ case X86::TAILJMPm64_REX:
llvm_unreachable("TAILJMP64 would not be touched here.");
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
@@ -21035,7 +18699,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
MachineFunction *F = BB->getParent();
- const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// Change the floating point control register to use "round towards zero"
@@ -21119,7 +18783,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRM128MEM:
assert(Subtarget->hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRM(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+ return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
// String/text processing lowering.
case X86::PCMPISTRIREG:
@@ -21132,16 +18796,15 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRIMEM:
assert(Subtarget->hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRI(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+ return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
// Thread synchronization.
case X86::MONITOR:
- return EmitMonitor(MI, BB, BB->getParent()->getSubtarget().getInstrInfo(),
- Subtarget);
+ return EmitMonitor(MI, BB, Subtarget);
// xbegin
case X86::XBEGIN:
- return EmitXBegin(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+ return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
@@ -21157,6 +18820,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
+ case TargetOpcode::STATEPOINT:
+ // As an implementation detail, STATEPOINT shares the STACKMAP format at
+ // this point in the process. We diverge later.
+ return emitPatchPoint(MI, BB);
+
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
@@ -22118,9 +19786,9 @@ static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
// We're looking for blends between FADD and FSUB nodes. We insist on these
// nodes being lined up in a specific expected pattern.
- if (!(isShuffleEquivalent(Mask, 0, 3) ||
- isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
- isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
+ if (!(isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
+ isShuffleEquivalent(V1, V2, Mask, {0, 5, 2, 7}) ||
+ isShuffleEquivalent(V1, V2, Mask, {0, 9, 2, 11, 4, 13, 6, 15})))
return SDValue();
// Only specific types are legal at this point, assert so we notice if and
@@ -22176,7 +19844,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
EVT SVT = BC0.getValueType();
unsigned Opcode = BC0.getOpcode();
unsigned NumElts = VT.getVectorNumElements();
-
+
if (BC0.hasOneUse() && SVT.isVector() &&
SVT.getVectorNumElements() * 2 == NumElts &&
TLI.isOperationLegal(Opcode, VT)) {
@@ -22304,7 +19972,8 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
: InVec.getOperand(1);
// If inputs to shuffle are the same for both ops, then allow 2 uses
- unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
+ unsigned AllowedUses = InVec.getNumOperands() > 1 &&
+ InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
if (LdNode.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
@@ -22349,9 +20018,30 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
EltNo);
}
+/// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
+/// special and don't usually play with other vector types, it's better to
+/// handle them early to be sure we emit efficient code by avoiding
+/// store-load conversions.
+static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
+ if (N->getValueType(0) != MVT::x86mmx ||
+ N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
+ N->getOperand(0)->getValueType(0) != MVT::v2i32)
+ return SDValue();
+
+ SDValue V = N->getOperand(0);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
+ if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
+ return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
+ N->getValueType(0), V.getOperand(0));
+
+ return SDValue();
+}
+
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
-/// to a simple store and scalar loads to extract the elements.
+/// into a somewhat faster sequence. For i686, the best sequence is apparently
+/// storing the value and loading scalars back, while for x64 we should
+/// use 64-bit extracts and shifts.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
@@ -22360,14 +20050,29 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
SDValue InputVector = N->getOperand(0);
- // Detect whether we are trying to convert from mmx to i32 and the bitcast
- // from mmx to v2i32 has a single usage.
- if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
- InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
- InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
- return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
- N->getValueType(0),
- InputVector.getNode()->getOperand(0));
+ // Detect mmx to i32 conversion through a v2i32 elt extract.
+ if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
+ N->getValueType(0) == MVT::i32 &&
+ InputVector.getValueType() == MVT::v2i32) {
+
+ // The bitcast source is a direct mmx result.
+ SDValue MMXSrc = InputVector.getNode()->getOperand(0);
+ if (MMXSrc.getValueType() == MVT::x86mmx)
+ return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
+ N->getValueType(0),
+ InputVector.getNode()->getOperand(0));
+
+ // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
+ SDValue MMXSrcOp = MMXSrc.getOperand(0);
+ if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
+ MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
+ MMXSrcOp.getOpcode() == ISD::BITCAST &&
+ MMXSrcOp.getValueType() == MVT::v1i64 &&
+ MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
+ return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
+ N->getValueType(0),
+ MMXSrcOp.getOperand(0));
+ }
// Only operate on vectors of 4 elements, where the alternative shuffling
// gets to be more expensive.
@@ -22410,36 +20115,61 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
// Ok, we've now decided to do the transformation.
+ // If 64-bit shifts are legal, use the extract-shift sequence,
+ // otherwise bounce the vector off the cache.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Vals[4];
SDLoc dl(InputVector);
- // Store the value to a temporary stack slot.
- SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
- SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
- MachinePointerInfo(), false, false, 0);
+ if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
+ SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
+ EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
+ SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
+ DAG.getConstant(0, VecIdxTy));
+ SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
+ DAG.getConstant(1, VecIdxTy));
+
+ SDValue ShAmt = DAG.getConstant(32,
+ DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
+ Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
+ Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+ DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
+ Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
+ Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+ DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
+ } else {
+ // Store the value to a temporary stack slot.
+ SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
+ MachinePointerInfo(), false, false, 0);
- // Replace each use (extract) with a load of the appropriate element.
- for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
- UE = Uses.end(); UI != UE; ++UI) {
- SDNode *Extract = *UI;
+ EVT ElementType = InputVector.getValueType().getVectorElementType();
+ unsigned EltSize = ElementType.getSizeInBits() / 8;
- // cOMpute the element's address.
- SDValue Idx = Extract->getOperand(1);
- unsigned EltSize =
- InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
- uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
+ // Replace each use (extract) with a load of the appropriate element.
+ for (unsigned i = 0; i < 4; ++i) {
+ uint64_t Offset = EltSize * i;
+ SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
+
+ SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
+ StackPtr, OffsetVal);
+
+ // Load the scalar.
+ Vals[i] = DAG.getLoad(ElementType, dl, Ch,
+ ScalarAddr, MachinePointerInfo(),
+ false, false, false, 0);
- SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
- StackPtr, OffsetVal);
+ }
+ }
- // Load the scalar.
- SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
- ScalarAddr, MachinePointerInfo(),
- false, false, false, 0);
+ // Replace the extracts
+ for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
+ UE = Uses.end(); UI != UE; ++UI) {
+ SDNode *Extract = *UI;
- // Replace the exact with the load.
- DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
+ SDValue Idx = Extract->getOperand(1);
+ uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
}
// The replacement was made in place; don't return anything.
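
In scalar terms, the two code paths above amount to the following standalone
sketch (plain C++, not SelectionDAG code; lane values are arbitrary and a
little-endian layout is assumed):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // A v4i32 modeled as two 64-bit halves, lanes {1, 2, 3, 4}.
      uint64_t Bottom = 0x0000000200000001ULL, Top = 0x0000000400000003ULL;

      // x86-64 path: 64-bit extracts plus shifts (the SRA/TRUNCATE nodes).
      uint32_t ViaShift[4] = {(uint32_t)Bottom, (uint32_t)(Bottom >> 32),
                              (uint32_t)Top, (uint32_t)(Top >> 32)};

      // i686 path: bounce the vector off the stack and reload scalars.
      uint32_t ViaStack[4];
      uint64_t Slot[2] = {Bottom, Top};
      std::memcpy(ViaStack, Slot, sizeof(ViaStack));

      for (int i = 0; i < 4; ++i)
        printf("lane %d: %u %u\n", i, ViaShift[i], ViaStack[i]);
    }
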
@@ -22456,6 +20186,21 @@ matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
bool NeedSplit = false;
switch (VT.getSimpleVT().SimpleTy) {
default: return std::make_pair(0, false);
+ case MVT::v4i64:
+ case MVT::v2i64:
+ if (!Subtarget->hasVLX())
+ return std::make_pair(0, false);
+ break;
+ case MVT::v64i8:
+ case MVT::v32i16:
+ if (!Subtarget->hasBWI())
+ return std::make_pair(0, false);
+ break;
+ case MVT::v16i32:
+ case MVT::v8i64:
+ if (!Subtarget->hasAVX512())
+ return std::make_pair(0, false);
+ break;
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
@@ -22522,7 +20267,7 @@ matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
}
static SDValue
-TransformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
+transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
SDLoc dl(N);
SDValue Cond = N->getOperand(0);
@@ -22535,18 +20280,6 @@ TransformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
Cond = CondSrc->getOperand(0);
}
- MVT VT = N->getSimpleValueType(0);
- MVT EltVT = VT.getVectorElementType();
- unsigned NumElems = VT.getVectorNumElements();
- // There is no blend with immediate in AVX-512.
- if (VT.is512BitVector())
- return SDValue();
-
- if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
- return SDValue();
- if (!Subtarget->hasInt256() && VT == MVT::v16i16)
- return SDValue();
-
if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
@@ -22560,6 +20293,8 @@ TransformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
return SDValue();
+ MVT VT = N->getSimpleValueType(0);
+ unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> ShuffleMask(NumElems, -1);
for (unsigned i = 0; i < NumElems; ++i) {
// Be sure we emit undef where we can.
@@ -22569,6 +20304,9 @@ TransformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
}
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
+ return SDValue();
return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
}
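
For reference, the blend-mask-to-shuffle-mask conversion above can be checked
with a small standalone sketch (assumed NumElems = 8 and an arbitrary example
mask): bit i of MaskValue selects lane i of the second operand, which a
shuffle encodes as index i + NumElems.

    #include <cstdio>

    int main() {
      const unsigned NumElems = 8;
      unsigned MaskValue = 0xB2; // 0b10110010: lanes 1, 4, 5, 7 from RHS
      int ShuffleMask[NumElems];
      for (unsigned i = 0; i < NumElems; ++i)
        ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
      for (unsigned i = 0; i < NumElems; ++i)
        printf("%d ", ShuffleMask[i]); // prints: 0 9 2 3 12 13 6 15
      printf("\n");
    }

(The real code also emits -1 (undef) where both choices are undef; the sketch
omits that refinement.)
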
@@ -22589,8 +20327,9 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// instructions match the semantics of the common C idiom x<y?x:y but not
// x<=y?x:y, because of how they handle negative zero (which can be
// ignored in unsafe-math mode).
+ // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
- VT != MVT::f80 && TLI.isTypeLegal(VT) &&
+ VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
(Subtarget->hasSSE2() ||
(Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
@@ -23008,96 +20747,31 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // Try to fold this VSELECT into a MOVSS/MOVSD
- if (N->getOpcode() == ISD::VSELECT &&
- Cond.getOpcode() == ISD::BUILD_VECTOR && !DCI.isBeforeLegalize()) {
- if (VT == MVT::v4i32 || VT == MVT::v4f32 ||
- (Subtarget->hasSSE2() && (VT == MVT::v2i64 || VT == MVT::v2f64))) {
- bool CanFold = false;
- unsigned NumElems = Cond.getNumOperands();
- SDValue A = LHS;
- SDValue B = RHS;
-
- if (isZero(Cond.getOperand(0))) {
- CanFold = true;
-
- // fold (vselect <0,-1,-1,-1>, A, B) -> (movss A, B)
- // fold (vselect <0,-1> -> (movsd A, B)
- for (unsigned i = 1, e = NumElems; i != e && CanFold; ++i)
- CanFold = isAllOnes(Cond.getOperand(i));
- } else if (isAllOnes(Cond.getOperand(0))) {
- CanFold = true;
- std::swap(A, B);
-
- // fold (vselect <-1,0,0,0>, A, B) -> (movss B, A)
- // fold (vselect <-1,0> -> (movsd B, A)
- for (unsigned i = 1, e = NumElems; i != e && CanFold; ++i)
- CanFold = isZero(Cond.getOperand(i));
- }
-
- if (CanFold) {
- if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return getTargetShuffleNode(X86ISD::MOVSS, DL, VT, A, B, DAG);
- return getTargetShuffleNode(X86ISD::MOVSD, DL, VT, A, B, DAG);
- }
-
- if (Subtarget->hasSSE2() && (VT == MVT::v4i32 || VT == MVT::v4f32)) {
- // fold (v4i32: vselect <0,0,-1,-1>, A, B) ->
- // (v4i32 (bitcast (movsd (v2i64 (bitcast A)),
- // (v2i64 (bitcast B)))))
- //
- // fold (v4f32: vselect <0,0,-1,-1>, A, B) ->
- // (v4f32 (bitcast (movsd (v2f64 (bitcast A)),
- // (v2f64 (bitcast B)))))
- //
- // fold (v4i32: vselect <-1,-1,0,0>, A, B) ->
- // (v4i32 (bitcast (movsd (v2i64 (bitcast B)),
- // (v2i64 (bitcast A)))))
- //
- // fold (v4f32: vselect <-1,-1,0,0>, A, B) ->
- // (v4f32 (bitcast (movsd (v2f64 (bitcast B)),
- // (v2f64 (bitcast A)))))
-
- CanFold = (isZero(Cond.getOperand(0)) &&
- isZero(Cond.getOperand(1)) &&
- isAllOnes(Cond.getOperand(2)) &&
- isAllOnes(Cond.getOperand(3)));
-
- if (!CanFold && isAllOnes(Cond.getOperand(0)) &&
- isAllOnes(Cond.getOperand(1)) &&
- isZero(Cond.getOperand(2)) &&
- isZero(Cond.getOperand(3))) {
- CanFold = true;
- std::swap(LHS, RHS);
- }
-
- if (CanFold) {
- EVT NVT = (VT == MVT::v4i32) ? MVT::v2i64 : MVT::v2f64;
- SDValue NewA = DAG.getNode(ISD::BITCAST, DL, NVT, LHS);
- SDValue NewB = DAG.getNode(ISD::BITCAST, DL, NVT, RHS);
- SDValue Select = getTargetShuffleNode(X86ISD::MOVSD, DL, NVT, NewA,
- NewB, DAG);
- return DAG.getNode(ISD::BITCAST, DL, VT, Select);
- }
- }
- }
+ // We should generate an X86ISD::BLENDI from a vselect if its argument
+ // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
+ // constants. This specific pattern gets generated when we split a
+ // selector for a 512 bit vector in a machine without AVX512 (but with
+ // 256-bit vectors), during legalization:
+ //
+ // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
+ //
+  // If we find this pattern and the build_vectors are built from
+ // constants, we translate the vselect into a shuffle_vector that we
+ // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
+ if ((N->getOpcode() == ISD::VSELECT ||
+ N->getOpcode() == X86ISD::SHRUNKBLEND) &&
+ !DCI.isBeforeLegalize()) {
+ SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
+ if (Shuffle.getNode())
+ return Shuffle;
}
- // If we know that this node is legal then we know that it is going to be
- // matched by one of the SSE/AVX BLEND instructions. These instructions only
- // depend on the highest bit in each word. Try to use SimplifyDemandedBits
- // to simplify previous instructions.
+ // If this is a *dynamic* select (non-constant condition) and we can match
+ // this node with one of the variable blend instructions, restructure the
+ // condition so that the blends can use the high bit of each element and use
+ // SimplifyDemandedBits to simplify the condition operand.
if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
!DCI.isBeforeLegalize() &&
- // We explicitly check against v8i16 and v16i16 because, although
- // they're marked as Custom, they might only be legal when Cond is a
- // build_vector of constants. This will be taken care in a later
- // condition.
- (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
- VT != MVT::v8i16) &&
- // Don't optimize vector of constants. Those are handled by
- // the generic code and all the bits must be properly set for
- // the generic optimizer.
!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
@@ -23105,6 +20779,31 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (BitWidth == 1)
return SDValue();
+ // We can only handle the cases where VSELECT is directly legal on the
+ // subtarget. We custom lower VSELECT nodes with constant conditions and
+ // this makes it hard to see whether a dynamic VSELECT will correctly
+ // lower, so we both check the operation's status and explicitly handle the
+ // cases where a *dynamic* blend will fail even though a constant-condition
+ // blend could be custom lowered.
+ // FIXME: We should find a better way to handle this class of problems.
+ // Potentially, we should combine constant-condition vselect nodes
+ // pre-legalization into shuffles and not mark as many types as custom
+ // lowered.
+ if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return SDValue();
+ // FIXME: We don't support i16-element blends currently. We could and
+ // should support them by making *all* the bits in the condition be set
+ // rather than just the high bit and using an i8-element blend.
+ if (VT.getScalarType() == MVT::i16)
+ return SDValue();
+ // Dynamic blending was only available from SSE4.1 onward.
+ if (VT.getSizeInBits() == 128 && !Subtarget->hasSSE41())
+ return SDValue();
+  // Byte blends are only available in AVX2.
+ if (VT.getSizeInBits() == 256 && VT.getScalarType() == MVT::i8 &&
+ !Subtarget->hasAVX2())
+ return SDValue();
+
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
@@ -23153,25 +20852,6 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // We should generate an X86ISD::BLENDI from a vselect if its argument
- // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
- // constants. This specific pattern gets generated when we split a
- // selector for a 512 bit vector in a machine without AVX512 (but with
- // 256-bit vectors), during legalization:
- //
- // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
- //
- // Iff we find this pattern and the build_vectors are built from
- // constants, we translate the vselect into a shuffle_vector that we
- // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
- if ((N->getOpcode() == ISD::VSELECT ||
- N->getOpcode() == X86ISD::SHRUNKBLEND) &&
- !DCI.isBeforeLegalize()) {
- SDValue Shuffle = TransformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
- if (Shuffle.getNode())
- return Shuffle;
- }
-
return SDValue();
}
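
The restructuring relies on the variable-blend semantics: the BLENDV family
reads only the sign bit of each condition element. A minimal bit-level model
of a 4-byte PBLENDVB (illustrative values, not LLVM code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t A[4] = {10, 20, 30, 40}, B[4] = {50, 60, 70, 80};
      // Only bit 7 of each mask byte matters: 0x80 and 0xFF act alike,
      // and 0x7F acts like 0x00.
      uint8_t Mask[4] = {0x80, 0x00, 0xFF, 0x7F};
      for (int i = 0; i < 4; ++i)
        printf("%d ", (Mask[i] & 0x80) ? B[i] : A[i]); // 50 20 70 40
      printf("\n");
    }
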
@@ -23524,7 +21204,7 @@ static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
// fold (blend A, B, allOnes) -> B
if (ISD::isBuildVectorAllOnes(Mask.getNode()))
return Op1;
-
+
// Simplify the case where the mask is a constant i32 value.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
if (C->isNullValue())
@@ -23590,7 +21270,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT VT = N->getValueType(0);
- if (VT != MVT::i64)
+ if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
@@ -23948,24 +21628,118 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
}
}
+static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDLoc DL(N);
+
+ // A vector zext_in_reg may be represented as a shuffle,
+ // feeding into a bitcast (this represents anyext) feeding into
+ // an and with a mask.
+ // We'd like to try to combine that into a shuffle with zero
+ // plus a bitcast, removing the and.
+ if (N0.getOpcode() != ISD::BITCAST ||
+ N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
+ return SDValue();
+
+  // The other side of the AND should be a splat of 2^C - 1, where C
+  // is the number of bits in the source element type.
+ if (N1.getOpcode() == ISD::BITCAST)
+ N1 = N1.getOperand(0);
+ if (N1.getOpcode() != ISD::BUILD_VECTOR)
+ return SDValue();
+ BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
+
+ ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
+ EVT SrcType = Shuffle->getValueType(0);
+
+ // We expect a single-source shuffle
+ if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
+ return SDValue();
+
+ unsigned SrcSize = SrcType.getScalarSizeInBits();
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (!Vector->isConstantSplat(SplatValue, SplatUndef,
+ SplatBitSize, HasAnyUndefs))
+ return SDValue();
+
+ unsigned ResSize = N1.getValueType().getScalarSizeInBits();
+ // Make sure the splat matches the mask we expect
+ if (SplatBitSize > ResSize ||
+ (SplatValue + 1).exactLogBase2() != (int)SrcSize)
+ return SDValue();
+
+  // Make sure the input and output sizes make sense.
+ if (SrcSize >= ResSize || ResSize % SrcSize)
+ return SDValue();
+
+  // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>.
+  // The number of u's between consecutive values is determined by the
+  // ratio between the source and destination element types.
+ unsigned ZextRatio = ResSize / SrcSize;
+ bool IsZext = true;
+ for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
+ if (i % ZextRatio) {
+ if (Shuffle->getMaskElt(i) > 0) {
+ // Expected undef
+ IsZext = false;
+ break;
+ }
+ } else {
+ if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
+ // Expected element number
+ IsZext = false;
+ break;
+ }
+ }
+ }
+
+ if (!IsZext)
+ return SDValue();
+
+ // Ok, perform the transformation - replace the shuffle with
+ // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
+ // (instead of undef) where the k elements come from the zero vector.
+ SmallVector<int, 8> Mask;
+ unsigned NumElems = SrcType.getVectorNumElements();
+ for (unsigned i = 0; i < NumElems; ++i)
+ if (i % ZextRatio)
+ Mask.push_back(NumElems);
+ else
+ Mask.push_back(i / ZextRatio);
+
+ SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
+ Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
+ return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
+}
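
The equivalence this combine exploits can be checked standalone: on a
little-endian target, interleaving the source lanes with zeros and bitcasting
yields the zero-extended values, making the trailing AND redundant. A sketch
for 8 x u8 -> 8 x u16 (ZextRatio = 2; values arbitrary):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint8_t Src[8] = {1, 2, 3, 250, 5, 6, 7, 8};
      uint8_t Shuffled[16];
      for (int i = 0; i < 16; ++i)
        Shuffled[i] = (i % 2) ? 0 : Src[i / 2]; // mask <0,z,1,z,2,z,...>
      uint16_t Wide[8];
      std::memcpy(Wide, Shuffled, sizeof(Wide)); // the bitcast step
      for (int i = 0; i < 8; ++i)
        printf("%d ", Wide[i]); // prints: 1 2 3 250 5 6 7 8
      printf("\n");
    }
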
+
static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
if (DCI.isBeforeLegalizeOps())
return SDValue();
+ SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
+ if (Zext.getNode())
+ return Zext;
+
SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
if (R.getNode())
return R;
+ EVT VT = N->getValueType(0);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDLoc DL(N);
+
// Create BEXTR instructions
// BEXTR is ((X >> imm) & (2**size-1))
if (VT == MVT::i32 || VT == MVT::i64) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDLoc DL(N);
-
// Check for BEXTR.
if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
(N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
@@ -23975,7 +21749,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
uint64_t Mask = MaskNode->getZExtValue();
uint64_t Shift = ShiftNode->getZExtValue();
if (isMask_64(Mask)) {
- uint64_t MaskSize = CountPopulation_64(Mask);
+ uint64_t MaskSize = countPopulation(Mask);
if (Shift + MaskSize <= VT.getSizeInBits())
return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
DAG.getConstant(Shift | (MaskSize << 8), VT));
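
As a standalone model of the BEXTR semantics named above, with the control
value packed as Shift | (MaskSize << 8) exactly like the constant built here
(example values are arbitrary; Len is assumed to be below 64):

    #include <cstdint>
    #include <cstdio>

    uint64_t bextr(uint64_t X, uint32_t Ctrl) {
      uint32_t Start = Ctrl & 0xFF, Len = (Ctrl >> 8) & 0xFF;
      return (X >> Start) & ((1ULL << Len) - 1); // (X >> imm) & (2**size-1)
    }

    int main() {
      uint32_t Ctrl = 8 | (12 << 8); // start at bit 8, take 12 bits
      printf("%llx\n", (unsigned long long)bextr(0xABCD1234ULL, Ctrl)); // d12
    }
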
@@ -23993,10 +21767,6 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
if (VT != MVT::v2i64 && VT != MVT::v4i64)
return SDValue();
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDLoc DL(N);
-
// Check LHS for vnot
if (N0.getOpcode() == ISD::XOR &&
//ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
@@ -24108,8 +21878,8 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+ bool OptForSize =
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
@@ -24233,11 +22003,12 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
SDLoc dl(Ld);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // On Sandybridge unaligned 256bit loads are inefficient.
+ // For chips with slow 32-byte unaligned loads, break the 32-byte operation
+ // into two 16-byte operations.
ISD::LoadExtType Ext = Ld->getExtensionType();
unsigned Alignment = Ld->getAlignment();
bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
- if (RegVT.is256BitVector() && !Subtarget->hasInt256() &&
+ if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
!DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
unsigned NumElems = RegVT.getVectorNumElements();
if (NumElems < 2)
@@ -24270,6 +22041,166 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// PerformMLOADCombine - Resolve extending loads
+static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
+ if (Mld->getExtensionType() != ISD::SEXTLOAD)
+ return SDValue();
+
+ EVT VT = Mld->getValueType(0);
+ unsigned NumElems = VT.getVectorNumElements();
+ EVT LdVT = Mld->getMemoryVT();
+ SDLoc dl(Mld);
+
+ assert(LdVT != VT && "Cannot extend to the same type");
+ unsigned ToSz = VT.getVectorElementType().getSizeInBits();
+ unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
+ // From, To sizes and ElemCount must be pow of two
+ assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
+ "Unexpected size for extending masked load");
+
+ unsigned SizeRatio = ToSz / FromSz;
+ assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
+
+ // Create a type on which we perform the shuffle
+ EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
+ LdVT.getScalarType(), NumElems*SizeRatio);
+ assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
+
+ // Convert Src0 value
+ SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
+ if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
+ SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i] = i * SizeRatio;
+
+ // Can't shuffle using an illegal type.
+ assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
+ && "WideVecVT should be legal");
+ WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
+ DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
+ }
+ // Prepare the new mask
+ SDValue NewMask;
+ SDValue Mask = Mld->getMask();
+ if (Mask.getValueType() == VT) {
+ // Mask and original value have the same type
+ NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
+ SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i] = i * SizeRatio;
+ for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
+ ShuffleVec[i] = NumElems*SizeRatio;
+ NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
+ DAG.getConstant(0, WideVecVT),
+ &ShuffleVec[0]);
+ }
+ else {
+ assert(Mask.getValueType().getVectorElementType() == MVT::i1);
+ unsigned WidenNumElts = NumElems*SizeRatio;
+ unsigned MaskNumElts = VT.getVectorNumElements();
+ EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ WidenNumElts);
+
+ unsigned NumConcat = WidenNumElts / MaskNumElts;
+ SmallVector<SDValue, 16> Ops(NumConcat);
+ SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
+ Ops[0] = Mask;
+ for (unsigned i = 1; i != NumConcat; ++i)
+ Ops[i] = ZeroVal;
+
+ NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
+ }
+
+ SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
+ Mld->getBasePtr(), NewMask, WideSrc0,
+ Mld->getMemoryVT(), Mld->getMemOperand(),
+ ISD::NON_EXTLOAD);
+ SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
+ return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
+
+}
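
A small sketch of the mask-widening shuffle used above for the same-type mask
case, with assumed NumElems = 4 and SizeRatio = 2: the first NumElems lanes
pick the original mask elements, and the remaining lanes index the first
element of the all-zero second shuffle operand.

    #include <cstdio>

    int main() {
      const unsigned NumElems = 4, SizeRatio = 2;
      const unsigned Wide = NumElems * SizeRatio;
      int ShuffleVec[Wide];
      for (unsigned i = 0; i != NumElems; ++i)
        ShuffleVec[i] = i * SizeRatio;  // spread the mask elements out
      for (unsigned i = NumElems; i != Wide; ++i)
        ShuffleVec[i] = Wide;           // lane 0 of the zero vector
      for (unsigned i = 0; i != Wide; ++i)
        printf("%d ", ShuffleVec[i]);   // prints: 0 2 4 6 8 8 8 8
      printf("\n");
    }
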
+/// PerformMSTORECombine - Resolve truncating stores
+static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
+ if (!Mst->isTruncatingStore())
+ return SDValue();
+
+ EVT VT = Mst->getValue().getValueType();
+ unsigned NumElems = VT.getVectorNumElements();
+ EVT StVT = Mst->getMemoryVT();
+ SDLoc dl(Mst);
+
+ assert(StVT != VT && "Cannot truncate to the same type");
+ unsigned FromSz = VT.getVectorElementType().getSizeInBits();
+ unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+
+ // From, To sizes and ElemCount must be pow of two
+ assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
+ "Unexpected size for truncating masked store");
+  // We build a vector of the original total width out of truncated
+  // elements, so the total source width must be a multiple of the
+  // truncated element size.
+ assert (((NumElems * FromSz) % ToSz) == 0 &&
+ "Unexpected ratio for truncating masked store");
+
+ unsigned SizeRatio = FromSz / ToSz;
+ assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
+
+ // Create a type on which we perform the shuffle
+ EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
+ StVT.getScalarType(), NumElems*SizeRatio);
+
+ assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
+
+ SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
+ SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i] = i * SizeRatio;
+
+ // Can't shuffle using an illegal type.
+ assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
+ && "WideVecVT should be legal");
+
+ SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
+ DAG.getUNDEF(WideVecVT),
+ &ShuffleVec[0]);
+
+ SDValue NewMask;
+ SDValue Mask = Mst->getMask();
+ if (Mask.getValueType() == VT) {
+ // Mask and original value have the same type
+ NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i] = i * SizeRatio;
+ for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
+ ShuffleVec[i] = NumElems*SizeRatio;
+ NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
+ DAG.getConstant(0, WideVecVT),
+ &ShuffleVec[0]);
+ }
+ else {
+ assert(Mask.getValueType().getVectorElementType() == MVT::i1);
+ unsigned WidenNumElts = NumElems*SizeRatio;
+ unsigned MaskNumElts = VT.getVectorNumElements();
+ EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+ WidenNumElts);
+
+ unsigned NumConcat = WidenNumElts / MaskNumElts;
+ SmallVector<SDValue, 16> Ops(NumConcat);
+ SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
+ Ops[0] = Mask;
+ for (unsigned i = 1; i != NumConcat; ++i)
+ Ops[i] = ZeroVal;
+
+ NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
+ }
+
+ return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
+ NewMask, StVT, Mst->getMemOperand(), false);
+}
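
The corresponding truncating-store shuffle can be checked the same way:
assuming little-endian with four u32 lanes narrowed to u16 (SizeRatio = 2),
keeping lane i * SizeRatio of the bitcast wide vector selects exactly the
low, truncated half of each original element.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint32_t Val[4] = {0x11112222, 0x33334444, 0x55556666, 0x77778888};
      uint16_t Wide[8];
      std::memcpy(Wide, Val, sizeof(Wide)); // the bitcast to WideVecVT
      uint16_t Stored[4];
      for (int i = 0; i < 4; ++i)
        Stored[i] = Wide[i * 2];            // ShuffleVec[i] = i * SizeRatio
      for (int i = 0; i < 4; ++i)
        printf("%x ", (unsigned)Stored[i]); // prints: 2222 4444 6666 8888
      printf("\n");
    }
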
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
@@ -24280,13 +22211,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue StoredVal = St->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // If we are saving a concatenation of two XMM registers, perform two stores.
- // On Sandy Bridge, 256-bit memory operations are executed by two
- // 128-bit ports. However, on Haswell it is better to issue a single 256-bit
- // memory operation.
+ // If we are saving a concatenation of two XMM registers and 32-byte stores
+ // are slow, such as on Sandy Bridge, perform two 16-byte stores.
unsigned Alignment = St->getAlignment();
bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
- if (VT.is256BitVector() && !Subtarget->hasInt256() &&
+ if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
StVT == VT && !IsAligned) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
@@ -24352,9 +22281,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// Find the largest store unit
MVT StoreType = MVT::i8;
- for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
- tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
- MVT Tp = (MVT::SimpleValueType)tp;
+ for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
StoreType = Tp;
}
@@ -24399,8 +22326,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
&& Subtarget->hasSSE2();
if ((VT.isVector() ||
@@ -24500,7 +22426,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal"
+/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
@@ -24626,7 +22552,7 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
return true;
}
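
Concretely, for HADDPS on v4f32 the shape this predicate matches is (a rough
scalar model; illustrative only):

    #include <cstdio>

    int main() {
      float A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8};
      // haddps: successive pairs of the first operand, then of the second.
      float R[4] = {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
      printf("%g %g %g %g\n", R[0], R[1], R[2], R[3]); // 3 7 11 15
    }
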
-/// PerformFADDCombine - Do target-specific dag combines on floating point adds.
+/// Do target-specific dag combines on floating point adds.
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
EVT VT = N->getValueType(0);
@@ -24641,7 +22567,7 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformFSUBCombine - Do target-specific dag combines on floating point subs.
+/// Do target-specific dag combines on floating point subs.
static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
EVT VT = N->getValueType(0);
@@ -24656,23 +22582,23 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
-/// X86ISD::FXOR nodes.
+/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
+
// F[X]OR(0.0, x) -> x
- // F[X]OR(x, 0.0) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
+
+ // F[X]OR(x, 0.0) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
if (C->getValueAPF().isPosZero())
return N->getOperand(0);
return SDValue();
}
-/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and
-/// X86ISD::FMAX nodes.
+/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
@@ -24693,29 +22619,33 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
N->getOperand(0), N->getOperand(1));
}
-/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
+/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
// FAND(0.0, x) -> 0.0
- // FAND(x, 0.0) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
return N->getOperand(0);
+
+ // FAND(x, 0.0) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
+
return SDValue();
}
-/// PerformFANDNCombine - Do target-specific dag combines on X86ISD::FANDN nodes
+/// Do target-specific dag combines on X86ISD::FANDN nodes
static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
- // FANDN(x, 0.0) -> 0.0
// FANDN(0.0, x) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
+
+ // FANDN(x, 0.0) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
if (C->getValueAPF().isPosZero())
return N->getOperand(1);
+
return SDValue();
}
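
Both folds follow from FANDN computing a bitwise (~a & b) over the float bit
patterns, as a quick standalone check shows (bit-level model, not the LLVM
node):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    float fandn(float A, float B) { // bitwise ~A & B
      uint32_t IA, IB;
      std::memcpy(&IA, &A, 4);
      std::memcpy(&IB, &B, 4);
      uint32_t IR = ~IA & IB;
      float R;
      std::memcpy(&R, &IR, 4);
      return R;
    }

    int main() {
      // FANDN(0.0, x) -> x and FANDN(x, 0.0) -> 0.0:
      printf("%g %g\n", fandn(0.0f, 42.0f), fandn(42.0f, 0.0f)); // 42 0
    }
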
@@ -24978,6 +22908,23 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
+ SelectionDAG &DAG) {
+ SDLoc dl(Load);
+ MVT VT = Load->getSimpleValueType(0);
+ MVT EVT = VT.getVectorElementType();
+ SDValue Addr = Load->getOperand(1);
+ SDValue NewAddr = DAG.getNode(
+ ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
+ DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
+
+ SDValue NewLoad =
+ DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
+ DAG.getMachineFunction().getMachineMemOperand(
+ Load->getMemOperand(), 0, EVT.getStoreSize()));
+ return NewLoad;
+}
+
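
The address arithmetic in the helper above is plain base-plus-offset; a
trivial scalar analogue, assuming an in-memory v4f32:

    #include <cstdio>

    // Load one f32 lane by offsetting the base address, i.e.
    // VecAddr + Index * sizeof(float) bytes.
    float loadLane(const float *VecAddr, unsigned Index) {
      return VecAddr[Index];
    }

    int main() {
      float V[4] = {1.5f, 2.5f, 3.5f, 4.5f};
      printf("%g\n", loadLane(V, 2)); // prints: 3.5
    }
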
static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
SDLoc dl(N);
@@ -24989,20 +22936,47 @@ static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
if (MayFoldLoad(Ld)) {
// Extract the countS bits from the immediate so we can get the proper
// address when narrowing the vector load to a specific element.
- // When the second source op is a memory address, interps doesn't use
+ // When the second source op is a memory address, insertps doesn't use
// countS and just gets an f32 from that address.
unsigned DestIndex =
cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
+
Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
- } else
- return SDValue();
- // Create this as a scalar to vector to match the instruction pattern.
- SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
- // countS bits are ignored when loading from memory on insertps, which
- // means we don't need to explicitly set them to 0.
- return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
- LoadScalarToVector, N->getOperand(2));
+ // Create this as a scalar to vector to match the instruction pattern.
+ SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
+ // countS bits are ignored when loading from memory on insertps, which
+ // means we don't need to explicitly set them to 0.
+ return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
+ LoadScalarToVector, N->getOperand(2));
+ }
+ return SDValue();
+}
+
+static SDValue PerformBLENDICombine(SDNode *N, SelectionDAG &DAG) {
+ SDValue V0 = N->getOperand(0);
+ SDValue V1 = N->getOperand(1);
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+
+ // Canonicalize a v2f64 blend with a mask of 2 by swapping the vector
+ // operands and changing the mask to 1. This saves us a bunch of
+ // pattern-matching possibilities related to scalar math ops in SSE/AVX.
+ // x86InstrInfo knows how to commute this back after instruction selection
+ // if it would help register allocation.
+
+ // TODO: If optimizing for size or a processor that doesn't suffer from
+ // partial register update stalls, this should be transformed into a MOVSD
+ // instruction because a MOVSD is 1-2 bytes smaller than a BLENDPD.
+
+ if (VT == MVT::v2f64)
+ if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
+ if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
+ SDValue NewMask = DAG.getConstant(1, MVT::i8);
+ return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
+ }
+
+ return SDValue();
}
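
The commute is easy to verify standalone: with BLENDPD semantics, mask 2 on
(V0, V1) and mask 1 on the swapped operands pick the same lanes (a scalar
model with arbitrary values):

    #include <cstdio>

    // blendpd: bit i of Imm selects lane i of B, otherwise of A.
    void blendpd(const double *A, const double *B, unsigned Imm, double *R) {
      for (int i = 0; i < 2; ++i)
        R[i] = ((Imm >> i) & 1) ? B[i] : A[i];
    }

    int main() {
      double V0[2] = {1, 2}, V1[2] = {3, 4}, R1[2], R2[2];
      blendpd(V0, V1, 2, R1); // {1, 4}
      blendpd(V1, V0, 1, R2); // {1, 4}: swapped operands, mask 1
      printf("%g %g | %g %g\n", R1[0], R1[1], R2[0], R2[1]);
    }
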
// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
@@ -25134,7 +23108,7 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
}
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
- const X86TargetLowering *XTLI) {
+ const X86Subtarget *Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
@@ -25160,10 +23134,9 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
EVT VT = Ld->getValueType(0);
if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
- !XTLI->getSubtarget()->is64Bit() &&
- VT == MVT::i64) {
- SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
- Ld->getChain(), Op0, DAG);
+ !Subtarget->is64Bit() && VT == MVT::i64) {
+ SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
+ SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
}
@@ -25362,6 +23335,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SELECT:
case X86ISD::SHRUNKBLEND:
return PerformSELECTCombine(N, DAG, DCI, Subtarget);
+ case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
@@ -25374,8 +23348,10 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
+ case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
- case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
+ case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
+ case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
case X86ISD::FXOR:
@@ -25414,8 +23390,12 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
- case X86ISD::INSERTPS:
- return PerformINSERTPSCombine(N, DAG, Subtarget);
+ case X86ISD::INSERTPS: {
+ if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
+ return PerformINSERTPSCombine(N, DAG, Subtarget);
+ break;
+ }
+ case X86ISD::BLENDI: return PerformBLENDICombine(N, DAG);
case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
}
@@ -25841,6 +23821,23 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
}
}
return;
+ case 'L':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
+ (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
+ Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'M':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getZExtValue() <= 3) {
+ Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ break;
+ }
+ }
+ return;
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 255) {
@@ -25849,6 +23846,14 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
}
}
return;
+ case 'O':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getZExtValue() <= 127) {
+ Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ break;
+ }
+ }
+ return;
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
@@ -25938,8 +23943,9 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
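
The new 'L', 'M', and 'O' cases mirror GCC's x86 constraint letters: 'L'
accepts 0xff, 0xffff, or (on 64-bit) 0xffffffff (masks implementable with a
zero-extending move), 'M' accepts 0-3 (LEA scale shifts), and 'O' accepts
0-127. A classic usage sketch with the related 'N' constraint shown above
(GCC/Clang x86 inline asm; port 0x80 is the traditional POST delay port, and
actually executing this needs I/O privilege):

    #include <cstdint>

    // 'N' requires an unsigned 8-bit constant, the form the one-byte
    // out-to-immediate-port encoding takes.
    void outb80(uint8_t Val) {
      __asm__ volatile("outb %0, %1" : : "a"(Val), "N"(0x80));
    }

    int main() { (void)&outb80; return 0; } // not called: needs privilege
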
-std::pair<unsigned, const TargetRegisterClass*>
-X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+std::pair<unsigned, const TargetRegisterClass *>
+X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
@@ -26045,7 +24051,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
- Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
// Not found as a standard register?
if (!Res.second) {
@@ -26193,7 +24199,7 @@ int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
// "load" ports instead of the dedicated "store" port.
// E.g., on Haswell:
// vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
- // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
+ // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
if (isLegalAddressingMode(AM, Ty))
// Scale represents reg2 * scale, thus account for 1
// as soon as we use a second register.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 7c6ffa2..4423015 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -158,6 +158,10 @@ namespace llvm {
/// vector to a GPR.
MMX_MOVD2W,
+  /// MMX_MOVW2D - Copies a GPR into the low 32-bit word of an MMX vector
+  /// and zeroes out the high word.
+ MMX_MOVW2D,
+
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB.
PEXTRB,
@@ -197,7 +201,12 @@ namespace llvm {
/// ADDSUB - Combined add and sub on an FP vector.
ADDSUB,
-
+  // FADD_RND, FSUB_RND, FMUL_RND, FDIV_RND - FP vector ops with rounding mode.
+ FADD_RND,
+ FSUB_RND,
+ FMUL_RND,
+ FDIV_RND,
+
// SUBUS - Integer sub with unsigned saturation.
SUBUS,
@@ -378,6 +387,18 @@ namespace llvm {
FNMSUB,
FMADDSUB,
FMSUBADD,
+ // FMA with rounding mode
+ FMADD_RND,
+ FNMADD_RND,
+ FMSUB_RND,
+ FNMSUB_RND,
+ FMADDSUB_RND,
+ FMSUBADD_RND,
+ RNDSCALE,
+
+ // Compress and expand
+ COMPRESS,
+ EXPAND,
// Save xmm argument registers to the stack, according to %al. An operator
// is needed so that this can be expanded with control flow.
@@ -543,7 +564,8 @@ namespace llvm {
// X86 Implementation of the TargetLowering interface
class X86TargetLowering final : public TargetLowering {
public:
- explicit X86TargetLowering(const X86TargetMachine &TM);
+ explicit X86TargetLowering(const X86TargetMachine &TM,
+ const X86Subtarget &STI);
unsigned getJumpTableEncoding() const override;
@@ -629,6 +651,10 @@ namespace llvm {
/// This method returns the name of a target specific DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
+ bool isCheapToSpeculateCttz() const override;
+
+ bool isCheapToSpeculateCtlz() const override;
+
/// Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
@@ -675,9 +701,10 @@ namespace llvm {
/// (e.g. {edx}), return the register number and the register class for the
/// register. This should only be used for C_Register constraints. On
/// error, this returns a register number of 0.
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const override;
/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
@@ -724,6 +751,10 @@ namespace llvm {
bool isZExtFree(EVT VT1, EVT VT2) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
+ /// Return true if folding a vector load into ExtVal (a sign, zero, or any
+ /// extend node) is profitable.
+ bool isVectorLoadExtDesirable(SDValue) const override;
+
/// Return true if an FMA operation is faster than a pair of fmul and fadd
/// instructions. fmuladd intrinsics will be expanded to FMAs when this
/// method returns true, otherwise fmuladd is expanded to fmul + fadd.
@@ -762,9 +793,10 @@ namespace llvm {
return !X86ScalarSSEf64 || VT == MVT::f80;
}
- const X86Subtarget* getSubtarget() const {
- return Subtarget;
- }
+ /// Return true if we believe it is correct and profitable to reduce the
+ /// load node to a smaller type.
+ bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
+ EVT NewVT) const override;
/// Return true if the specified scalar FP type is computed in an SSE
/// register, not on the X87 floating point stack.
@@ -787,6 +819,10 @@ namespace llvm {
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
+ /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
+ /// with this index.
+ bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override;
+
/// Intel processors have a unified instruction and data cache
const char * getClearCacheBuiltinName() const override {
return nullptr; // nothing to do, move along.
@@ -810,16 +846,14 @@ namespace llvm {
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
- /// \brief Reset the operation actions based on target options.
- void resetOperationActions() override;
-
bool useLoadStackGuardNode() const override;
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
protected:
- std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const override;
+ std::pair<const TargetRegisterClass *, uint8_t>
+ findRepresentativeClass(const TargetRegisterInfo *TRI,
+ MVT VT) const override;
private:
/// Keep a pointer to the X86Subtarget around so that we can
@@ -827,10 +861,6 @@ namespace llvm {
const X86Subtarget *Subtarget;
const DataLayout *TD;
- /// Used to store the TargetOptions so that we don't waste time resetting
- /// the operation actions unless we have to.
- TargetOptions TO;
-
/// Select between SSE or x87 floating point ops.
/// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations.
@@ -930,7 +960,6 @@ namespace llvm {
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
SDValue
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index b188cd5..4923bc5 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -1,10 +1,27 @@
+//===-- X86InstrAVX512.td - AVX512 Instruction Set ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the X86 AVX512 instruction set, defining the
+// instructions, and properties of the instructions which are needed for code
+// generation, machine code emission, and analysis.
+//
+//===----------------------------------------------------------------------===//
+
// Group template arguments that can be derived from the vector type (EltNum x
// EltVT). These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
-class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
+// The template is also used for scalar types; in that case, numelts is 1.
+class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
string suffix = ""> {
RegisterClass RC = rc;
+ ValueType EltVT = eltvt;
int NumElts = numelts;
// Corresponding mask register class.
@@ -23,7 +40,13 @@ class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
// Suffix used in the instruction mnemonic.
string Suffix = suffix;
- string VTName = "v" # NumElts # EltVT;
+  // VTName is a string name for the vector VT. For vector types it is
+  // "v" # NumElts # EltVT, so a vector of 8 i32 elements becomes v8i32.
+  // Scalar types, where NumElts = 1, are slightly more involved: for those
+  // we build v4f32 or v2f64 instead.
+ string VTName = "v" # !if (!eq (NumElts, 1),
+ !if (!eq (EltVT.Size, 32), 4,
+ !if (!eq (EltVT.Size, 64), 2, NumElts)), NumElts) # EltVT;
// The vector VT.
ValueType VT = !cast<ValueType>(VTName);
@@ -53,14 +76,6 @@ class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
VTName)), VTName));
PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
- // Load patterns used for memory operands. We only have this defined in
- // case of i64 element types for sub-512 integer vectors. For now, keep
- // MemOpFrag undefined in these cases.
- PatFrag MemOpFrag =
- !if (!eq (TypeVariantName, "f"), !cast<PatFrag>("memop" # VTName),
- !if (!eq (EltTypeName, "i64"), !cast<PatFrag>("memop" # VTName),
- !if (!eq (VTName, "v16i32"), !cast<PatFrag>("memop" # VTName), ?)));
-
// The corresponding float type, e.g. v16f32 for v16i32
// Note: For EltSize < 32, FloatVT is illegal and TableGen
// fails to compile, so we choose FloatVT = VT
@@ -86,6 +101,8 @@ class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
!if (!eq (EltTypeName, "f64"), SSEPackedDouble,
SSEPackedInt));
+ RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X, FR64X);
+
// A vector type of the same width with element type i32. This is used to
// create the canonical constant zero node ImmAllZerosV.
ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
@@ -114,6 +131,11 @@ def v2i64x_info : X86VectorVTInfo<2, i64, VR128X, "q">;
def v4f32x_info : X86VectorVTInfo<4, f32, VR128X, "ps">;
def v2f64x_info : X86VectorVTInfo<2, f64, VR128X, "pd">;
+// We map scalar types to the smallest (128-bit) vector type
+// with the appropriate element type. This allows us to reuse the same masking logic.
+def f32x_info : X86VectorVTInfo<1, f32, VR128X, "ss">;
+def f64x_info : X86VectorVTInfo<1, f64, VR128X, "sd">;
+
class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
X86VectorVTInfo i128> {
X86VectorVTInfo info512 = i512;
@@ -183,7 +205,7 @@ multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS, dag MaskingRHS,
- string Round = "",
+ SDNode Select = vselect, string Round = "",
string MaskingConstraint = "",
InstrItinClass itin = NoItinerary,
bit IsCommutable = 0> :
@@ -192,11 +214,11 @@ multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
[(set _.RC:$dst, RHS)],
[(set _.RC:$dst, MaskingRHS)],
[(set _.RC:$dst,
- (vselect _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
+ (Select _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
Round, MaskingConstraint, NoItinerary, IsCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
-// the zero-masking variant of the instruction. In the masking case, the
+// the zero-masking variant of the vector instruction. In the masking case, the
 // preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag Ins, string OpcodeStr,
@@ -208,8 +230,23 @@ multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
!con((ins _.KRCWM:$mask), Ins),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (vselect _.KRCWM:$mask, RHS, _.RC:$src0), Round,
- "$src0 = $dst", itin, IsCommutable>;
+ (vselect _.KRCWM:$mask, RHS, _.RC:$src0), vselect,
+ Round, "$src0 = $dst", itin, IsCommutable>;
+
+// This multiclass generates the unconditional/non-masking, the masking and
+// the zero-masking variant of the scalar instruction.
+multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
+ dag Outs, dag Ins, string OpcodeStr,
+ string AttSrcAsm, string IntelSrcAsm,
+ dag RHS, string Round = "",
+ InstrItinClass itin = NoItinerary,
+ bit IsCommutable = 0> :
+ AVX512_maskable_common<O, F, _, Outs, Ins,
+ !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
+ !con((ins _.KRCWM:$mask), Ins),
+ OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
+ (X86select _.KRCWM:$mask, RHS, _.RC:$src0), X86select,
+ Round, "$src0 = $dst", itin, IsCommutable>;
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
@@ -364,7 +401,7 @@ multiclass vinsert_for_size_no_alt<int Opcode,
SDNodeXForm INSERT_get_vinsert_imm> {
let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
- (ins VR512:$src1, From.RC:$src2, i8imm:$src3),
+ (ins VR512:$src1, From.RC:$src2, u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts #
"\t{$src3, $src2, $src1, $dst|"
"$dst, $src1, $src2, $src3}",
@@ -375,7 +412,7 @@ multiclass vinsert_for_size_no_alt<int Opcode,
let mayLoad = 1 in
def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
- (ins VR512:$src1, From.MemOp:$src2, i8imm:$src3),
+ (ins VR512:$src1, From.MemOp:$src2, u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts #
"\t{$src3, $src2, $src1, $dst|"
"$dst, $src1, $src2, $src3}",
@@ -437,12 +474,12 @@ defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
- (ins VR128X:$src1, VR128X:$src2, i8imm:$src3),
+ (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
EVEX_4V;
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
- (ins VR128X:$src1, f32mem:$src2, i8imm:$src3),
+ (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128X:$dst, (X86insertps VR128X:$src1,
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
@@ -459,7 +496,7 @@ multiclass vextract_for_size<int Opcode,
SDNodeXForm EXTRACT_get_vextract_imm> {
let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
- (ins VR512:$src1, i8imm:$idx),
+ (ins VR512:$src1, u8imm:$idx),
"vextract" # To.EltTypeName # "x4",
"$idx, $src1", "$src1, $idx",
[(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
@@ -467,7 +504,7 @@ multiclass vextract_for_size<int Opcode,
AVX512AIi8Base, EVEX, EVEX_V512;
let mayStore = 1 in
def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
- (ins To.MemOp:$dst, VR512:$src1, i8imm:$src2),
+ (ins To.MemOp:$dst, VR512:$src1, u8imm:$src2),
"vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
"$dst, $src1, $src2}",
[]>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
@@ -566,13 +603,13 @@ def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
- (ins VR128X:$src1, i32i8imm:$src2),
+ (ins VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
EVEX;
def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
- (ins f32mem:$dst, VR128X:$src1, i32i8imm:$src2),
+ (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
@@ -622,6 +659,45 @@ let ExeDomain = SSEPackedDouble in {
avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VT1>;
}
+// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
+// Later, we can canonicalize broadcast instructions before the ISel phase and
+// eliminate the additional patterns during ISel.
+// SrcRC_v and SrcRC_s are the RegisterClasses for the vector and scalar
+// representations of the source.
+multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
+ X86VectorVTInfo _, RegisterClass SrcRC_v,
+ RegisterClass SrcRC_s> {
+ def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
+ (!cast<Instruction>(InstName##"r")
+ (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+
+ let AddedComplexity = 30 in {
+ def : Pat<(_.VT (vselect _.KRCWM:$mask,
+ (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
+ (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
+ (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+
+ def : Pat<(_.VT(vselect _.KRCWM:$mask,
+ (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
+ (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
+ (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+ }
+}
+
+defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
+ VR128X, FR32X>;
+defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
+ VR128X, FR64X>;
+
+let Predicates = [HasVLX] in {
+ defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
+ v8f32x_info, VR128X, FR32X>;
+ defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
+ v4f32x_info, VR128X, FR32X>;
+ defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
+ v4f64x_info, VR128X, FR64X>;
+}
+
def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
(VBROADCASTSSZm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
@@ -632,74 +708,84 @@ def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
(VBROADCASTSDZm addr:$src)>;
-multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
- RegisterClass SrcRC, RegisterClass KRC> {
- def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
- []>, EVEX, EVEX_V512;
- def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
- (ins KRC:$mask, SrcRC:$src),
- !strconcat(OpcodeStr,
- " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
- []>, EVEX, EVEX_V512, EVEX_KZ;
-}
-
-defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
-defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
- VEX_W;
-
+multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
+ RegisterClass SrcRC> {
+ defm r : AVX512_maskable_in_asm<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins SrcRC:$src), "vpbroadcast"##_.Suffix,
+ "$src", "$src", []>, T8PD, EVEX;
+}
+
+multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
+ RegisterClass SrcRC, Predicate prd> {
+ let Predicates = [prd] in
+ defm Z : avx512_int_broadcast_reg<opc, _.info512, SrcRC>, EVEX_V512;
+ let Predicates = [prd, HasVLX] in {
+ defm Z256 : avx512_int_broadcast_reg<opc, _.info256, SrcRC>, EVEX_V256;
+ defm Z128 : avx512_int_broadcast_reg<opc, _.info128, SrcRC>, EVEX_V128;
+ }
+}
+
+defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR32,
+ HasBWI>;
+defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR32,
+ HasBWI>;
+defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
+ HasAVX512>;
+defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
+ HasAVX512>, VEX_W;
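+// Naming sketch (assumed from the defm nesting above): the outer prefix, the
+// EVEX length and the inner "r" concatenate, e.g. VPBROADCASTDrZr and its
+// zero-masked form VPBROADCASTDrZrkz, which the rewritten patterns below use;
+// Z256/Z128 variants appear under HasVLX.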
+
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
- (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
+ (VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
- (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
+ (VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
- (VPBROADCASTDrZrr GR32:$src)>;
+ (VPBROADCASTDrZr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
- (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
+ (VPBROADCASTDrZrkz VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
- (VPBROADCASTQrZrr GR64:$src)>;
+ (VPBROADCASTQrZr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
- (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
+ (VPBROADCASTQrZrkz VK8WM:$mask, GR64:$src)>;
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
- (VPBROADCASTDrZrr GR32:$src)>;
+ (VPBROADCASTDrZr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
- (VPBROADCASTQrZrr GR64:$src)>;
+ (VPBROADCASTQrZr GR64:$src)>;
def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
(v16i32 immAllZerosV), (i16 GR16:$mask))),
- (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
+ (VPBROADCASTDrZrkz (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
(bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
- (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
+ (VPBROADCASTQrZrkz (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
X86MemOperand x86memop, PatFrag ld_frag,
RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
RegisterClass KRC> {
def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
VR128X:$src),
- !strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[(set DstRC:$dst,
(OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
EVEX, EVEX_KZ;
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst,
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
(OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
x86memop:$src),
- !strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
- [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
+ !strconcat(OpcodeStr,
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
(ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
}
}
@@ -716,12 +802,12 @@ multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
RegisterClass KRC> {
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
x86memop:$src),
!strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
}
}
@@ -752,7 +838,7 @@ def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
(VBROADCASTSSZr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
(VBROADCASTSDZr VR128X:$src)>;
-
+
// Provide a fallback in case the load node used in the patterns above
// has additional users, which prevents the pattern from being selected.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
@@ -763,7 +849,7 @@ def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
let Predicates = [HasAVX512] in {
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
- (EXTRACT_SUBREG
+ (EXTRACT_SUBREG
(v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
addr:$src)), sub_ymm)>;
}
@@ -775,15 +861,15 @@ multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
RegisterClass KRC> {
let Predicates = [HasCDI] in
def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX, EVEX_V512;
-
+
let Predicates = [HasCDI, HasVLX] in {
def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX, EVEX_V128;
def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX, EVEX_V256;
}
}
@@ -803,18 +889,18 @@ multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
let ExeDomain = _.ExeDomain in {
def ri : AVX512AIi8<opc, MRMSrcReg, (outs _.RC:$dst),
- (ins _.RC:$src1, i8imm:$src2),
+ (ins _.RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,
(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>,
EVEX;
def mi : AVX512AIi8<opc, MRMSrcMem, (outs _.RC:$dst),
- (ins _.MemOp:$src1, i8imm:$src2),
+ (ins _.MemOp:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,
- (_.VT (OpNode (_.MemOpFrag addr:$src1),
+ (_.VT (OpNode (_.LdFrag addr:$src1),
(i8 imm:$src2))))]>,
EVEX, EVEX_CD8<_.EltSize, CD8VF>;
}
@@ -827,7 +913,7 @@ multiclass avx512_permil<bits<8> OpcImm, bits<8> OpcVar, X86VectorVTInfo _,
def rr : AVX5128I<OpcVar, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2),
!strconcat("vpermil" # _.Suffix,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,
(_.VT (X86VPermilpv _.RC:$src1,
(Ctrl.VT Ctrl.RC:$src2))))]>,
@@ -835,10 +921,10 @@ multiclass avx512_permil<bits<8> OpcImm, bits<8> OpcVar, X86VectorVTInfo _,
def rm : AVX5128I<OpcVar, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, Ctrl.MemOp:$src2),
!strconcat("vpermil" # _.Suffix,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,
(_.VT (X86VPermilpv _.RC:$src1,
- (Ctrl.VT (Ctrl.MemOpFrag addr:$src2)))))]>,
+ (Ctrl.VT (Ctrl.LdFrag addr:$src2)))))]>,
EVEX_4V;
}
}
@@ -859,34 +945,34 @@ def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
(VPERMILPDZri VR512:$src1, imm:$imm)>;
// -- VPERM - register form --
-multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
+multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
EVEX_4V;
}
-defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
+defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, loadv16i32, i512mem,
v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
+defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, loadv8i64, i512mem,
v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedSingle in
-defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
+defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, loadv16f32, f512mem,
v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
-defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
+defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, loadv8f64, f512mem,
v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// -- VPERM2I - 3 source operands form --
@@ -897,7 +983,7 @@ let Constraints = "$src1 = $dst" in {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
EVEX_4V;
@@ -905,7 +991,7 @@ let Constraints = "$src1 = $dst" in {
def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst {${mask}}|"
+ "\t{$src3, $src2, $dst {${mask}}|"
"$dst {${mask}}, $src2, $src3}"),
[(set RC:$dst, (OpVT (vselect KRC:$mask,
(OpNode RC:$src1, RC:$src2,
@@ -917,7 +1003,7 @@ let Constraints = "$src1 = $dst" in {
def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst {${mask}} {z} |",
+                     "\t{$src3, $src2, $dst {${mask}} {z}|",
"$dst {${mask}} {z}, $src2, $src3}"),
[(set RC:$dst, (OpVT (vselect KRC:$mask,
(OpNode RC:$src1, RC:$src2,
@@ -929,7 +1015,7 @@ let Constraints = "$src1 = $dst" in {
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src1, RC:$src2,
(mem_frag addr:$src3))))]>, EVEX_4V;
@@ -937,7 +1023,7 @@ let Constraints = "$src1 = $dst" in {
def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst {${mask}}|"
+ "\t{$src3, $src2, $dst {${mask}}|"
"$dst {${mask}}, $src2, $src3}"),
[(set RC:$dst,
(OpVT (vselect KRC:$mask,
@@ -950,7 +1036,7 @@ let Constraints = "$src1 = $dst" in {
def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst {${mask}} {z}|"
+ "\t{$src3, $src2, $dst {${mask}} {z}|"
"$dst {${mask}} {z}, $src2, $src3}"),
[(set RC:$dst,
(OpVT (vselect KRC:$mask,
@@ -961,16 +1047,16 @@ let Constraints = "$src1 = $dst" in {
EVEX_4V, EVEX_KZ;
}
}
-defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32,
+defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, loadv16i32,
i512mem, X86VPermiv3, v16i32, VK16WM>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64,
+defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, loadv8i64,
i512mem, X86VPermiv3, v8i64, VK8WM>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
+defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, loadv16f32,
i512mem, X86VPermiv3, v16f32, VK16WM>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
+defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, loadv8f64,
i512mem, X86VPermiv3, v8f64, VK8WM>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
@@ -990,93 +1076,126 @@ multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
(MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
}
-defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, memopv16i32, i512mem,
+defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, loadv16i32, i512mem,
X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, memopv8i64, i512mem,
+defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, loadv8i64, i512mem,
X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, i512mem,
+defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, loadv16f32, i512mem,
X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, i512mem,
+defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, loadv8f64, i512mem,
X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
//
-multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
- RegisterClass KRC, RegisterClass RC,
- X86MemOperand x86memop, PatFrag mem_frag,
- SDNode OpNode, ValueType vt> {
- def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, RC:$src2),
+multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
+ let ExeDomain = _.ExeDomain in {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
- (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
- let mayLoad = 1 in
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86memop:$src2),
+                      "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
+ []>, EVEX_4V;
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2)))]>, EVEX_4V, EVEX_K;
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_KZ;
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- []>, EVEX_4V, EVEX_K;
+                      "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
+ (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
+ EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
+ }
+ }
}
+multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
+
+ def rmbk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src1, _.ScalarMemOp:$src2),
+ !strconcat(OpcodeStr,
+ "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
+ "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
+ [(set _.RC:$dst,(X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
+ (X86VBroadcast (_.ScalarLdFrag addr:$src2))))]>,
+ EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+
+ def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2),
+ !strconcat(OpcodeStr,
+ "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
+ "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
+ []>, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+
+}
+
+multiclass blendmask_dq <bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>,
+ avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
+
+ let Predicates = [HasVLX] in {
+ defm Z256 : avx512_blendmask<opc, OpcodeStr, VTInfo.info256>,
+ avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
+ defm Z128 : avx512_blendmask<opc, OpcodeStr, VTInfo.info128>,
+ avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
+ }
+}
+
+multiclass blendmask_bw <bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ let Predicates = [HasBWI] in
+ defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
+
+ let Predicates = [HasBWI, HasVLX] in {
+ defm Z256 : avx512_blendmask <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
+ defm Z128 : avx512_blendmask <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
+ }
+}
+
+defm VBLENDMPS : blendmask_dq <0x65, "vblendmps", avx512vl_f32_info>;
+defm VBLENDMPD : blendmask_dq <0x65, "vblendmpd", avx512vl_f64_info>, VEX_W;
+defm VPBLENDMD : blendmask_dq <0x64, "vpblendmd", avx512vl_i32_info>;
+defm VPBLENDMQ : blendmask_dq <0x64, "vpblendmq", avx512vl_i64_info>, VEX_W;
+defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
+defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
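+// Illustrative naming (assumed): blendmask_dq/blendmask_bw expand into
+// Z/Z256/Z128 variants with rr/rrk/rrkz and rm/rmk/rmkz forms, e.g. the
+// VBLENDMPSZrrk and VPBLENDMDZrrk used by the v8f32/v8i32 patterns below.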
-let ExeDomain = SSEPackedSingle in
-defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
- VK16WM, VR512, f512mem,
- memopv16f32, vselect, v16f32>,
- EVEX_CD8<32, CD8VF>, EVEX_V512;
-let ExeDomain = SSEPackedDouble in
-defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
- VK8WM, VR512, f512mem,
- memopv8f64, vselect, v8f64>,
- VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
-
-def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
- (v16f32 VR512:$src2), (i16 GR16:$mask))),
- (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
- VR512:$src1, VR512:$src2)>;
-
-def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
- (v8f64 VR512:$src2), (i8 GR8:$mask))),
- (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
- VR512:$src1, VR512:$src2)>;
-
-defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
- VK16WM, VR512, f512mem,
- memopv16i32, vselect, v16i32>,
- EVEX_CD8<32, CD8VF>, EVEX_V512;
-
-defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
- VK8WM, VR512, f512mem,
- memopv8i64, vselect, v8i64>,
- VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
-
-def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (i16 GR16:$mask))),
- (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16),
- VR512:$src1, VR512:$src2)>;
-
-def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (i8 GR8:$mask))),
- (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8),
- VR512:$src1, VR512:$src2)>;
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
(v8f32 VR256X:$src2))),
- (EXTRACT_SUBREG
- (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
+ (EXTRACT_SUBREG
+ (v16f32 (VBLENDMPSZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
(v8i32 VR256X:$src2))),
- (EXTRACT_SUBREG
- (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
+ (EXTRACT_SUBREG
+ (v16i32 (VPBLENDMDZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
@@ -1086,35 +1205,40 @@ def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
- Operand CC, SDNode OpNode, ValueType VT,
- PatFrag ld_frag, string asm, string asm_alt> {
+ SDNode OpNode, ValueType VT,
+ PatFrag ld_frag, string Suffix> {
def rr : AVX512Ii8<0xC2, MRMSrcReg,
- (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ (outs VK1:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
IIC_SSE_ALU_F32S_RR>, EVEX_4V;
def rm : AVX512Ii8<0xC2, MRMSrcMem,
- (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VK1:$dst, (OpNode (VT RC:$src1),
(ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
- (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
+ (outs VK1:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
+ !strconcat("vcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
+ let mayLoad = 1 in
def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
- (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
+ !strconcat("vcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
}
}
let Predicates = [HasAVX512] in {
-defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
- "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- XS;
-defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
- "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- XD, VEX_W;
+defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, X86cmpms, f32, loadf32, "ss">,
+ XS;
+defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, X86cmpms, f64, loadf64, "sd">,
+ XD, VEX_W;
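+// Asm sketch (assuming the usual AVXCC condition printing): the canonical
+// form folds the condition into the mnemonic, while the _alt form takes an
+// explicit immediate, e.g.
+//   vcmpless %xmm1, %xmm0, %k0     (canonical, $cc == 2)
+//   vcmpss $2, %xmm1, %xmm0, %k0   (parser-only _alt form)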
}
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -1249,7 +1373,7 @@ def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
X86VectorVTInfo _> {
def rri : AVX512AIi8<opc, MRMSrcReg,
- (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
+ (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
@@ -1257,7 +1381,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
let mayLoad = 1 in
def rmi : AVX512AIi8<opc, MRMSrcMem,
- (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
+ (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
@@ -1266,7 +1390,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
IIC_SSE_ALU_F32P_RM>, EVEX_4V;
def rrik : AVX512AIi8<opc, MRMSrcReg,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
- AVXCC:$cc),
+ AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{$src2, $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, $src2}"),
@@ -1277,7 +1401,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
let mayLoad = 1 in
def rmik : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
- AVXCC:$cc),
+ AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{$src2, $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, $src2}"),
@@ -1290,25 +1414,27 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : AVX512AIi8<opc, MRMSrcReg,
- (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, i8imm:$cc),
+ (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
!strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
"$dst, $src1, $src2, $cc}"),
[], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ let mayLoad = 1 in
def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
- (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, i8imm:$cc),
+ (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
!strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
"$dst, $src1, $src2, $cc}"),
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
- i8imm:$cc),
+ u8imm:$cc),
!strconcat("vpcmp", Suffix,
"\t{$cc, $src2, $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, $src2, $cc}"),
[], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
+ let mayLoad = 1 in
def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
- i8imm:$cc),
+ u8imm:$cc),
!strconcat("vpcmp", Suffix,
"\t{$cc, $src2, $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, $src2, $cc}"),
@@ -1319,10 +1445,9 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
X86VectorVTInfo _> :
avx512_icmp_cc<opc, Suffix, OpNode, _> {
- let mayLoad = 1 in {
def rmib : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
- AVXCC:$cc),
+ AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
"$dst, $src1, ${src2}", _.BroadcastStr, "}"),
@@ -1332,7 +1457,7 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
def rmibk : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
- _.ScalarMemOp:$src2, AVXCC:$cc),
+ _.ScalarMemOp:$src2, AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
"\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
@@ -1341,20 +1466,19 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
(X86VBroadcast (_.ScalarLdFrag addr:$src2)),
imm:$cc)))],
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
- }
// Accept explicit immediate argument form instead of comparison code.
- let isAsmParserOnly = 1, hasSideEffects = 0 in {
+ let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 1 in {
def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
- i8imm:$cc),
+ u8imm:$cc),
!strconcat("vpcmp", Suffix,
"\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
"$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
- _.ScalarMemOp:$src2, i8imm:$cc),
+ _.ScalarMemOp:$src2, u8imm:$cc),
!strconcat("vpcmp", Suffix,
"\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
@@ -1414,30 +1538,32 @@ multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
def rri : AVX512PIi8<0xC2, MRMSrcReg,
(outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
!strconcat("vcmp${cc}", suffix,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
+ let hasSideEffects = 0 in
def rrib: AVX512PIi8<0xC2, MRMSrcReg,
(outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
!strconcat("vcmp${cc}", suffix,
- " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
+ "\t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
[], d>, EVEX_B;
def rmi : AVX512PIi8<0xC2, MRMSrcMem,
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
!strconcat("vcmp${cc}", suffix,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
[(set KRC:$dst,
- (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
+ (X86cmpm (vt RC:$src1), (load addr:$src2), imm:$cc))], d>;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
- (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
!strconcat("vcmp", suffix,
- " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
+ let mayLoad = 1 in
def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
- (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
!strconcat("vcmp", suffix,
- " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
}
}
@@ -1465,25 +1591,25 @@ def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
imm:$cc), VK8)>;
def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
- (v16f32 VR512:$src2), imm:$cc, (i16 -1),
+ (v16f32 VR512:$src2), i8immZExt5:$cc, (i16 -1),
FROUND_NO_EXC)),
(COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
(I8Imm imm:$cc)), GR16)>;
-
+
def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
- (v8f64 VR512:$src2), imm:$cc, (i8 -1),
+ (v8f64 VR512:$src2), i8immZExt5:$cc, (i8 -1),
FROUND_NO_EXC)),
(COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
(I8Imm imm:$cc)), GR8)>;
def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
- (v16f32 VR512:$src2), imm:$cc, (i16 -1),
+ (v16f32 VR512:$src2), i8immZExt5:$cc, (i16 -1),
FROUND_CURRENT)),
(COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
(I8Imm imm:$cc)), GR16)>;
def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
- (v8f64 VR512:$src2), imm:$cc, (i8 -1),
+ (v8f64 VR512:$src2), i8immZExt5:$cc, (i8 -1),
FROUND_CURRENT)),
(COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
(I8Imm imm:$cc)), GR8)>;
@@ -1495,17 +1621,18 @@ def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
//
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
string OpcodeStr, RegisterClass KRC,
- ValueType vvt, ValueType ivt, X86MemOperand x86memop> {
+ ValueType vvt, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
let mayLoad = 1 in
def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
- [(set KRC:$dst, (vvt (bitconvert (ivt (load addr:$src)))))]>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (vvt (load addr:$src)))]>;
let mayStore = 1 in
def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(store KRC:$src, addr:$dst)]>;
}
}
@@ -1514,34 +1641,32 @@ multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
RegisterClass KRC, RegisterClass GRC> {
let hasSideEffects = 0 in {
def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
}
}
let Predicates = [HasDQI] in
- defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8,
- i8mem>,
+ defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>,
avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
VEX, PD;
let Predicates = [HasAVX512] in
- defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16,
- i16mem>,
+ defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
VEX, PS;
let Predicates = [HasBWI] in {
- defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32,
- i32mem>, VEX, PD, VEX_W;
+  defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32mem>,
+ VEX, PD, VEX_W;
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
VEX, XD;
}
let Predicates = [HasBWI] in {
- defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64,
- i64mem>, VEX, PS, VEX_W;
+ defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
+ VEX, PS, VEX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
VEX, XD, VEX_W;
}
@@ -1572,24 +1697,34 @@ let Predicates = [HasBWI] in {
let Predicates = [HasDQI] in {
def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
(KMOVBmk addr:$dst, VK8:$src)>;
+ def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
+ (KMOVBkm addr:$src)>;
+}
+let Predicates = [HasAVX512, NoDQI] in {
+ def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
+ (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
+ def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
+ (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
}
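+// Without DQI there is no byte-sized mask move, so the i8 <-> v8i1
+// conversions above are presumably routed through VK16 with KMOVW.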
let Predicates = [HasAVX512] in {
def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
(KMOVWmk addr:$dst, VK16:$src)>;
- def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
- (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
def : Pat<(i1 (load addr:$src)),
(COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
- def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
- (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
+ def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
+ (KMOVWkm addr:$src)>;
}
let Predicates = [HasBWI] in {
def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
(KMOVDmk addr:$dst, VK32:$src)>;
+ def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
+ (KMOVDkm addr:$src)>;
}
let Predicates = [HasBWI] in {
def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
(KMOVQmk addr:$dst, VK64:$src)>;
+ def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
+ (KMOVQkm addr:$src)>;
}
let Predicates = [HasAVX512] in {
@@ -1666,7 +1801,7 @@ multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
Predicate prd> {
let Predicates = [prd] in
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set KRC:$dst, (OpNode KRC:$src))]>;
}
@@ -1703,7 +1838,7 @@ let Predicates = [HasBWI] in
def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
// KNL does not support KMOVB; an 8-bit mask is promoted to 16 bits.
-let Predicates = [HasAVX512] in {
+let Predicates = [HasAVX512, NoDQI] in {
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
(COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
@@ -1720,7 +1855,7 @@ multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
let Predicates = [prd] in
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
}
@@ -1796,7 +1931,7 @@ multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX512] in
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
}
multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
@@ -1825,35 +1960,50 @@ multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
SDNode OpNode> {
let Predicates = [HasAVX512], Defs = [EFLAGS] in
def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
}
multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
VEX, PS;
+ let Predicates = [HasDQI] in
+ defm B : avx512_mask_testop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
+ VEX, PD;
+ let Predicates = [HasBWI] in {
+ defm Q : avx512_mask_testop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
+ VEX, PS, VEX_W;
+ defm D : avx512_mask_testop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
+ VEX, PD, VEX_W;
+ }
}
defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
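+// With the additions above, this defm presumably also provides KORTESTB
+// (DQI) and KORTESTD/KORTESTQ (BWI) alongside KORTESTW.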
-def : Pat<(X86cmp VK1:$src1, (i1 0)),
- (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
- (COPY_TO_REGCLASS VK1:$src1, VK16))>;
-
// Mask shift
multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
SDNode OpNode> {
let Predicates = [HasAVX512] in
- def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
+ def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, u8imm:$imm),
!strconcat(OpcodeStr,
- " \t{$imm, $src, $dst|$dst, $src, $imm}"),
+ "\t{$imm, $src, $dst|$dst, $src, $imm}"),
[(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
}
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
SDNode OpNode> {
defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
- VEX, TAPD, VEX_W;
+ VEX, TAPD, VEX_W;
+ let Predicates = [HasDQI] in
+ defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
+ VEX, TAPD;
+ let Predicates = [HasBWI] in {
+ defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
+ VEX, TAPD, VEX_W;
+ let Predicates = [HasDQI] in
+ defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
+ VEX, TAPD;
+ }
}
defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
@@ -1904,10 +2054,14 @@ let Predicates = [HasVLX] in {
}
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
- (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
+ (v8i1 (COPY_TO_REGCLASS
+ (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
+ (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
- (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
+ (v8i1 (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
+ (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
@@ -2001,7 +2155,7 @@ multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, string ld_pat,
multiclass avx512_store<bits<8> opc, string OpcodeStr, PatFrag st_frag,
ValueType OpVT, RegisterClass KRC, RegisterClass RC,
X86MemOperand memop, Domain d> {
- let isAsmParserOnly = 1, hasSideEffects = 0 in {
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], d>,
EVEX;
@@ -2088,6 +2242,22 @@ def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
(bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
(VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
+def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr,
+ (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
+ (VMOVAPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
+
+def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr,
+ (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
+ (VMOVAPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
+
+def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr,
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VMOVAPDZrm addr:$ptr)>;
+
+def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr,
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
+ (VMOVAPSZrm addr:$ptr)>;
+
def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
GR16:$mask),
(VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
@@ -2097,6 +2267,55 @@ def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
(VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
VR512:$src)>;
+def: Pat<(int_x86_avx512_mask_store_ps_512 addr:$ptr, (v16f32 VR512:$src),
+ GR16:$mask),
+ (VMOVAPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
+ VR512:$src)>;
+def: Pat<(int_x86_avx512_mask_store_pd_512 addr:$ptr, (v8f64 VR512:$src),
+ GR8:$mask),
+ (VMOVAPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
+ VR512:$src)>;
+
+def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src)),
+ (VMOVUPSZmrk addr:$ptr,
+ (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;
+
+def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
+ (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmkz
+ (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
+
+def: Pat<(masked_store addr:$ptr, VK16WM:$mask, (v16f32 VR512:$src)),
+ (VMOVUPSZmrk addr:$ptr, VK16WM:$mask, VR512:$src)>;
+
+def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f64 VR512:$src)),
+ (VMOVUPDZmrk addr:$ptr, VK8WM:$mask, VR512:$src)>;
+
+def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask, undef)),
+ (VMOVUPSZrmkz VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask,
+ (bc_v16f32 (v16i32 immAllZerosV)))),
+ (VMOVUPSZrmkz VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask, (v16f32 VR512:$src0))),
+ (VMOVUPSZrmk VR512:$src0, VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask, undef)),
+ (VMOVUPDZrmkz VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask,
+ (bc_v8f64 (v16i32 immAllZerosV)))),
+ (VMOVUPDZrmkz VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask, (v8f64 VR512:$src0))),
+ (VMOVUPDZrmk VR512:$src0, VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src0))),
+ (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmk
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src0, sub_ymm),
+ (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
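+// The v8f32 forms above have no 256-bit encoding here, so the mask is
+// widened to VK16 and the value is moved through a 512-bit register with
+// INSERT_SUBREG/EXTRACT_SUBREG on sub_ymm; the 512-bit forms map directly.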
+
defm VMOVDQA32 : avx512_load_vl<0x6F, "vmovdqa32", "alignedload", "i", "32",
"16", "8", "4", SSEPackedInt, HasAVX512>,
avx512_store_vl<0x7F, "vmovdqa32", "alignedstore",
@@ -2171,6 +2390,46 @@ def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
(VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
}
+def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, (v16i32 immAllZerosV))),
+ (VMOVDQU32Zrmkz VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, undef)),
+ (VMOVDQU32Zrmkz VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, (v16i32 VR512:$src0))),
+ (VMOVDQU32Zrmk VR512:$src0, VK16WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask,
+ (bc_v8i64 (v16i32 immAllZerosV)))),
+ (VMOVDQU64Zrmkz VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask, undef)),
+ (VMOVDQU64Zrmkz VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask, (v8i64 VR512:$src0))),
+ (VMOVDQU64Zrmk VR512:$src0, VK8WM:$mask, addr:$ptr)>;
+
+def: Pat<(masked_store addr:$ptr, VK16WM:$mask, (v16i32 VR512:$src)),
+ (VMOVDQU32Zmrk addr:$ptr, VK16WM:$mask, VR512:$src)>;
+
+def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i64 VR512:$src)),
+ (VMOVDQU64Zmrk addr:$ptr, VK8WM:$mask, VR512:$src)>;
+
+// SKX replacement: with VLX, the 256-bit masked store has a native encoding.
+def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
+ (VMOVDQU32Z256mrk addr:$ptr, VK8WM:$mask, VR256:$src)>;
+
+// KNL replacement: widen the mask to VK16 and the value to 512 bits.
+def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
+ (VMOVDQU32Zmrk addr:$ptr,
+ (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;
+
+def: Pat<(v8i32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
+ (v8i32 (EXTRACT_SUBREG (v16i32 (VMOVDQU32Zrmkz
+ (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
+
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
@@ -2277,12 +2536,12 @@ def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
// AVX-512 MOVSS, MOVSD
//===----------------------------------------------------------------------===//
-multiclass avx512_move_scalar <string asm, RegisterClass RC,
+multiclass avx512_move_scalar <string asm, RegisterClass RC,
SDNode OpNode, ValueType vt,
X86MemOperand x86memop, PatFrag mem_pat> {
let hasSideEffects = 0 in {
- def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
- !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst, (vt (OpNode VR128X:$src1,
(scalar_to_vector RC:$src2))))],
IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
@@ -2290,19 +2549,19 @@ multiclass avx512_move_scalar <string asm, RegisterClass RC,
def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
!strconcat(asm,
- " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
+ "\t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
[], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
EVEX, VEX_LIG;
let mayStore = 1 in {
def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
- !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
EVEX, VEX_LIG;
def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src),
- !strconcat(asm, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
[], IIC_SSE_MOV_S_MR>,
EVEX, VEX_LIG, EVEX_K;
} // mayStore
@@ -2359,7 +2618,7 @@ let Predicates = [HasAVX512] in {
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (v4f32 (V_SET0)),
+ (VMOVSSZrr (v4f32 (V_SET0)),
(EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
(SUBREG_TO_REG (i32 0),
@@ -2488,7 +2747,7 @@ let AddedComplexity = 15 in
def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(set VR128X:$dst, (v2i64 (X86vzmovl
+ [(set VR128X:$dst, (v2i64 (X86vzmovl
(v2i64 VR128X:$src))))],
IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
@@ -2510,7 +2769,7 @@ let Predicates = [HasAVX512] in {
(VMOV64toPQIZrr GR64:$src)>;
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
(VMOVDI2PDIZrr GR32:$src)>;
-
+
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(VMOVDI2PDIZrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
@@ -2751,48 +3010,48 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
{
def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, EVEX_4V;
def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], itins.rr>, EVEX_4V, EVEX_K;
def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
+                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}} {z}",
"|$dst {${mask}} {z}, $src1, $src2}"),
[], itins.rr>, EVEX_4V, EVEX_KZ;
}
let mayLoad = 1 in {
def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, EVEX_4V;
def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], itins.rm>, EVEX_4V, EVEX_K;
def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
+ "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
[], itins.rm>, EVEX_4V, EVEX_KZ;
def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
[], itins.rm>, EVEX_4V, EVEX_B;
def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
BrdcstStr, "}"),
[], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
BrdcstStr, "}"),
[], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
@@ -2811,12 +3070,12 @@ defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
- memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ loadv8i64, i512mem, loadi64, i64mem, "{1to8}",
SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
EVEX_CD8<64, CD8VF>, VEX_W;
defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
- memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ loadv8i64, i512mem, loadi64, i64mem, "{1to8}",
SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
@@ -2902,16 +3161,16 @@ multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
d>, EVEX_4V;
}
-defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
+defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, loadv8f64,
VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
+defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, loadv8f64,
VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
+defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, loadv8f64,
VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
+defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, loadv8f64,
VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
@@ -2920,52 +3179,52 @@ multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86MemOperand x86memop> {
def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
IIC_SSE_UNPCK>, EVEX_4V;
def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
(bitconvert (memop_frag addr:$src2)))))],
IIC_SSE_UNPCK>, EVEX_4V;
}
defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
- VR512, memopv16i32, i512mem>, EVEX_V512,
+ VR512, loadv16i32, i512mem>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
- VR512, memopv8i64, i512mem>, EVEX_V512,
+ VR512, loadv8i64, i512mem>, EVEX_V512,
VEX_W, EVEX_CD8<64, CD8VF>;
defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
- VR512, memopv16i32, i512mem>, EVEX_V512,
+ VR512, loadv16i32, i512mem>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
- VR512, memopv8i64, i512mem>, EVEX_V512,
+ VR512, loadv8i64, i512mem>, EVEX_V512,
VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - PSHUFD
//
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
- SDNode OpNode, PatFrag mem_frag,
+ SDNode OpNode, PatFrag mem_frag,
X86MemOperand x86memop, ValueType OpVT> {
def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, i8imm:$src2),
+ (ins RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
EVEX;
def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
- (ins x86memop:$src1, i8imm:$src2),
+ (ins x86memop:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode (mem_frag addr:$src1),
(i8 imm:$src2))))]>, EVEX;
}
-defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
+defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, loadv16i32,
i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
//===----------------------------------------------------------------------===//
@@ -3027,7 +3286,16 @@ multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
}//let mayLoad = 1
}
-multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
+ X86VectorVTInfo _, bit IsCommutable> {
+ defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix,
+ "$rc, $src2, $src1", "$src1, $src2, $rc",
+ (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 imm:$rc)))>,
+ EVEX_4V, EVEX_B, EVEX_RC;
+}
+
+multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
bit IsCommutable = 0> {
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
IsCommutable>, EVEX_V512, PS,
@@ -3053,12 +3321,23 @@ multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>;
-defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>;
+multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
+ defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info, 0>,
+ EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+ defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info, 0>,
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
+}
+
+defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>,
+ avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
+defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>,
+ avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
+defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>,
+ avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
+defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>,
+ avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>;
defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>;
-defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>;
-defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>;
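// Note: an illustrative use of the rounded forms added above (assembler
// spelling assumed): the AVX512RC:$rc operand carries the embedded rounding
// mode that EVEX_B/EVEX_RC encode on the 512-bit register forms, e.g.:
//   vaddps {rz-sae}, %zmm2, %zmm1, %zmm0   # round toward zero, no exceptions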
def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
(v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
@@ -3083,34 +3362,34 @@ def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
// AVX-512 VPTESTM instructions
//===----------------------------------------------------------------------===//
-multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
- RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
+multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
SDNode OpNode, ValueType vt> {
def rr : AVX512PI<opc, MRMSrcReg,
- (outs KRC:$dst), (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
SSEPackedInt>, EVEX_4V;
def rm : AVX512PI<opc, MRMSrcMem,
- (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set KRC:$dst, (OpNode (vt RC:$src1),
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1),
(bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
}
defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
- memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
+ loadv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
- memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
+ loadv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
EVEX_CD8<64, CD8VF>;
let Predicates = [HasCDI] in {
defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
- memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
+ loadv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
- memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
+ loadv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
EVEX_CD8<64, CD8VF>;
}
@@ -3121,147 +3400,127 @@ def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
(v8i64 VR512:$src2), (i8 -1))),
(COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
+
//===----------------------------------------------------------------------===//
// AVX-512 Shift instructions
//===----------------------------------------------------------------------===//
multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
- string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
+ string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
- (ins _.RC:$src1, i8imm:$src2), OpcodeStr,
+ (ins _.RC:$src1, u8imm:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
" ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIi8Base, EVEX_4V;
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
- (ins _.MemOp:$src1, i8imm:$src2), OpcodeStr,
+ (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
- (_.VT (OpNode (_.MemOpFrag addr:$src1), (i8 imm:$src2))),
+ (_.VT (OpNode (_.LdFrag addr:$src1), (i8 imm:$src2))),
" ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V;
}
multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- RegisterClass RC, ValueType vt, ValueType SrcVT,
- PatFrag bc_frag, RegisterClass KRC> {
- // src2 is always 128-bit
- def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, VR128X:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
- SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
- def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, VR128X:$src2),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
- [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
- def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (vt (OpNode RC:$src1,
- (bc_frag (memopv2i64 addr:$src2)))))],
- SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
- def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, i128mem:$src2),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
- [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
+ ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
+ // src2 is always 128-bit
+ defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
+ " ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1, (bc_frag (loadv2i64 addr:$src2)))),
+ " ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase, EVEX_4V;
+}
+
+multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
+ defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag, _>, EVEX_V512;
+}
+
+multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, string OpcodeStr,
+ SDNode OpNode> {
+ defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
+ v16i32_info>, EVEX_CD8<32, CD8VQ>;
+ defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
+ v8i64_info>, EVEX_CD8<64, CD8VQ>, VEX_W;
}
defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
v16i32_info>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
- VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
- EVEX_CD8<32, CD8VQ>;
-
defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
v8i64_info>, EVEX_V512,
EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
- VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
- EVEX_CD8<64, CD8VQ>, VEX_W;
defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
v16i32_info>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
-defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
- VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
- EVEX_CD8<32, CD8VQ>;
-
defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
v8i64_info>, EVEX_V512,
EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
- VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
- EVEX_CD8<64, CD8VQ>, VEX_W;
defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
v16i32_info>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
- VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
- EVEX_CD8<32, CD8VQ>;
-
defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
v8i64_info>, EVEX_V512,
EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
- VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
- EVEX_CD8<64, CD8VQ>, VEX_W;
+
+defm VPSLL : avx512_shift_types<0xF2, 0xF3, "vpsll", X86vshl>;
+defm VPSRA : avx512_shift_types<0xE2, 0xE2, "vpsra", X86vsra>;
+defm VPSRL : avx512_shift_types<0xD2, 0xD3, "vpsrl", X86vsrl>;
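// Note: a sketch of what the consolidated defms above expand to, assuming the
// usual AVX512_maskable expansion: each leaf now comes with merge-masked (k)
// and zero-masked (kz) variants instead of hand-written rrk/rmk forms, e.g.:
//   VPSLLDZrr, VPSLLDZrrk, VPSLLDZrrkz, VPSLLDZrm, VPSLLDZrmk, VPSLLDZrmkz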
//===-------------------------------------------------------------------===//
// Variable Bit Shifts
//===-------------------------------------------------------------------===//
multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
- RegisterClass RC, ValueType vt,
- X86MemOperand x86memop, PatFrag mem_frag> {
- def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst,
- (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
- EVEX_4V;
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst,
- (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
- EVEX_4V;
+ X86VectorVTInfo _> {
+ defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
+ " ", SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2))),
+ " ", SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_4V;
}
-defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
- i512mem, memopv16i32>, EVEX_V512,
- EVEX_CD8<32, CD8VF>;
-defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
- i512mem, memopv8i64>, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
-defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
- i512mem, memopv16i32>, EVEX_V512,
- EVEX_CD8<32, CD8VF>;
-defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
- i512mem, memopv8i64>, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
-defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
- i512mem, memopv16i32>, EVEX_V512,
- EVEX_CD8<32, CD8VF>;
-defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
- i512mem, memopv8i64>, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
+multiclass avx512_var_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
+}
+
+multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
+ defm D : avx512_var_shift_sizes<opc, OpcodeStr#"d", OpNode,
+ avx512vl_i32_info>, EVEX_CD8<32, CD8VQ>;
+ defm Q : avx512_var_shift_sizes<opc, OpcodeStr#"q", OpNode,
+ avx512vl_i64_info>, EVEX_CD8<64, CD8VQ>, VEX_W;
+}
+
+defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>;
+defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>;
+defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>;
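// Note: the variable shifts select straight from the generic shl/srl/sra
// nodes with a vector shift amount; a minimal IR sketch (expected selection
// shown as a comment):
//   %r = shl <16 x i32> %x, %amt    ; selects to VPSLLVDZrr under AVX-512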
//===----------------------------------------------------------------------===//
// AVX-512 - MOVDDUP
//===----------------------------------------------------------------------===//
-multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
+multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
X86MemOperand x86memop, PatFrag memop_frag> {
def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst,
(VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
}
-defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
+defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, loadv8f64>,
VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
(VMOVDDUPZrm addr:$src)>;
@@ -3273,26 +3532,26 @@ multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
ValueType vt, RegisterClass RC, PatFrag mem_frag,
X86MemOperand x86memop> {
def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
let mayLoad = 1 in
def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
}
defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
- v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
+ v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
- v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
+ v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
-def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
+def : Pat<(v16i32 (X86Movshdup (loadv16i32 addr:$src))),
(VMOVSHDUPZrm addr:$src)>;
def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
-def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
+def : Pat<(v16i32 (X86Movsldup (loadv16i32 addr:$src))),
(VMOVSLDUPZrm addr:$src)>;
//===----------------------------------------------------------------------===//
@@ -3336,73 +3595,93 @@ multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
AVX512FMA3Base;
let mayLoad = 1 in
- def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
- (ins _.RC:$src1, _.RC:$src2, _.MemOp:$src3),
- !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, _.RC:$src2,
- (_.MemOpFrag addr:$src3))))]>;
- def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
- (ins _.RC:$src1, _.RC:$src2, _.ScalarMemOp:$src3),
- !strconcat(OpcodeStr, " \t{${src3}", _.BroadcastStr,
- ", $src2, $dst|$dst, $src2, ${src3}", _.BroadcastStr, "}"),
- [(set _.RC:$dst, (OpNode _.RC:$src1, _.RC:$src2,
- (_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3)))))]>, EVEX_B;
-}
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr, ", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr),
+ (OpNode _.RC:$src1, _.RC:$src2, (_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
+ }
+} // Constraints = "$src1 = $dst"
+
+let Constraints = "$src1 = $dst" in {
+// Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
+multiclass avx512_fma3_round_rrb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ SDPatternOperator OpNode> {
+ defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
+ OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 imm:$rc)))>,
+ AVX512FMA3Base, EVEX_B, EVEX_RC;
+ }
} // Constraints = "$src1 = $dst"
+multiclass avx512_fma3_round_forms<bits<8> opc213, string OpcodeStr,
+ X86VectorVTInfo VTI, SDPatternOperator OpNode> {
+ defm v213r : avx512_fma3_round_rrb<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
+ VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
+}
+
multiclass avx512_fma3p_forms<bits<8> opc213, bits<8> opc231,
string OpcodeStr, X86VectorVTInfo VTI,
SDPatternOperator OpNode> {
- defm v213 : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
- VTI, OpNode>,
- EVEX_V512, EVEX_CD8<VTI.EltSize, CD8VF>;
+ defm v213r : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
+ VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
- defm v231 : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
- VTI>,
- EVEX_V512, EVEX_CD8<VTI.EltSize, CD8VF>;
+ defm v231r : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
+ VTI>, EVEX_CD8<VTI.EltSize, CD8VF>;
}
+multiclass avx512_fma3p<bits<8> opc213, bits<8> opc231,
+ string OpcodeStr,
+ SDPatternOperator OpNode,
+ SDPatternOperator OpNodeRnd> {
let ExeDomain = SSEPackedSingle in {
- defm VFMADDPSZ : avx512_fma3p_forms<0xA8, 0xB8, "vfmadd",
- v16f32_info, X86Fmadd>;
- defm VFMSUBPSZ : avx512_fma3p_forms<0xAA, 0xBA, "vfmsub",
- v16f32_info, X86Fmsub>;
- defm VFMADDSUBPSZ : avx512_fma3p_forms<0xA6, 0xB6, "vfmaddsub",
- v16f32_info, X86Fmaddsub>;
- defm VFMSUBADDPSZ : avx512_fma3p_forms<0xA7, 0xB7, "vfmsubadd",
- v16f32_info, X86Fmsubadd>;
- defm VFNMADDPSZ : avx512_fma3p_forms<0xAC, 0xBC, "vfnmadd",
- v16f32_info, X86Fnmadd>;
- defm VFNMSUBPSZ : avx512_fma3p_forms<0xAE, 0xBE, "vfnmsub",
- v16f32_info, X86Fnmsub>;
-}
+ defm NAME##PSZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v16f32_info, OpNode>,
+ avx512_fma3_round_forms<opc213, OpcodeStr,
+ v16f32_info, OpNodeRnd>, EVEX_V512;
+ defm NAME##PSZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v8f32x_info, OpNode>, EVEX_V256;
+ defm NAME##PSZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v4f32x_info, OpNode>, EVEX_V128;
+ }
let ExeDomain = SSEPackedDouble in {
- defm VFMADDPDZ : avx512_fma3p_forms<0xA8, 0xB8, "vfmadd",
- v8f64_info, X86Fmadd>, VEX_W;
- defm VFMSUBPDZ : avx512_fma3p_forms<0xAA, 0xBA, "vfmsub",
- v8f64_info, X86Fmsub>, VEX_W;
- defm VFMADDSUBPDZ : avx512_fma3p_forms<0xA6, 0xB6, "vfmaddsub",
- v8f64_info, X86Fmaddsub>, VEX_W;
- defm VFMSUBADDPDZ : avx512_fma3p_forms<0xA7, 0xB7, "vfmsubadd",
- v8f64_info, X86Fmsubadd>, VEX_W;
- defm VFNMADDPDZ : avx512_fma3p_forms<0xAC, 0xBC, "vfnmadd",
- v8f64_info, X86Fnmadd>, VEX_W;
- defm VFNMSUBPDZ : avx512_fma3p_forms<0xAE, 0xBE, "vfnmsub",
- v8f64_info, X86Fnmsub>, VEX_W;
+ defm NAME##PDZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v8f64_info, OpNode>,
+ avx512_fma3_round_forms<opc213, OpcodeStr,
+ v8f64_info, OpNodeRnd>, EVEX_V512, VEX_W;
+ defm NAME##PDZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v4f64x_info, OpNode>, EVEX_V256, VEX_W;
+ defm NAME##PDZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
+ v2f64x_info, OpNode>, EVEX_V128, VEX_W;
+ }
}
+defm VFMADD : avx512_fma3p<0xA8, 0xB8, "vfmadd", X86Fmadd, X86FmaddRnd>;
+defm VFMSUB : avx512_fma3p<0xAA, 0xBA, "vfmsub", X86Fmsub, X86FmsubRnd>;
+defm VFMADDSUB : avx512_fma3p<0xA6, 0xB6, "vfmaddsub", X86Fmaddsub, X86FmaddsubRnd>;
+defm VFMSUBADD : avx512_fma3p<0xA7, 0xB7, "vfmsubadd", X86Fmsubadd, X86FmsubaddRnd>;
+defm VFNMADD : avx512_fma3p<0xAC, 0xBC, "vfnmadd", X86Fnmadd, X86FnmaddRnd>;
+defm VFNMSUB : avx512_fma3p<0xAE, 0xBE, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;
+
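// Note: a sketch of the net effect of the repacking above: each defm now
// instantiates masked 512/256/128-bit forms through AVX512_maskable_3src
// (with $src1 tied to $dst), and the 213 variant also gets a 512-bit
// embedded-rounding form; assumed resulting instance names, e.g.:
//   VFMADDPSZv213rr (plain) and VFMADDPSZv213rrb (rounding-control)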
let Constraints = "$src1 = $dst" in {
multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
let mayLoad = 1 in
def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src3, _.MemOp:$src2),
- !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
- [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, (_.MemOpFrag addr:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
+ [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2),
_.RC:$src3)))]>;
def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src3, _.ScalarMemOp:$src2),
- !strconcat(OpcodeStr, " \t{${src2}", _.BroadcastStr,
+ !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr,
", $src3, $dst|$dst, $src3, ${src2}", _.BroadcastStr, "}"),
[(set _.RC:$dst,
(OpNode _.RC:$src1, (_.VT (X86VBroadcast
@@ -3412,65 +3691,54 @@ multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
} // Constraints = "$src1 = $dst"
+multiclass avx512_fma3p_m132_f<bits<8> opc,
+ string OpcodeStr,
+ SDNode OpNode> {
+
let ExeDomain = SSEPackedSingle in {
- defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", X86Fmadd,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", X86Fmsub,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", X86Fmaddsub,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", X86Fmsubadd,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", X86Fnmadd,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", X86Fnmsub,
- v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-}
+ defm NAME##PSZ : avx512_fma3p_m132<opc, OpcodeStr##ps,
+ OpNode, v16f32_info>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm NAME##PSZ256 : avx512_fma3p_m132<opc, OpcodeStr##ps,
+ OpNode, v8f32x_info>, EVEX_V256, EVEX_CD8<32, CD8VF>;
+ defm NAME##PSZ128 : avx512_fma3p_m132<opc, OpcodeStr##ps,
+ OpNode, v4f32x_info>, EVEX_V128, EVEX_CD8<32, CD8VF>;
+ }
let ExeDomain = SSEPackedDouble in {
- defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", X86Fmadd,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", X86Fmsub,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", X86Fmaddsub,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", X86Fmsubadd,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", X86Fnmadd,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", X86Fnmsub,
- v8f64_info>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+ defm NAME##PDZ : avx512_fma3p_m132<opc, OpcodeStr##pd,
+ OpNode, v8f64_info>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+ defm NAME##PDZ256 : avx512_fma3p_m132<opc, OpcodeStr##pd,
+ OpNode, v4f64x_info>, EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
+ defm NAME##PDZ128 : avx512_fma3p_m132<opc, OpcodeStr##pd,
+ OpNode, v2f64x_info>, EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
+ }
}
+defm VFMADD132 : avx512_fma3p_m132_f<0x98, "vfmadd132", X86Fmadd>;
+defm VFMSUB132 : avx512_fma3p_m132_f<0x9A, "vfmsub132", X86Fmsub>;
+defm VFMADDSUB132 : avx512_fma3p_m132_f<0x96, "vfmaddsub132", X86Fmaddsub>;
+defm VFMSUBADD132 : avx512_fma3p_m132_f<0x97, "vfmsubadd132", X86Fmsubadd>;
+defm VFNMADD132 : avx512_fma3p_m132_f<0x9C, "vfnmadd132", X86Fnmadd>;
+defm VFNMSUB132 : avx512_fma3p_m132_f<0x9E, "vfnmsub132", X86Fnmsub>;
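// Note: in the 132 forms above the memory operand is $src2, so the patterns
// read dst = src1 * mem + src3; only the m/mb forms are defined because a
// register-register 132 encoding would be redundant with the 213/231 forms.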
+
+
// Scalar FMA
let Constraints = "$src1 = $dst" in {
-multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- RegisterClass RC, ValueType OpVT,
- X86MemOperand x86memop, Operand memop,
+multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType OpVT,
+ X86MemOperand x86memop, Operand memop,
PatFrag mem_frag> {
let isCommutable = 1 in
def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
let mayLoad = 1 in
def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, f128mem:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src2, RC:$src1,
(mem_frag addr:$src3))))]>;
@@ -3503,12 +3771,12 @@ multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm> {
let hasSideEffects = 0 in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
- !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
let mayLoad = 1 in
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
(ins DstRC:$src1, x86memop:$src),
- !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
} // hasSideEffects = 0
}
@@ -3576,12 +3844,12 @@ multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstR
string asm> {
let hasSideEffects = 0 in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
Requires<[HasAVX512]>;
let mayLoad = 1 in
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
Requires<[HasAVX512]>;
} // hasSideEffects = 0
}
@@ -3679,10 +3947,10 @@ multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
string asm> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
}
@@ -3755,21 +4023,21 @@ def : Pat<(extloadf32 addr:$src),
def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
Requires<[HasAVX512]>;
-multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
- RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
+multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
+ RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
Domain d> {
let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
- !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
+ !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
[], d>, EVEX, EVEX_B, EVEX_RC;
let mayLoad = 1 in
def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
@@ -3781,29 +4049,29 @@ multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
Domain d> {
let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
let mayLoad = 1 in
def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}
defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
- memopv8f64, f512mem, v8f32, v8f64,
+ loadv8f64, f512mem, v8f32, v8f64,
SSEPackedSingle>, EVEX_V512, VEX_W, PD,
EVEX_CD8<64, CD8VF>;
defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
- memopv4f64, f256mem, v8f64, v8f32,
+ loadv4f64, f256mem, v8f64, v8f32,
SSEPackedDouble>, EVEX_V512, PS,
EVEX_CD8<32, CD8VH>;
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
(VCVTPS2PDZrm addr:$src)>;
-
+
def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
(bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
(VCVTPD2PSZrr VR512:$src)>;
@@ -3817,27 +4085,27 @@ def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
//===----------------------------------------------------------------------===//
defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
- memopv8i64, i512mem, v16f32, v16i32,
+ loadv8i64, i512mem, v16f32, v16i32,
SSEPackedSingle>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;
defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
- memopv4i64, i256mem, v8f64, v8i32,
+ loadv4i64, i256mem, v8f64, v8i32,
SSEPackedDouble>, EVEX_V512, XS,
EVEX_CD8<32, CD8VH>;
defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
- memopv16f32, f512mem, v16i32, v16f32,
+ loadv16f32, f512mem, v16i32, v16f32,
SSEPackedSingle>, EVEX_V512, XS,
EVEX_CD8<32, CD8VF>;
defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
- memopv8f64, f512mem, v8i32, v8f64,
+ loadv8f64, f512mem, v8i32, v8f64,
SSEPackedDouble>, EVEX_V512, PD, VEX_W,
EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
- memopv16f32, f512mem, v16i32, v16f32,
+ loadv16f32, f512mem, v16i32, v16f32,
SSEPackedSingle>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;
@@ -3847,29 +4115,29 @@ def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
(VCVTTPS2UDQZrr VR512:$src)>;
defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
- memopv8f64, f512mem, v8i32, v8f64,
+ loadv8f64, f512mem, v8i32, v8f64,
SSEPackedDouble>, EVEX_V512, PS, VEX_W,
EVEX_CD8<64, CD8VF>;
-
+
// cvttpd2udq (src, 0, mask-all-ones, sae-current)
def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
(v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
(VCVTTPD2UDQZrr VR512:$src)>;
defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
- memopv4i64, f256mem, v8f64, v8i32,
+ loadv4i64, f256mem, v8f64, v8i32,
SSEPackedDouble>, EVEX_V512, XS,
EVEX_CD8<32, CD8VH>;
-
+
defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
- memopv16i32, f512mem, v16f32, v16i32,
+ loadv16i32, f512mem, v16f32, v16i32,
SSEPackedSingle>, EVEX_V512, XD,
EVEX_CD8<32, CD8VF>;
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
- (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
+ (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
-
+
def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
(v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
@@ -3877,7 +4145,7 @@ def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
-
+
def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
(v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
@@ -3904,23 +4172,23 @@ multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
X86MemOperand x86memop, Domain d> {
let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[], d>, EVEX;
def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
- !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
+ !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
[], d>, EVEX, EVEX_B, EVEX_RC;
let mayLoad = 1 in
def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[], d>, EVEX;
} // hasSideEffects = 0
}
defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
- memopv16f32, f512mem, SSEPackedSingle>, PD,
+ loadv16f32, f512mem, SSEPackedSingle>, PD,
EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
- memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
+ loadv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
EVEX_V512, EVEX_CD8<64, CD8VF>;
def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
@@ -3932,10 +4200,10 @@ def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
(VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
- memopv16f32, f512mem, SSEPackedSingle>,
+ loadv16f32, f512mem, SSEPackedSingle>,
PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
- memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
+ loadv8f64, f512mem, SSEPackedDouble>, VEX_W,
PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
@@ -3969,13 +4237,13 @@ multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
X86MemOperand x86memop> {
def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
- (ins srcRC:$src1, i32i8imm:$src2),
- "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ (ins srcRC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, EVEX;
let hasSideEffects = 0, mayStore = 1 in
def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
- "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
+ (ins x86memop:$dst, srcRC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
}
defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
@@ -4022,7 +4290,7 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
}
}
-
+
/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
X86MemOperand x86memop> {
@@ -4030,12 +4298,12 @@ multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
}
}
}
@@ -4130,60 +4398,40 @@ def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
(VRCP14PDZr VR512:$src)>;
/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
-multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
- X86MemOperand x86memop> {
- let hasSideEffects = 0, Predicates = [HasERI] in {
- def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
- def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr,
- " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
- []>, EVEX_4V, EVEX_B;
- let mayLoad = 1 in {
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
- }
-}
-}
-
-defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
- EVEX_CD8<32, CD8VT1>;
-defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
- VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
- EVEX_CD8<32, CD8VT1>;
-defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
- VEX_W, EVEX_CD8<64, CD8VT1>;
+multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ SDNode OpNode> {
-def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
- (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
- FROUND_NO_EXC)),
- (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
- (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+ defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+ (i32 FROUND_CURRENT))>;
-def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
- (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
- FROUND_NO_EXC)),
- (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
- (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+ defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+ (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
-def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
- (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
- FROUND_NO_EXC)),
- (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
- (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+ defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ (i32 FROUND_CURRENT))>;
+}
-def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
- (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
- FROUND_NO_EXC)),
- (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
- (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ defm SS : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode>,
+ EVEX_CD8<32, CD8VT1>;
+ defm SD : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode>,
+ EVEX_CD8<64, CD8VT1>, VEX_W;
+}
+let hasSideEffects = 0, Predicates = [HasERI] in {
+ defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
+ defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
+}
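// Note: an illustrative use of the "rb" forms above (assembler spelling
// assumed): EVEX_B with FROUND_NO_EXC selects the suppress-all-exceptions
// encoding on the register-register scalar, e.g.:
//   vrcp28ss {sae}, %xmm2, %xmm1, %xmm0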
/// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
@@ -4196,12 +4444,14 @@ multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr,
"$src", "$src",
- (OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
+ (OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC)),
+ "{sae}">, EVEX_B;
defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
(OpNode (_.FloatVT
- (bitconvert (_.LdFrag addr:$src))), (i32 FROUND_CURRENT))>;
+ (bitconvert (_.LdFrag addr:$src))),
+ (i32 FROUND_CURRENT))>;
defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
@@ -4218,7 +4468,7 @@ multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
}
let Predicates = [HasERI], hasSideEffects = 0 in {
-
+
defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX, EVEX_V512, T8PD;
defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX, EVEX_V512, T8PD;
defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX, EVEX_V512, T8PD;
@@ -4257,7 +4507,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128X:$dst,
+ [(set VR128X:$dst,
(F32Int VR128X:$src1, VR128X:$src2))],
itins_s.rr>, XS, EVEX_4V;
let mayLoad = 1 in {
@@ -4271,7 +4521,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
(ins VR128X:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128X:$dst,
+ [(set VR128X:$dst,
(F32Int VR128X:$src1, sse_load_f32:$src2))],
itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
}
@@ -4285,7 +4535,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128X:$dst,
+ [(set VR128X:$dst,
(F64Int VR128X:$src1, VR128X:$src2))],
itins_s.rr>, XD, EVEX_4V, VEX_W;
let mayLoad = 1 in {
@@ -4299,8 +4549,8 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
(ins VR128X:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128X:$dst,
- (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
+ [(set VR128X:$dst,
+ (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
}
}
@@ -4332,8 +4582,8 @@ multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>;
-defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
- int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
+defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
+ int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
SSE_SQRTSS, SSE_SQRTSD>;
let Predicates = [HasAVX512] in {
@@ -4343,7 +4593,7 @@ let Predicates = [HasAVX512] in {
def : Pat<(v8f64 (int_x86_avx512_sqrt_pd_512 (v8f64 VR512:$src1),
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_CURRENT)),
(VSQRTPDZr VR512:$src1)>;
-
+
def : Pat<(f32 (fsqrt FR32X:$src)),
(VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
def : Pat<(f32 (fsqrt (load addr:$src))),
@@ -4383,107 +4633,6 @@ let Predicates = [HasAVX512] in {
}
-multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
- X86MemOperand x86memop, RegisterClass RC,
- PatFrag mem_frag32, PatFrag mem_frag64,
- Intrinsic V4F32Int, Intrinsic V2F64Int,
- CD8VForm VForm> {
-let ExeDomain = SSEPackedSingle in {
- // Intrinsic operation, reg.
- // Vector intrinsic operation, reg
- def PSr : AVX512AIi8<opcps, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
-
- // Vector intrinsic operation, mem
- def PSm : AVX512AIi8<opcps, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst,
- (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
- EVEX_CD8<32, VForm>;
-} // ExeDomain = SSEPackedSingle
-
-let ExeDomain = SSEPackedDouble in {
- // Vector intrinsic operation, reg
- def PDr : AVX512AIi8<opcpd, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
-
- // Vector intrinsic operation, mem
- def PDm : AVX512AIi8<opcpd, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst,
- (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
- EVEX_CD8<64, VForm>;
-} // ExeDomain = SSEPackedDouble
-}
-
-multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
- string OpcodeStr,
- Intrinsic F32Int,
- Intrinsic F64Int> {
-let ExeDomain = GenericDomain in {
- // Operation, reg.
- let hasSideEffects = 0 in
- def SSr : AVX512AIi8<opcss, MRMSrcReg,
- (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>;
-
- // Intrinsic operation, reg.
- let isCodeGenOnly = 1 in
- def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
- (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
-
- // Intrinsic operation, mem.
- def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
- (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128X:$dst, (F32Int VR128X:$src1,
- sse_load_f32:$src2, imm:$src3))]>,
- EVEX_CD8<32, CD8VT1>;
-
- // Operation, reg.
- let hasSideEffects = 0 in
- def SDr : AVX512AIi8<opcsd, MRMSrcReg,
- (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, VEX_W;
-
- // Intrinsic operation, reg.
- let isCodeGenOnly = 1 in
- def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
- (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
- VEX_W;
-
- // Intrinsic operation, mem.
- def SDm : AVX512AIi8<opcsd, MRMSrcMem,
- (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128X:$dst,
- (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
- VEX_W, EVEX_CD8<64, CD8VT1>;
-} // ExeDomain = GenericDomain
-}
-
multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
X86MemOperand x86memop, RegisterClass RC,
PatFrag mem_frag, Domain d> {
@@ -4491,23 +4640,22 @@ let ExeDomain = d in {
// Intrinsic operation, reg.
// Vector intrinsic operation, reg
def r : AVX512AIi8<opc, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, EVEX;
// Vector intrinsic operation, mem
def m : AVX512AIi8<opc, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, EVEX;
} // ExeDomain
}
-
defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
- memopv16f32, SSEPackedSingle>, EVEX_V512,
+ loadv16f32, SSEPackedSingle>, EVEX_V512,
EVEX_CD8<32, CD8VF>;
def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
@@ -4517,7 +4665,7 @@ def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
- memopv8f64, SSEPackedDouble>, EVEX_V512,
+ loadv8f64, SSEPackedDouble>, EVEX_V512,
VEX_W, EVEX_CD8<64, CD8VF>;
def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
@@ -4525,50 +4673,72 @@ def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
FROUND_CURRENT)),
(VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
-multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
- Operand x86memop, RegisterClass RC, Domain d> {
-let ExeDomain = d in {
- def r : AVX512AIi8<opc, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V;
+multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
- def m : AVX512AIi8<opc, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V;
-} // ExeDomain
+ let ExeDomain = _.ExeDomain in {
+ defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
+ "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+ (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
+
+ defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
+ "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+ (i32 imm:$src3), (i32 FROUND_NO_EXC))), "{sae}">, EVEX_B;
+
+ let mayLoad = 1 in
+ defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr,
+ "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (_.VT (X86RndScale (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
+ }
+ let Predicates = [HasAVX512] in {
+ def : Pat<(ffloor _.FRC:$src), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x1))), _.FRC)>;
+ def : Pat<(fceil _.FRC:$src), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x2))), _.FRC)>;
+ def : Pat<(ftrunc _.FRC:$src), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x3))), _.FRC)>;
+ def : Pat<(frint _.FRC:$src), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x4))), _.FRC)>;
+ def : Pat<(fnearbyint _.FRC:$src), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0xc))), _.FRC)>;
+
+ def : Pat<(ffloor (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
+ addr:$src, (i32 0x1))), _.FRC)>;
+ def : Pat<(fceil (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
+ addr:$src, (i32 0x2))), _.FRC)>;
+ def : Pat<(ftrunc (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
+ addr:$src, (i32 0x3))), _.FRC)>;
+ def : Pat<(frint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
+ addr:$src, (i32 0x4))), _.FRC)>;
+ def : Pat<(fnearbyint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
+ (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
+ addr:$src, (i32 0xc))), _.FRC)>;
+ }
}
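// Note: the immediates in the patterns above follow the rounding-control
// encoding (the usual ROUNDSS interpretation is assumed): 0x1 rounds down
// (ffloor), 0x2 rounds up (fceil), 0x3 truncates (ftrunc), 0x4 uses the
// current MXCSR mode (frint), and 0xC is the current mode with the precision
// exception masked (fnearbyint).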
-defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
- SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
-
-defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
- SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
-
-def : Pat<(ffloor FR32X:$src),
- (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
-def : Pat<(f64 (ffloor FR64X:$src)),
- (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
-def : Pat<(f32 (fnearbyint FR32X:$src)),
- (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
-def : Pat<(f64 (fnearbyint FR64X:$src)),
- (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
-def : Pat<(f32 (fceil FR32X:$src)),
- (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
-def : Pat<(f64 (fceil FR64X:$src)),
- (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
-def : Pat<(f32 (frint FR32X:$src)),
- (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
-def : Pat<(f64 (frint FR64X:$src)),
- (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
-def : Pat<(f32 (ftrunc FR32X:$src)),
- (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
-def : Pat<(f64 (ftrunc FR64X:$src)),
- (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
+defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", f32x_info>,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", f64x_info>, VEX_W,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VT1>;
+
+let Predicates = [HasAVX512] in {
def : Pat<(v16f32 (ffloor VR512:$src)),
(VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
def : Pat<(v16f32 (fnearbyint VR512:$src)),
@@ -4590,7 +4760,7 @@ def : Pat<(v8f64 (frint VR512:$src)),
(VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
def : Pat<(v8f64 (ftrunc VR512:$src)),
(VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
-
+}
//-------------------------------------------------
// Integer truncate and extend operations
//-------------------------------------------------
@@ -4600,32 +4770,32 @@ multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
RegisterClass KRC, X86MemOperand x86memop> {
def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
(ins srcRC:$src),
- !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
(ins KRC:$mask, srcRC:$src),
!strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
+ "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
[]>, EVEX, EVEX_K;
def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
(ins KRC:$mask, srcRC:$src),
!strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
(ins x86memop:$dst, KRC:$mask, srcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
[]>, EVEX, EVEX_K;
}
-defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
+defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
@@ -4679,151 +4849,158 @@ multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
(ins SrcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
(ins KRC:$mask, SrcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
[]>, EVEX, EVEX_K;
def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
(ins KRC:$mask, SrcRC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
(ins x86memop:$src),
- !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
EVEX;
def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
(ins KRC:$mask, x86memop:$src),
- !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
+ !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
[]>,
EVEX, EVEX_K;
def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
(ins KRC:$mask, x86memop:$src),
- !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
[]>,
EVEX, EVEX_KZ;
}
}
defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
- memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
+ loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VQ>;
defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
- memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
+ loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VO>;
defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
- memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
+ loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
EVEX_CD8<16, CD8VH>;
defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
- memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
+ loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
EVEX_CD8<16, CD8VQ>;
defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
- memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
+ loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
- memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
+ loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VQ>;
defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
- memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
+ loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VO>;
defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
- memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
+ loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
EVEX_CD8<16, CD8VH>;
defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
- memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
+ loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
EVEX_CD8<16, CD8VQ>;
defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
- memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
+ loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
//===----------------------------------------------------------------------===//
// GATHER - SCATTER Operations
-multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
- RegisterClass RC, X86MemOperand memop> {
-let mayLoad = 1,
+multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ X86MemOperand memop, PatFrag GatherNode> {
+let mayLoad = 1, hasTwoExplicitDefs = 1,
Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
- (ins RC:$src1, KRC:$mask, memop:$src2),
+ def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst, _.KRCWM:$mask_wb),
+ (ins _.RC:$src1, _.KRCWM:$mask, memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
- []>, EVEX, EVEX_K;
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ [(set _.RC:$dst, _.KRCWM:$mask_wb,
+ (_.VT (GatherNode (_.VT _.RC:$src1), _.KRCWM:$mask,
+ vectoraddr:$src2)))]>, EVEX, EVEX_K,
+ EVEX_CD8<_.EltSize, CD8VT1>;
}
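// A gather has two explicit defs: the loaded vector and the written-back
// mask. $src1 is tied to $dst (masked-off lanes keep their old values),
// $mask is tied to $mask_wb (hardware clears each mask bit as its element
// completes), and $dst is @earlyclobber because the destination may not
// overlap the index register. E.g. (registers illustrative only):
//   vgatherdpd (%rax,%ymm1,8), %zmm0 {%k1}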
let ExeDomain = SSEPackedDouble in {
-defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", v8f64_info, vy64xmem,
+ mgatherv8i32>, EVEX_V512, VEX_W;
+defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", v8f64_info, vz64mem,
+ mgatherv8i64>, EVEX_V512, VEX_W;
}
let ExeDomain = SSEPackedSingle in {
-defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
+defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", v16f32_info, vz32mem,
+ mgatherv16i32>, EVEX_V512;
+defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", v8f32x_info, vz64mem,
+ mgatherv8i64>, EVEX_V512;
}
-
-defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-
-defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-
-multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
- RegisterClass RC, X86MemOperand memop> {
+
+defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", v8i64_info, vy64xmem,
+ mgatherv8i32>, EVEX_V512, VEX_W;
+defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", v16i32_info, vz32mem,
+ mgatherv16i32>, EVEX_V512;
+
+defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", v8i64_info, vz64mem,
+ mgatherv8i64>, EVEX_V512, VEX_W;
+defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", v8i32x_info, vz64mem,
+ mgatherv8i64>, EVEX_V512;
+
+multiclass avx512_scatter<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ X86MemOperand memop, PatFrag ScatterNode> {
+
let mayStore = 1, Constraints = "$mask = $mask_wb" in
- def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
- (ins memop:$dst, KRC:$mask, RC:$src2),
+
+ def mr : AVX5128I<opc, MRMDestMem, (outs _.KRCWM:$mask_wb),
+ (ins memop:$dst, _.KRCWM:$mask, _.RC:$src),
!strconcat(OpcodeStr,
- " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
- []>, EVEX, EVEX_K;
+ "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
+ [(set _.KRCWM:$mask_wb, (ScatterNode (_.VT _.RC:$src),
+ _.KRCWM:$mask, vectoraddr:$dst))]>,
+ EVEX, EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
}
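// A scatter is a masked store: the only explicit def is the written-back
// mask ($mask is tied to $mask_wb and bits clear as elements are stored),
// so the pattern sets just $mask_wb. E.g. (registers illustrative only):
//   vscatterdpd %zmm0, (%rax,%ymm1,8) {%k1}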
let ExeDomain = SSEPackedDouble in {
-defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", v8f64_info, vy64xmem,
+ mscatterv8i32>, EVEX_V512, VEX_W;
+defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", v8f64_info, vz64mem,
+ mscatterv8i64>, EVEX_V512, VEX_W;
}
let ExeDomain = SSEPackedSingle in {
-defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
+defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", v16f32_info, vz32mem,
+ mscatterv16i32>, EVEX_V512;
+defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", v8f32x_info, vz64mem,
+ mscatterv8i64>, EVEX_V512;
}
-defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
+defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", v8i64_info, vy64xmem,
+ mscatterv8i32>, EVEX_V512, VEX_W;
+defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", v16i32_info, vz32mem,
+ mscatterv16i32>, EVEX_V512;
-defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
+defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", v8i64_info, vz64mem,
+ mscatterv8i64>, EVEX_V512, VEX_W;
+defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", v8i32x_info, vz64mem,
+ mscatterv8i64>, EVEX_V512;
// prefetch
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
RegisterClass KRC, X86MemOperand memop> {
let Predicates = [HasPFI], hasSideEffects = 1 in
def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
- !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
[]>, EVEX, EVEX_K;
}
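// The prefetch forms produce no value and are kept alive purely through
// hasSideEffects; the mask selects which elements are worth prefetching.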
@@ -4838,7 +5015,7 @@ defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-
+
defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
@@ -4881,41 +5058,41 @@ multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
ValueType vt, string OpcodeStr, PatFrag mem_frag,
Domain d> {
def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$src3),
+ (ins RC:$src1, x86memop:$src2, u8imm:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
(i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$src3),
+ (ins RC:$src1, RC:$src2, u8imm:$src3),
!strconcat(OpcodeStr,
- " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
(i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
EVEX_4V, Sched<[WriteShuffle]>;
}
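// Note: the i8imm -> u8imm switch above should only affect assembly syntax:
// the immediate is printed and parsed as unsigned, so 0xFF appears as 255
// rather than -1.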
-defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
+defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", loadv16f32,
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
+defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", loadv8f64,
SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
(VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v16i32 (X86Shufp VR512:$src1,
- (memopv16i32 addr:$src2), (i8 imm:$imm))),
+ (loadv16i32 addr:$src2), (i8 imm:$imm))),
(VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
(VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v8i64 (X86Shufp VR512:$src1,
- (memopv8i64 addr:$src2), (i8 imm:$imm))),
+ (loadv8i64 addr:$src2), (i8 imm:$imm))),
(VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
multiclass avx512_valign<X86VectorVTInfo _> {
defm rri : AVX512_maskable<0x03, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.RC:$src2, i8imm:$src3),
+ (ins _.RC:$src1, _.RC:$src2, u8imm:$src3),
"valign"##_.Suffix,
"$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86VAlign _.RC:$src2, _.RC:$src1,
@@ -4928,9 +5105,9 @@ multiclass avx512_valign<X86VectorVTInfo _> {
let mayLoad = 1 in
def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i8imm:$src3),
+ (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
!strconcat("valign"##_.Suffix,
- " \t{$src3, $src2, $src1, $dst|"
+ "\t{$src3, $src2, $src1, $dst|"
"$dst, $src1, $src2, $src3}"),
[]>, EVEX_4V;
}
@@ -4946,43 +5123,43 @@ multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
X86MemOperand x86memop, X86MemOperand x86scalar_mop,
string BrdcstStr> {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
[]>, EVEX, EVEX_K;
def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
!strconcat(OpcodeStr,
- " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins KRC:$mask, x86memop:$src),
!strconcat(OpcodeStr,
- " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
[]>, EVEX, EVEX_K;
def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins KRC:$mask, x86memop:$src),
!strconcat(OpcodeStr,
- " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins x86scalar_mop:$src),
- !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
", $dst|$dst, ${src}", BrdcstStr, "}"),
[]>, EVEX, EVEX_B;
def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins KRC:$mask, x86scalar_mop:$src),
- !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
[]>, EVEX, EVEX_B, EVEX_K;
def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
(ins KRC:$mask, x86scalar_mop:$src),
- !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
BrdcstStr, "}"),
[]>, EVEX, EVEX_B, EVEX_KZ;
@@ -5012,57 +5189,65 @@ def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
(bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
(VPABSQZrr VR512:$src)>;
-multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
+multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
RegisterClass RC, RegisterClass KRC,
X86MemOperand x86memop,
X86MemOperand x86scalar_mop, string BrdcstStr> {
+ let hasSideEffects = 0 in {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src),
- !strconcat(OpcodeStr, " \t{$src, ${dst} |${dst}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, ${dst} |${dst}, $src}"),
[]>, EVEX;
+ let mayLoad = 1 in
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src),
- !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
+ !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
[]>, EVEX;
+ let mayLoad = 1 in
def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins x86scalar_mop:$src),
- !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
[]>, EVEX, EVEX_B;
def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src),
!strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
+ let mayLoad = 1 in
def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, x86memop:$src),
!strconcat(OpcodeStr,
- " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
+ let mayLoad = 1 in
def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, x86scalar_mop:$src),
- !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
BrdcstStr, "}"),
[]>, EVEX, EVEX_KZ, EVEX_B;
-
+
let Constraints = "$src1 = $dst" in {
def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
[]>, EVEX, EVEX_K;
+ let mayLoad = 1 in
def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, x86memop:$src2),
!strconcat(OpcodeStr,
- " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
[]>, EVEX, EVEX_K;
+ let mayLoad = 1 in
def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
[]>, EVEX, EVEX_K, EVEX_B;
- }
+ }
+ }
}
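// With empty patterns these defs would otherwise be treated conservatively
// as having unmodeled side effects; clearing hasSideEffects and setting
// mayLoad on the memory forms restores accurate properties for scheduling
// and dead-code elimination.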
let Predicates = [HasCDI] in {
@@ -5109,11 +5294,11 @@ def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
(VPLZCNTQrrk VR512:$src1,
(v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
-def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
+def : Pat<(v16i32 (ctlz (loadv16i32 addr:$src))),
(VPLZCNTDrm addr:$src)>;
def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
(VPLZCNTDrr VR512:$src)>;
-def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
+def : Pat<(v8i64 (ctlz (loadv8i64 addr:$src))),
(VPLZCNTQrm addr:$src)>;
def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
(VPLZCNTQrr VR512:$src)>;
@@ -5123,7 +5308,14 @@ def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
def : Pat<(store VK1:$src, addr:$dst),
- (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
+ sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
+
+def : Pat<(store VK8:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
+ sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
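+// Without AVX512DQ there is no KMOVB, so a mask store is done through a
+// 16-bit mask-to-GPR move followed by a store of the low byte:
+//   k-reg -> GR32 (KMOVWrk) -> sub_8bit -> MOV8mr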
def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
@@ -5135,10 +5327,10 @@ def : Pat<(truncstorei1 GR8:$src, addr:$dst),
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMDestReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
- !strconcat(OpcodeStr##Vec.Suffix, " \t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
[(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;
}
-
+
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
@@ -5160,5 +5352,108 @@ multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
defm NAME##Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, OpcodeStr,
HasDQI>, VEX_W;
}
-
+
defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
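// vpmovm2* broadcasts each mask bit into an all-ones or all-zeros element,
// which is why it is modeled as a sign extension (X86vsext) of the k-register.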
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - COMPRESS and EXPAND
+//
+multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
+ string OpcodeStr> {
+ def rrkz : AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
+ _.ImmAllZerosV)))]>, EVEX_KZ;
+
+ let Constraints = "$src0 = $dst" in
+ def rrk : AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
+ (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
+ _.RC:$src0)))]>, EVEX_K;
+
+ let mayStore = 1 in {
+ def mrk : AVX5128I<opc, MRMDestMem, (outs),
+ (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(store (_.VT (X86compress _.KRCWM:$mask, _.RC:$src, undef)),
+ addr:$dst)]>,
+ EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
+ }
+}
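+// Compress packs the elements selected by the mask into the low lanes of
+// the destination, merging into $src0 (EVEX_K) or zeroing (EVEX_KZ). The
+// memory form stores only the selected elements contiguously, hence CD8VT1:
+// the compressed disp8 is scaled by a single element. E.g. (illustrative):
+//   vcompressps %zmm1, (%rax) {%k1}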
+
+multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ defm Z : compress_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;
+
+ let Predicates = [HasVLX] in {
+ defm Z256 : compress_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
+ defm Z128 : compress_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
+ }
+}
+
+defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", avx512vl_i32_info>,
+ EVEX;
+defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", avx512vl_i64_info>,
+ EVEX, VEX_W;
+defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", avx512vl_f32_info>,
+ EVEX;
+defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", avx512vl_f64_info>,
+ EVEX, VEX_W;
+
+// expand
+multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
+ string OpcodeStr> {
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.RC:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask, (_.VT _.RC:$src),
+ _.ImmAllZerosV)))]>, EVEX_KZ;
+
+ let Constraints = "$src0 = $dst" in
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
+ (_.VT _.RC:$src), _.RC:$src0)))]>, EVEX_K;
+
+ let mayLoad = 1, Constraints = "$src0 = $dst" in
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
+ (_.VT (bitconvert
+ (_.LdFrag addr:$src))),
+ _.RC:$src0)))]>,
+ EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
+
+ let mayLoad = 1 in
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.KRCWM:$mask, _.MemOp:$src),
+ OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
+ (_.VT (bitconvert (_.LdFrag addr:$src))),
+ _.ImmAllZerosV)))]>,
+ EVEX_KZ, EVEX_CD8<_.EltSize, CD8VT1>;
+
+}
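+// Expand is the inverse of compress: it reads a contiguous run of elements
+// (register or memory) and distributes them into the destination lanes
+// selected by the mask, merging or zeroing the unselected lanes.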
+
+multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;
+
+ let Predicates = [HasVLX] in {
+ defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
+ defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
+ }
+}
+
+defm VPEXPANDD : expand_by_elt_width <0x89, "vpexpandd", avx512vl_i32_info>,
+ EVEX;
+defm VPEXPANDQ : expand_by_elt_width <0x89, "vpexpandq", avx512vl_i64_info>,
+ EVEX, VEX_W;
+defm VEXPANDPS : expand_by_elt_width <0x88, "vexpandps", avx512vl_f32_info>,
+ EVEX;
+defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", avx512vl_f64_info>,
+ EVEX, VEX_W;
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td
index 25e1e80..78efc4d 100644
--- a/lib/Target/X86/X86InstrArithmetic.td
+++ b/lib/Target/X86/X86InstrArithmetic.td
@@ -15,13 +15,13 @@
//===----------------------------------------------------------------------===//
// LEA - Load Effective Address
let SchedRW = [WriteLEA] in {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LEA16r : I<0x8D, MRMSrcMem,
- (outs GR16:$dst), (ins i32mem:$src),
+ (outs GR16:$dst), (ins anymem:$src),
"lea{w}\t{$src|$dst}, {$dst|$src}", [], IIC_LEA_16>, OpSize16;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
- (outs GR32:$dst), (ins i32mem:$src),
+ (outs GR32:$dst), (ins anymem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea32addr:$src)], IIC_LEA>,
OpSize32, Requires<[Not64BitMode]>;
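// Note: taking $src as anymem rather than i32mem reflects that LEA only
// computes an address and never dereferences it, so no particular memory
// operand size should be required (or checked) when parsing.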
@@ -65,18 +65,18 @@ def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
[(set AL, (mul AL, GR8:$src)),
(implicit EFLAGS)], IIC_MUL8>, Sched<[WriteIMul]>;
// AX,DX = AX*GR16
-let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
+let Defs = [AX,DX,EFLAGS], Uses = [AX], hasSideEffects = 0 in
def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
"mul{w}\t$src",
[], IIC_MUL16_REG>, OpSize16, Sched<[WriteIMul]>;
// EAX,EDX = EAX*GR32
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], hasSideEffects = 0 in
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
"mul{l}\t$src",
[/*(set EAX, EDX, EFLAGS, (X86umul_flag EAX, GR32:$src))*/],
IIC_MUL32_REG>, OpSize32, Sched<[WriteIMul]>;
// RAX,RDX = RAX*GR64
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], hasSideEffects = 0 in
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
"mul{q}\t$src",
[/*(set RAX, RDX, EFLAGS, (X86umul_flag RAX, GR64:$src))*/],
@@ -91,7 +91,7 @@ def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
[(set AL, (mul AL, (loadi8 addr:$src))),
(implicit EFLAGS)], IIC_MUL8>, SchedLoadReg<WriteIMulLd>;
// AX,DX = AX*[mem16]
-let mayLoad = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, hasSideEffects = 0 in {
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
"mul{w}\t$src",
@@ -107,7 +107,7 @@ def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
"mul{q}\t$src", [], IIC_MUL64>, SchedLoadReg<WriteIMulLd>;
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// AL,AH = AL*GR8
let Defs = [AL,EFLAGS,AX], Uses = [AL] in
def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", [],
@@ -145,7 +145,7 @@ let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
"imul{q}\t$src", [], IIC_IMUL64>, SchedLoadReg<WriteIMulLd>;
}
-} // neverHasSideEffects
+} // hasSideEffects
let Defs = [EFLAGS] in {
@@ -456,64 +456,29 @@ def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"inc{b}\t$dst",
[(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))],
IIC_UNARY_REG>;
-
-let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+let isConvertibleToThreeAddress = 1, CodeSize = 2 in { // Can xform into LEA.
+def INC16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))], IIC_UNARY_REG>,
- OpSize16, Requires<[Not64BitMode]>;
-def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))],
+ IIC_UNARY_REG>, OpSize16;
+def INC32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
- IIC_UNARY_REG>,
- OpSize32, Requires<[Not64BitMode]>;
+ IIC_UNARY_REG>, OpSize32;
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "inc{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))],
IIC_UNARY_REG>;
-} // isConvertibleToThreeAddress = 1, CodeSize = 1
-
-
-// In 64-bit mode, single byte INC and DEC cannot be encoded.
-let isConvertibleToThreeAddress = 1, CodeSize = 2 in {
-// Can transform into LEA.
-def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
- "inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))],
- IIC_UNARY_REG>,
- OpSize16, Requires<[In64BitMode]>;
-def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
- "inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
- IIC_UNARY_REG>,
- OpSize32, Requires<[In64BitMode]>;
-def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
- "dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
- IIC_UNARY_REG>,
- OpSize16, Requires<[In64BitMode]>;
-def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
- "dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
- IIC_UNARY_REG>,
- OpSize32, Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress = 1, CodeSize = 2
-let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
- CodeSize = 2 in {
-def INC32_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
- "inc{w}\t$dst", [], IIC_UNARY_REG>,
- OpSize16, Requires<[Not64BitMode]>;
-def INC32_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
- "inc{l}\t$dst", [], IIC_UNARY_REG>,
- OpSize32, Requires<[Not64BitMode]>;
-def DEC32_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
- "dec{w}\t$dst", [], IIC_UNARY_REG>,
- OpSize16, Requires<[Not64BitMode]>;
-def DEC32_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
- "dec{l}\t$dst", [], IIC_UNARY_REG>,
- OpSize32, Requires<[Not64BitMode]>;
-} // isCodeGenOnly = 1, ForceDisassemble = 1, HasSideEffects = 0, CodeSize = 2
-
+// Short forms are only valid outside 64-bit mode, where 0x40-0x4F are REX
+// prefixes. Selected during MCInst lowering.
+let CodeSize = 1, hasSideEffects = 0 in {
+def INC16r_alt : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+ "inc{w}\t$dst", [], IIC_UNARY_REG>,
+ OpSize16, Requires<[Not64BitMode]>;
+def INC32r_alt : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ "inc{l}\t$dst", [], IIC_UNARY_REG>,
+ OpSize32, Requires<[Not64BitMode]>;
+} // CodeSize = 1, hasSideEffects = 0
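+// E.g. "inc %eax" encodes as the single byte 0x40 in 32-bit mode, but in
+// 64-bit mode 0x40-0x4F are REX prefixes, so it must use 0xFF /0 (inc r/m32).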
} // Constraints = "$src1 = $dst", SchedRW
let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
@@ -522,35 +487,13 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
(implicit EFLAGS)], IIC_UNARY_MEM>;
def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
[(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize16, Requires<[Not64BitMode]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize16;
def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize32, Requires<[Not64BitMode]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize32;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>;
-
-// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
-// how to unfold them.
-// FIXME: What is this for??
-def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize16, Requires<[In64BitMode]>;
-def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize32, Requires<[In64BitMode]>;
-def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize16, Requires<[In64BitMode]>;
-def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize32, Requires<[In64BitMode]>;
} // CodeSize = 2, SchedRW
let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
@@ -559,21 +502,29 @@ def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"dec{b}\t$dst",
[(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))],
IIC_UNARY_REG>;
-let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+let isConvertibleToThreeAddress = 1, CodeSize = 2 in { // Can xform into LEA.
+def DEC16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
- IIC_UNARY_REG>,
- OpSize16, Requires<[Not64BitMode]>;
-def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ IIC_UNARY_REG>, OpSize16;
+def DEC32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
- IIC_UNARY_REG>,
- OpSize32, Requires<[Not64BitMode]>;
+ IIC_UNARY_REG>, OpSize32;
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))],
IIC_UNARY_REG>;
-} // CodeSize = 2
+} // isConvertibleToThreeAddress = 1, CodeSize = 2
+
+// Short forms are only valid outside 64-bit mode, where 0x40-0x4F are REX
+// prefixes. Selected during MCInst lowering.
+let CodeSize = 1, hasSideEffects = 0 in {
+def DEC16r_alt : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+ "dec{w}\t$dst", [], IIC_UNARY_REG>,
+ OpSize16, Requires<[Not64BitMode]>;
+def DEC32r_alt : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ "dec{l}\t$dst", [], IIC_UNARY_REG>,
+ OpSize32, Requires<[Not64BitMode]>;
+} // CodeSize = 1, hasSideEffects = 0
} // Constraints = "$src1 = $dst", SchedRW
@@ -583,12 +534,10 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
(implicit EFLAGS)], IIC_UNARY_MEM>;
def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
[(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize16, Requires<[Not64BitMode]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize16;
def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize32, Requires<[Not64BitMode]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize32;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
[(store (add (loadi64 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>;
@@ -710,15 +659,6 @@ class BinOpRR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
mnemonic, "{$src2, $src1|$src1, $src2}", pattern, itin>,
Sched<[WriteALU]>;
-// BinOpRR_R - Instructions like "add reg, reg, reg", where the pattern has
-// just a regclass (no eflags) as a result.
-class BinOpRR_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode>
- : BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
- [(set typeinfo.RegClass:$dst,
- (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))],
- IIC_BIN_NONMEM>;
-
// BinOpRR_F - Instructions like "cmp reg, Reg", where the pattern has
// just a EFLAGS as a result.
class BinOpRR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
@@ -825,13 +765,6 @@ class BinOpRI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
let ImmT = typeinfo.ImmEncoding;
}
-// BinOpRI_R - Instructions like "add reg, reg, imm".
-class BinOpRI_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
- : BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
- [(set typeinfo.RegClass:$dst,
- (opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2))]>;
-
// BinOpRI_F - Instructions like "cmp reg, imm".
class BinOpRI_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
SDPatternOperator opnode, Format f>
@@ -864,30 +797,23 @@ class BinOpRI8<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
let ImmT = Imm8; // Always 8-bit immediate.
}
-// BinOpRI8_R - Instructions like "add reg, reg, imm8".
-class BinOpRI8_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
- : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
- [(set typeinfo.RegClass:$dst,
- (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
-
// BinOpRI8_F - Instructions like "cmp reg, imm8".
class BinOpRI8_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpRI8<opcode, mnemonic, typeinfo, f, (outs),
[(set EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
// BinOpRI8_RF - Instructions like "add reg, reg, imm8".
class BinOpRI8_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
// BinOpRI8_RFF - Instructions like "adc reg, reg, imm8".
class BinOpRI8_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2,
@@ -923,8 +849,8 @@ class BinOpMR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
[(set EFLAGS, (opnode (load addr:$dst), typeinfo.RegClass:$src))]>;
// BinOpMI - Instructions like "add [mem], imm".
-class BinOpMI<string mnemonic, X86TypeInfo typeinfo,
- Format f, list<dag> pattern, bits<8> opcode = 0x80,
+class BinOpMI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ Format f, list<dag> pattern,
InstrItinClass itin = IIC_BIN_MEM>
: ITy<opcode, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
@@ -934,27 +860,26 @@ class BinOpMI<string mnemonic, X86TypeInfo typeinfo,
}
// BinOpMI_RMW - Instructions like "add [mem], imm".
-class BinOpMI_RMW<string mnemonic, X86TypeInfo typeinfo,
+class BinOpMI_RMW<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
SDNode opnode, Format f>
- : BinOpMI<mnemonic, typeinfo, f,
+ : BinOpMI<opcode, mnemonic, typeinfo, f,
[(store (opnode (typeinfo.VT (load addr:$dst)),
typeinfo.ImmOperator:$src), addr:$dst),
(implicit EFLAGS)]>;
// BinOpMI_RMW_FF - Instructions like "adc [mem], imm".
-class BinOpMI_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
- : BinOpMI<mnemonic, typeinfo, f,
+class BinOpMI_RMW_FF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI<opcode, mnemonic, typeinfo, f,
[(store (opnode (typeinfo.VT (load addr:$dst)),
typeinfo.ImmOperator:$src, EFLAGS), addr:$dst),
- (implicit EFLAGS)], 0x80, IIC_BIN_CARRY_MEM>;
+ (implicit EFLAGS)], IIC_BIN_CARRY_MEM>;
// BinOpMI_F - Instructions like "cmp [mem], imm".
-class BinOpMI_F<string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f, bits<8> opcode = 0x80>
- : BinOpMI<mnemonic, typeinfo, f,
+class BinOpMI_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDPatternOperator opnode, Format f>
+ : BinOpMI<opcode, mnemonic, typeinfo, f,
[(set EFLAGS, (opnode (typeinfo.VT (load addr:$dst)),
- typeinfo.ImmOperator:$src))],
- opcode>;
+ typeinfo.ImmOperator:$src))]>;
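+// Making the opcode a leading parameter rather than a defaulted trailing one
+// lets the group-1 forms (0x80) and TEST (0xF6) share these classes without
+// relying on default arguments.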
// BinOpMI8 - Instructions like "add [mem], imm8".
class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
@@ -969,7 +894,7 @@ class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
// BinOpMI8_RMW - Instructions like "add [mem], imm8".
class BinOpMI8_RMW<string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpMI8<mnemonic, typeinfo, f,
[(store (opnode (load addr:$dst),
typeinfo.Imm8Operator:$src), addr:$dst),
@@ -977,7 +902,7 @@ class BinOpMI8_RMW<string mnemonic, X86TypeInfo typeinfo,
// BinOpMI8_RMW_FF - Instructions like "adc [mem], imm8".
class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpMI8<mnemonic, typeinfo, f,
[(store (opnode (load addr:$dst),
typeinfo.Imm8Operator:$src, EFLAGS), addr:$dst),
@@ -985,7 +910,7 @@ class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
// BinOpMI8_F - Instructions like "cmp [mem], imm8".
class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo,
- SDNode opnode, Format f>
+ SDPatternOperator opnode, Format f>
: BinOpMI8<mnemonic, typeinfo, f,
[(set EFLAGS, (opnode (load addr:$dst),
typeinfo.Imm8Operator:$src))]>;
@@ -1023,12 +948,13 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
bit CommutableRR, bit ConvertibleToThreeAddress> {
let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
- let isCommutable = CommutableRR,
- isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isCommutable = CommutableRR in {
def NAME#8rr : BinOpRR_RF<BaseOpc, mnemonic, Xi8 , opnodeflag>;
- def NAME#16rr : BinOpRR_RF<BaseOpc, mnemonic, Xi16, opnodeflag>;
- def NAME#32rr : BinOpRR_RF<BaseOpc, mnemonic, Xi32, opnodeflag>;
- def NAME#64rr : BinOpRR_RF<BaseOpc, mnemonic, Xi64, opnodeflag>;
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def NAME#16rr : BinOpRR_RF<BaseOpc, mnemonic, Xi16, opnodeflag>;
+ def NAME#32rr : BinOpRR_RF<BaseOpc, mnemonic, Xi32, opnodeflag>;
+ def NAME#64rr : BinOpRR_RF<BaseOpc, mnemonic, Xi64, opnodeflag>;
+ } // isConvertibleToThreeAddress
} // isCommutable
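  // Only the 16/32/64-bit rr forms have an LEA equivalent; the 8-bit form
  // does not, so isConvertibleToThreeAddress now wraps just the wider forms.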
def NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
@@ -1041,6 +967,8 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi32, opnodeflag>;
def NAME#64rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi64, opnodeflag>;
+ def NAME#8ri : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM>;
+
let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
    // NOTE: These are order-specific; we want the ri8 forms listed first so
    // that they are slightly preferred to the ri forms.
@@ -1048,7 +976,6 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32ri8 : BinOpRI8_RF<0x82, mnemonic, Xi32, opnodeflag, RegMRM>;
def NAME#64ri8 : BinOpRI8_RF<0x82, mnemonic, Xi64, opnodeflag, RegMRM>;
- def NAME#8ri : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM>;
def NAME#16ri : BinOpRI_RF<0x80, mnemonic, Xi16, opnodeflag, RegMRM>;
def NAME#32ri : BinOpRI_RF<0x80, mnemonic, Xi32, opnodeflag, RegMRM>;
def NAME#64ri32: BinOpRI_RF<0x80, mnemonic, Xi64, opnodeflag, RegMRM>;
@@ -1066,10 +993,20 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32mi8 : BinOpMI8_RMW<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi8 : BinOpMI8_RMW<mnemonic, Xi64, opnode, MemMRM>;
- def NAME#8mi : BinOpMI_RMW<mnemonic, Xi8 , opnode, MemMRM>;
- def NAME#16mi : BinOpMI_RMW<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi : BinOpMI_RMW<mnemonic, Xi32, opnode, MemMRM>;
- def NAME#64mi32 : BinOpMI_RMW<mnemonic, Xi64, opnode, MemMRM>;
+ def NAME#8mi : BinOpMI_RMW<0x80, mnemonic, Xi8 , opnode, MemMRM>;
+ def NAME#16mi : BinOpMI_RMW<0x80, mnemonic, Xi16, opnode, MemMRM>;
+ def NAME#32mi : BinOpMI_RMW<0x80, mnemonic, Xi32, opnode, MemMRM>;
+ def NAME#64mi32 : BinOpMI_RMW<0x80, mnemonic, Xi64, opnode, MemMRM>;
+
+    // These exist only for the disassembler: opcode 0x82 behaves like 0x80,
+    // except that it is not valid in 64-bit mode.
+ let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
+ hasSideEffects = 0 in {
+ let Constraints = "$src1 = $dst" in
+ def NAME#8ri8 : BinOpRI8_RF<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ let mayLoad = 1, mayStore = 1 in
+ def NAME#8mi8 : BinOpMI8_RMW<mnemonic, Xi8, null_frag, MemMRM>;
+ }
} // Defs = [EFLAGS]
def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
@@ -1094,12 +1031,13 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
bit ConvertibleToThreeAddress> {
let Uses = [EFLAGS], Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
- let isCommutable = CommutableRR,
- isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isCommutable = CommutableRR in {
def NAME#8rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi8 , opnode>;
- def NAME#16rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi16, opnode>;
- def NAME#32rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi32, opnode>;
- def NAME#64rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi64, opnode>;
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def NAME#16rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi16, opnode>;
+ def NAME#32rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi32, opnode>;
+ def NAME#64rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi64, opnode>;
+ } // isConvertibleToThreeAddress
} // isCommutable
def NAME#8rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi8>;
@@ -1112,6 +1050,8 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi32, opnode>;
def NAME#64rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi64, opnode>;
+ def NAME#8ri : BinOpRI_RFF<0x80, mnemonic, Xi8 , opnode, RegMRM>;
+
let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
    // NOTE: These are order-specific; we want the ri8 forms listed first so
    // that they are slightly preferred to the ri forms.
@@ -1119,7 +1059,6 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi32, opnode, RegMRM>;
def NAME#64ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi64, opnode, RegMRM>;
- def NAME#8ri : BinOpRI_RFF<0x80, mnemonic, Xi8 , opnode, RegMRM>;
def NAME#16ri : BinOpRI_RFF<0x80, mnemonic, Xi16, opnode, RegMRM>;
def NAME#32ri : BinOpRI_RFF<0x80, mnemonic, Xi32, opnode, RegMRM>;
def NAME#64ri32: BinOpRI_RFF<0x80, mnemonic, Xi64, opnode, RegMRM>;
@@ -1137,10 +1076,20 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32mi8 : BinOpMI8_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi8 : BinOpMI8_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
- def NAME#8mi : BinOpMI_RMW_FF<mnemonic, Xi8 , opnode, MemMRM>;
- def NAME#16mi : BinOpMI_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi : BinOpMI_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
- def NAME#64mi32 : BinOpMI_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
+ def NAME#8mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi8 , opnode, MemMRM>;
+ def NAME#16mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi16, opnode, MemMRM>;
+ def NAME#32mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi32, opnode, MemMRM>;
+ def NAME#64mi32 : BinOpMI_RMW_FF<0x80, mnemonic, Xi64, opnode, MemMRM>;
+
+    // These exist only for the disassembler: opcode 0x82 behaves like 0x80,
+    // except that it is not valid in 64-bit mode.
+ let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
+ hasSideEffects = 0 in {
+ let Constraints = "$src1 = $dst" in
+ def NAME#8ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ let mayLoad = 1, mayStore = 1 in
+ def NAME#8mi8 : BinOpMI8_RMW_FF<mnemonic, Xi8, null_frag, MemMRM>;
+ }
} // Uses = [EFLAGS], Defs = [EFLAGS]
def NAME#8i8 : BinOpAI_FF<BaseOpc4, mnemonic, Xi8 , AL,
@@ -1162,12 +1111,13 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
SDNode opnode,
bit CommutableRR, bit ConvertibleToThreeAddress> {
let Defs = [EFLAGS] in {
- let isCommutable = CommutableRR,
- isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isCommutable = CommutableRR in {
def NAME#8rr : BinOpRR_F<BaseOpc, mnemonic, Xi8 , opnode>;
- def NAME#16rr : BinOpRR_F<BaseOpc, mnemonic, Xi16, opnode>;
- def NAME#32rr : BinOpRR_F<BaseOpc, mnemonic, Xi32, opnode>;
- def NAME#64rr : BinOpRR_F<BaseOpc, mnemonic, Xi64, opnode>;
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def NAME#16rr : BinOpRR_F<BaseOpc, mnemonic, Xi16, opnode>;
+ def NAME#32rr : BinOpRR_F<BaseOpc, mnemonic, Xi32, opnode>;
+ def NAME#64rr : BinOpRR_F<BaseOpc, mnemonic, Xi64, opnode>;
+ }
} // isCommutable
def NAME#8rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi8>;
@@ -1180,6 +1130,8 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32rm : BinOpRM_F<BaseOpc2, mnemonic, Xi32, opnode>;
def NAME#64rm : BinOpRM_F<BaseOpc2, mnemonic, Xi64, opnode>;
+ def NAME#8ri : BinOpRI_F<0x80, mnemonic, Xi8 , opnode, RegMRM>;
+
let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
    // NOTE: These are order-specific; we want the ri8 forms listed first so
    // that they are slightly preferred to the ri forms.
@@ -1187,7 +1139,6 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32ri8 : BinOpRI8_F<0x82, mnemonic, Xi32, opnode, RegMRM>;
def NAME#64ri8 : BinOpRI8_F<0x82, mnemonic, Xi64, opnode, RegMRM>;
- def NAME#8ri : BinOpRI_F<0x80, mnemonic, Xi8 , opnode, RegMRM>;
def NAME#16ri : BinOpRI_F<0x80, mnemonic, Xi16, opnode, RegMRM>;
def NAME#32ri : BinOpRI_F<0x80, mnemonic, Xi32, opnode, RegMRM>;
def NAME#64ri32: BinOpRI_F<0x80, mnemonic, Xi64, opnode, RegMRM>;
@@ -1204,10 +1155,19 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32mi8 : BinOpMI8_F<mnemonic, Xi32, opnode, MemMRM>;
def NAME#64mi8 : BinOpMI8_F<mnemonic, Xi64, opnode, MemMRM>;
- def NAME#8mi : BinOpMI_F<mnemonic, Xi8 , opnode, MemMRM>;
- def NAME#16mi : BinOpMI_F<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi : BinOpMI_F<mnemonic, Xi32, opnode, MemMRM>;
- def NAME#64mi32 : BinOpMI_F<mnemonic, Xi64, opnode, MemMRM>;
+ def NAME#8mi : BinOpMI_F<0x80, mnemonic, Xi8 , opnode, MemMRM>;
+ def NAME#16mi : BinOpMI_F<0x80, mnemonic, Xi16, opnode, MemMRM>;
+ def NAME#32mi : BinOpMI_F<0x80, mnemonic, Xi32, opnode, MemMRM>;
+ def NAME#64mi32 : BinOpMI_F<0x80, mnemonic, Xi64, opnode, MemMRM>;
+
+    // These exist only for the disassembler: opcode 0x82 behaves like 0x80,
+    // except that it is not valid in 64-bit mode.
+ let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
+ hasSideEffects = 0 in {
+ def NAME#8ri8 : BinOpRI8_F<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ let mayLoad = 1 in
+ def NAME#8mi8 : BinOpMI8_F<mnemonic, Xi8, null_frag, MemMRM>;
+ }
} // Defs = [EFLAGS]
def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL,
@@ -1272,15 +1232,15 @@ let isCompare = 1 in {
def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>;
def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>;
- def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>;
- def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>;
- def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>;
- def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>;
+ def TEST8mi : BinOpMI_F<0xF6, "test", Xi8 , X86testpat, MRM0m>;
+ def TEST16mi : BinOpMI_F<0xF6, "test", Xi16, X86testpat, MRM0m>;
+ def TEST32mi : BinOpMI_F<0xF6, "test", Xi32, X86testpat, MRM0m>;
+ def TEST64mi32 : BinOpMI_F<0xF6, "test", Xi64, X86testpat, MRM0m>;
// When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the
// register class is constrained to GR8_NOREX. This pseudo is explicitly
// marked side-effect free, since it doesn't have an isel pattern like
- // other test instructions.
+ // other test instructions.
let isPseudo = 1, hasSideEffects = 0 in
def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask),
"", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>;
@@ -1332,7 +1292,7 @@ let Predicates = [HasBMI] in {
// MULX Instruction
//
multiclass bmi_mulx<string mnemonic, RegisterClass RC, X86MemOperand x86memop> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let isCommutable = 1 in
def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
@@ -1355,19 +1315,19 @@ let Predicates = [HasBMI2] in {
//===----------------------------------------------------------------------===//
// ADCX Instruction
//
-let hasSideEffects = 0, Defs = [EFLAGS], Uses = [EFLAGS],
+let Predicates = [HasADX], Defs = [EFLAGS], Uses = [EFLAGS],
Constraints = "$src0 = $dst", AddedComplexity = 10 in {
let SchedRW = [WriteALU] in {
def ADCX32rr : I<0xF6, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src0, GR32:$src), "adcx{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS,
(X86adc_flag GR32:$src0, GR32:$src, EFLAGS))],
- IIC_BIN_CARRY_NONMEM>, T8PD, Requires<[HasADX]>;
+ IIC_BIN_CARRY_NONMEM>, T8PD;
def ADCX64rr : RI<0xF6, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src0, GR64:$src), "adcx{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS,
(X86adc_flag GR64:$src0, GR64:$src, EFLAGS))],
- IIC_BIN_CARRY_NONMEM>, T8PD, Requires<[HasADX, In64BitMode]>;
+ IIC_BIN_CARRY_NONMEM>, T8PD;
} // SchedRW
let mayLoad = 1, SchedRW = [WriteALULd] in {
@@ -1375,37 +1335,34 @@ let hasSideEffects = 0, Defs = [EFLAGS], Uses = [EFLAGS],
(ins GR32:$src0, i32mem:$src), "adcx{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS,
(X86adc_flag GR32:$src0, (loadi32 addr:$src), EFLAGS))],
- IIC_BIN_CARRY_MEM>, T8PD, Requires<[HasADX]>;
+ IIC_BIN_CARRY_MEM>, T8PD;
def ADCX64rm : RI<0xF6, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src0, i64mem:$src), "adcx{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS,
(X86adc_flag GR64:$src0, (loadi64 addr:$src), EFLAGS))],
- IIC_BIN_CARRY_MEM>, T8PD, Requires<[HasADX, In64BitMode]>;
+ IIC_BIN_CARRY_MEM>, T8PD;
}
}
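// ADCX reads and writes only CF, while ADOX (below) uses only OF; together
// they allow two independent carry chains to be interleaved without
// serializing on the rest of EFLAGS.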
//===----------------------------------------------------------------------===//
// ADOX Instruction
//
-let hasSideEffects = 0, Defs = [EFLAGS], Uses = [EFLAGS] in {
+let Predicates = [HasADX], hasSideEffects = 0, Defs = [EFLAGS],
+ Uses = [EFLAGS] in {
let SchedRW = [WriteALU] in {
def ADOX32rr : I<0xF6, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "adox{l}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_NONMEM>, T8XS, Requires<[HasADX]>;
+ "adox{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8XS;
def ADOX64rr : RI<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "adox{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_NONMEM>, T8XS, Requires<[HasADX, In64BitMode]>;
+ "adox{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8XS;
} // SchedRW
let mayLoad = 1, SchedRW = [WriteALULd] in {
def ADOX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "adox{l}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_MEM>, T8XS, Requires<[HasADX]>;
+ "adox{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8XS;
def ADOX64rm : RI<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "adox{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_MEM>, T8XS, Requires<[HasADX, In64BitMode]>;
+ "adox{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8XS;
}
}
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 117b6ff..18bbe5d 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -32,7 +32,7 @@ def GetLo8XForm : SDNodeXForm<imm, [{
// PIC base construction. This expands to code that looks like this:
// call $next_inst
//     popl %destreg
-let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
+let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
"", []>;
@@ -43,15 +43,18 @@ let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
-def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKDOWN",
- [(X86callseq_start timm:$amt)]>,
+ []>,
Requires<[NotLP64]>;
def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKUP",
[(X86callseq_end timm:$amt1, timm:$amt2)]>,
Requires<[NotLP64]>;
}
+def : Pat<(X86callseq_start timm:$amt1),
+ (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;
+
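+// The ADJCALLSTACKDOWN pseudos now take the same (amt1, amt2) operand pair
+// as ADJCALLSTACKUP; X86callseq_start still carries a single amount, so the
+// pattern above supplies 0 for the second operand.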
// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
@@ -59,16 +62,17 @@ def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
-def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKDOWN",
- [(X86callseq_start timm:$amt)]>,
+ []>,
Requires<[IsLP64]>;
def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKUP",
[(X86callseq_end timm:$amt1, timm:$amt2)]>,
Requires<[IsLP64]>;
}
-
+def : Pat<(X86callseq_start timm:$amt1),
+ (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;
// x86-64 va_start lowering magic.
@@ -259,7 +263,7 @@ def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
- isCodeGenOnly = 1, neverHasSideEffects = 1 in
+ isCodeGenOnly = 1, hasSideEffects = 0 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
"", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;
@@ -471,59 +475,50 @@ def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
-// however that requires promoting the operands, and can induce additional
-// i8 register pressure.
-let usesCustomInserter = 1, Uses = [EFLAGS] in {
-def CMOV_GR8 : I<0, Pseudo,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
- "#CMOV_GR8 PSEUDO!",
- [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
- imm:$cond, EFLAGS))]>;
-
-let Predicates = [NoCMov] in {
-def CMOV_GR32 : I<0, Pseudo,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
- "#CMOV_GR32* PSEUDO!",
- [(set GR32:$dst,
- (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_GR16 : I<0, Pseudo,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
- "#CMOV_GR16* PSEUDO!",
- [(set GR16:$dst,
- (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
-} // Predicates = [NoCMov]
-
-// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
-// SSE1.
-let Predicates = [FPStackf32] in
-def CMOV_RFP32 : I<0, Pseudo,
- (outs RFP32:$dst),
- (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
- "#CMOV_RFP32 PSEUDO!",
- [(set RFP32:$dst,
- (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
- EFLAGS))]>;
-// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
-// SSE2.
-let Predicates = [FPStackf64] in
-def CMOV_RFP64 : I<0, Pseudo,
- (outs RFP64:$dst),
- (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
- "#CMOV_RFP64 PSEUDO!",
- [(set RFP64:$dst,
- (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
- EFLAGS))]>;
-def CMOV_RFP80 : I<0, Pseudo,
- (outs RFP80:$dst),
- (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
- "#CMOV_RFP80 PSEUDO!",
- [(set RFP80:$dst,
- (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
- EFLAGS))]>;
-} // UsesCustomInserter = 1, Uses = [EFLAGS]
+// CMOV* - Used to implement the SELECT DAG operation. Expanded after
+// instruction selection into a branch sequence.
+multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
+ def CMOV#NAME : I<0, Pseudo,
+ (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
+ "#CMOV_"#NAME#" PSEUDO!",
+ [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
+ EFLAGS)))]>;
+}
+let usesCustomInserter = 1, Uses = [EFLAGS] in {
+ // X86 doesn't have 8-bit conditional moves. Use a customInserter to
+ // emit control flow. An alternative to this is to mark i8 SELECT as Promote;
+ // however, that requires promoting the operands and can induce additional
+ // i8 register pressure.
+ defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;
+
+ let Predicates = [NoCMov] in {
+ defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
+ defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
+ } // Predicates = [NoCMov]
+
+ // fcmov doesn't handle all possible EFLAGS; provide a fallback if there is no
+ // SSE1/SSE2.
+ let Predicates = [FPStackf32] in
+ defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;
+
+ let Predicates = [FPStackf64] in
+ defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;
+
+ defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;
+
+ defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
+ defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
+ defm _V4F32 : CMOVrr_PSEUDO<VR128, v4f32>;
+ defm _V2F64 : CMOVrr_PSEUDO<VR128, v2f64>;
+ defm _V2I64 : CMOVrr_PSEUDO<VR128, v2i64>;
+ defm _V8F32 : CMOVrr_PSEUDO<VR256, v8f32>;
+ defm _V4F64 : CMOVrr_PSEUDO<VR256, v4f64>;
+ defm _V4I64 : CMOVrr_PSEUDO<VR256, v4i64>;
+ defm _V8I64 : CMOVrr_PSEUDO<VR512, v8i64>;
+ defm _V8F64 : CMOVrr_PSEUDO<VR512, v8f64>;
+ defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
+} // usesCustomInserter = 1, Uses = [EFLAGS]
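The CMOV_* pseudos defined above all compute the same type-generic select; only the register class and value type vary, which is what lets one multiclass replace the dozen hand-written defs removed further down in this patch. A standalone sketch of the value each pseudo produces (not LLVM code; the real expansion is done by the custom inserter on MachineIR):

```cpp
// What every CMOV_* pseudo computes. After instruction selection the custom
// inserter replaces it with this branch diamond, since x86 has no native
// cmov for i8 or for the FP-stack/vector register classes handled here.
template <typename T>
T cmov_pseudo(bool eflags_cond, T t, T f) {
  if (eflags_cond) // conditional branch on the EFLAGS condition...
    return t;      // ...true edge
  return f;        // ...false edge; the two edges rejoin at a PHI
}

int main() {
  return cmov_pseudo<int>(true, 1, 2) == 1 ? 0 : 1;
}
```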
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
@@ -600,12 +595,12 @@ def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, OpSize32, LOCK;
-def NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
- ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
- ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
- !strconcat(mnemonic, "{q}\t",
- "{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
+ ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
+ ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
+ !strconcat(mnemonic, "{q}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, LOCK;
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
@@ -859,79 +854,6 @@ def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
"#ACQUIRE_MOV PSEUDO!",
[(set GR64:$dst, (atomic_load_64 addr:$src))]>;
-//===----------------------------------------------------------------------===//
-// Conditional Move Pseudo Instructions.
-//===----------------------------------------------------------------------===//
-
-// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
-// instruction selection into a branch sequence.
-let Uses = [EFLAGS], usesCustomInserter = 1 in {
- def CMOV_FR32 : I<0, Pseudo,
- (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
- "#CMOV_FR32 PSEUDO!",
- [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
- EFLAGS))]>;
- def CMOV_FR64 : I<0, Pseudo,
- (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
- "#CMOV_FR64 PSEUDO!",
- [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
- EFLAGS))]>;
- def CMOV_V4F32 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V4F32 PSEUDO!",
- [(set VR128:$dst,
- (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V2F64 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V2F64 PSEUDO!",
- [(set VR128:$dst,
- (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V2I64 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V2I64 PSEUDO!",
- [(set VR128:$dst,
- (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V8F32 : I<0, Pseudo,
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
- "#CMOV_V8F32 PSEUDO!",
- [(set VR256:$dst,
- (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V4F64 : I<0, Pseudo,
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
- "#CMOV_V4F64 PSEUDO!",
- [(set VR256:$dst,
- (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V4I64 : I<0, Pseudo,
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
- "#CMOV_V4I64 PSEUDO!",
- [(set VR256:$dst,
- (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V8I64 : I<0, Pseudo,
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
- "#CMOV_V8I64 PSEUDO!",
- [(set VR512:$dst,
- (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V8F64 : I<0, Pseudo,
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
- "#CMOV_V8F64 PSEUDO!",
- [(set VR512:$dst,
- (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V16F32 : I<0, Pseudo,
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
- "#CMOV_V16F32 PSEUDO!",
- [(set VR512:$dst,
- (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,
- EFLAGS)))]>;
-}
-
//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
@@ -1010,6 +932,9 @@ def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tblockaddress:$src)>,
Requires<[NearData, IsStatic]>;
+def : Pat<(i32 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
+def : Pat<(i64 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV64ri texternalsym:$dst)>;
+
// Calls
// tls has some funny stuff here...
@@ -1058,12 +983,12 @@ def : Pat<(X86tcret (load addr:$dst), imm:$off),
Requires<[Not64BitMode, IsNotPIC]>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[Not64BitMode]>;
+ (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
+ Requires<[NotLP64]>;
def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
(TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[Not64BitMode]>;
+ Requires<[NotLP64]>;
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
@@ -1077,11 +1002,11 @@ def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
(TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
+ Requires<[IsLP64]>;
def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
(TCRETURNdi64 texternalsym:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
+ Requires<[IsLP64]>;
// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
@@ -1556,8 +1481,12 @@ def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
// Helper imms that check if a mask doesn't change significant shift bits.
-def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
-def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
+def immShift32 : ImmLeaf<i8, [{
+ return countTrailingOnes<uint64_t>(Imm) >= 5;
+}]>;
+def immShift64 : ImmLeaf<i8, [{
+ return countTrailingOnes<uint64_t>(Imm) >= 6;
+}]>;
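The rewrite from CountTrailingOnes_32 to the templated countTrailingOnes keeps the same test: x86 shifts ignore all but the low 5 (32-bit) or 6 (64-bit) bits of the amount, so an explicit AND whose mask has at least that many trailing ones cannot change the result and can be folded away. A standalone check of the 32-bit case (not LLVM code):

```cpp
#include <cassert>
#include <cstdint>

// The same test immShift32 performs on the mask immediate.
static unsigned countTrailingOnes(uint64_t x) {
  unsigned n = 0;
  while (x & 1) { ++n; x >>= 1; }
  return n;
}

int main() {
  const uint64_t mask = 0x1f; // 5 trailing ones, so immShift32 accepts it
  assert(countTrailingOnes(mask) >= 5);
  // Hardware reduces 32-bit shift amounts mod 32, so masking first is a no-op.
  for (uint32_t amt = 0; amt < 256; ++amt)
    assert(((amt & mask) & 31) == (amt & 31));
  return 0;
}
```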
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
@@ -1724,35 +1653,18 @@ def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
(IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
-// Increment reg.
-// Do not make INC if it is slow
-def : Pat<(add GR8:$src, 1),
- (INC8r GR8:$src)>, Requires<[NotSlowIncDec]>;
-def : Pat<(add GR16:$src, 1),
- (INC16r GR16:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
-def : Pat<(add GR16:$src, 1),
- (INC64_16r GR16:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
-def : Pat<(add GR32:$src, 1),
- (INC32r GR32:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
-def : Pat<(add GR32:$src, 1),
- (INC64_32r GR32:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
-def : Pat<(add GR64:$src, 1),
- (INC64r GR64:$src)>, Requires<[NotSlowIncDec]>;
-
-// Decrement reg.
-// Do not make DEC if it is slow
-def : Pat<(add GR8:$src, -1),
- (DEC8r GR8:$src)>, Requires<[NotSlowIncDec]>;
-def : Pat<(add GR16:$src, -1),
- (DEC16r GR16:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
-def : Pat<(add GR16:$src, -1),
- (DEC64_16r GR16:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
-def : Pat<(add GR32:$src, -1),
- (DEC32r GR32:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
-def : Pat<(add GR32:$src, -1),
- (DEC64_32r GR32:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
-def : Pat<(add GR64:$src, -1),
- (DEC64r GR64:$src)>, Requires<[NotSlowIncDec]>;
+// Increment/Decrement reg.
+// Do not form INC/DEC if they are slow
+let Predicates = [NotSlowIncDec] in {
+ def : Pat<(add GR8:$src, 1), (INC8r GR8:$src)>;
+ def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>;
+ def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>;
+ def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
+ def : Pat<(add GR8:$src, -1), (DEC8r GR8:$src)>;
+ def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
+ def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
+ def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
+}
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 39ad395..6ab961f 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -57,33 +57,32 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
// Unconditional branches.
let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
- def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize32;
- def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize16,
- Requires<[In16BitMode]>;
- let hasSideEffects = 0 in
def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
- "jmp\t$dst", [], IIC_JMP_REL>;
+ "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>;
+ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
+ def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget16:$dst),
+ "jmp\t$dst", [], IIC_JMP_REL>, OpSize16;
+ def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget32:$dst),
+ "jmp\t$dst", [], IIC_JMP_REL>, OpSize32;
+ }
}
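Moving the (br bb:$dst) pattern from JMP_4 onto JMP_1 means codegen now always selects the short form and leaves the encoding choice to later layout: if the displacement does not fit in a signed byte, the jump is relaxed to the rel32 form, which is why JMP_2/JMP_4 survive as codegen-only encodings for the assembler and disassembler. The range test, as a standalone sketch:

```cpp
#include <cstdint>

// A rel8 jump (EB cb, 2 bytes) reaches targets within a signed byte of the
// next instruction; anything farther needs the rel32 form (E9 cd, 5 bytes).
bool fitsInRel8(int64_t disp) { return disp >= -128 && disp <= 127; }

unsigned jmpEncodingSize(int64_t disp) { return fitsInRel8(disp) ? 2 : 5; }

int main() {
  return (jmpEncodingSize(100) == 2 && jmpEncodingSize(1000) == 5) ? 0 : 1;
}
```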
// Conditional Branches.
let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump] in {
multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
- let hasSideEffects = 0 in
- def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [],
- IIC_Jcc>;
- def _2 : Ii16PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
- [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, OpSize16,
- TB, Requires<[In16BitMode]>;
- def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
- [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, TB,
- OpSize32;
+ def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm,
+ [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>;
+ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
+ def _2 : Ii16PCRel<opc4, RawFrm, (outs), (ins brtarget16:$dst), asm,
+ [], IIC_Jcc>, OpSize16, TB;
+ def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget32:$dst), asm,
+ [], IIC_Jcc>, TB, OpSize32;
+ }
}
}
defm JO : ICBr<0x70, 0x80, "jo\t$dst" , X86_COND_O>;
-defm JNO : ICBr<0x71, 0x81, "jno\t$dst" , X86_COND_NO>;
+defm JNO : ICBr<0x71, 0x81, "jno\t$dst", X86_COND_NO>;
defm JB : ICBr<0x72, 0x82, "jb\t$dst" , X86_COND_B>;
defm JAE : ICBr<0x73, 0x83, "jae\t$dst", X86_COND_AE>;
defm JE : ICBr<0x74, 0x84, "je\t$dst" , X86_COND_E>;
@@ -106,20 +105,14 @@ let isBranch = 1, isTerminator = 1, hasSideEffects = 0, SchedRW = [WriteJump] in
// jecxz.
let Uses = [CX] in
def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jcxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[Not64BitMode]>;
+ "jcxz\t$dst", [], IIC_JCXZ>, AdSize16;
let Uses = [ECX] in
- def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", [], IIC_JCXZ>, Requires<[Not64BitMode]>;
+ def JECXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jecxz\t$dst", [], IIC_JCXZ>, AdSize32;
- // J*CXZ instruction: 64-bit versions of this instruction for the asmparser.
- // In 64-bit mode, the address size prefix is jecxz and the unprefixed version
- // is jrcxz.
- let Uses = [ECX] in
- def JECXZ_64 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[In64BitMode]>;
let Uses = [RCX] in
def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jrcxz\t$dst", [], IIC_JCXZ>, Requires<[In64BitMode]>;
+ "jrcxz\t$dst", [], IIC_JCXZ>, AdSize64;
}
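All three of these share the 0xE3 opcode; the counter register tested (CX, ECX or RCX) is selected purely by the effective address size. The new AdSize16/32/64 annotations therefore let the encoder emit a 0x67 override exactly when the instruction's address size differs from the mode's default, instead of keying off mode predicates. A standalone sketch of that decision:

```cpp
// Emit 0x67 iff the requested address size differs from the mode's default.
// (jcxz is not encodable in 64-bit mode, which has no 16-bit address size.)
bool needsAdSizePrefix(unsigned modeDefaultBits, unsigned insnAdSizeBits) {
  return insnAdSizeBits != modeDefaultBits;
}

int main() {
  bool jecxzIn32 = needsAdSizePrefix(32, 32); // false: plain 0xE3
  bool jecxzIn64 = needsAdSizePrefix(64, 32); // true: 0x67 0xE3
  return (!jecxzIn32 && jecxzIn64) ? 0 : 1;
}
```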
// Indirect branches
@@ -145,14 +138,16 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
[(brind (loadi64 addr:$dst))], IIC_JMP_MEM>,
Requires<[In64BitMode]>, Sched<[WriteJumpLd]>;
- def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
- (ins i16imm:$off, i16imm:$seg),
- "ljmp{w}\t{$seg, $off|$off, $seg}", [],
- IIC_JMP_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
- def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
- (ins i32imm:$off, i16imm:$seg),
- "ljmp{l}\t{$seg, $off|$off, $seg}", [],
- IIC_JMP_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
+ let Predicates = [Not64BitMode] in {
+ def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "ljmp{w}\t$seg, $off", [],
+ IIC_JMP_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
+ def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "ljmp{l}\t$seg, $off", [],
+ IIC_JMP_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
+ }
def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
"ljmp{q}\t{*}$dst", [], IIC_JMP_FAR_MEM>,
Sched<[WriteJump]>;
@@ -186,10 +181,11 @@ let isCall = 1 in
(outs), (ins i32imm_pcrel:$dst),
"call{l}\t$dst", [], IIC_CALL_RI>, OpSize32,
Requires<[Not64BitMode]>, Sched<[WriteJump]>;
- def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
- (outs), (ins i16imm_pcrel:$dst),
- "call{w}\t$dst", [], IIC_CALL_RI>, OpSize16,
- Sched<[WriteJump]>;
+ let hasSideEffects = 0 in
+ def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
+ (outs), (ins i16imm_pcrel:$dst),
+ "call{w}\t$dst", [], IIC_CALL_RI>, OpSize16,
+ Sched<[WriteJump]>;
def CALL16r : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
"call{w}\t{*}$dst", [(X86call GR16:$dst)], IIC_CALL_RI>,
OpSize16, Requires<[Not64BitMode]>, Sched<[WriteJump]>;
@@ -207,14 +203,16 @@ let isCall = 1 in
Requires<[Not64BitMode,FavorMemIndirectCall]>,
Sched<[WriteJumpLd]>;
- def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
- (ins i16imm:$off, i16imm:$seg),
- "lcall{w}\t{$seg, $off|$off, $seg}", [],
- IIC_CALL_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
- def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
- (ins i32imm:$off, i16imm:$seg),
- "lcall{l}\t{$seg, $off|$off, $seg}", [],
- IIC_CALL_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
+ let Predicates = [Not64BitMode] in {
+ def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "lcall{w}\t$seg, $off", [],
+ IIC_CALL_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
+ def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "lcall{l}\t$seg, $off", [],
+ IIC_CALL_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
+ }
def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
"lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize16,
@@ -242,13 +240,13 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
// mcinst.
def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
(ins i32imm_pcrel:$dst),
- "jmp\t$dst # TAILCALL",
+ "jmp\t$dst",
[], IIC_JMP_REL>;
def TAILJMPr : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
"", [], IIC_JMP_REG>; // FIXME: Remove encoding when JIT is dead.
let mayLoad = 1 in
def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst),
- "jmp{l}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
+ "jmp{l}\t{*}$dst", [], IIC_JMP_MEM>;
}
@@ -280,17 +278,6 @@ let isCall = 1, Uses = [RSP], SchedRW = [WriteJump] in {
"lcall{q}\t{*}$dst", [], IIC_CALL_FAR_MEM>;
}
-let isCall = 1, isCodeGenOnly = 1 in
- // __chkstk(MSVC): clobber R10, R11 and EFLAGS.
- // ___chkstk(Mingw64): clobber R10, R11, RAX and EFLAGS, and update RSP.
- let Defs = [RAX, R10, R11, RSP, EFLAGS],
- Uses = [RSP] in {
- def W64ALLOCA : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst),
- "call{q}\t$dst", [], IIC_CALL_RI>,
- Requires<[IsWin64]>, Sched<[WriteJump]>;
- }
-
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1, Uses = [RSP], usesCustomInserter = 1,
SchedRW = [WriteJump] in {
@@ -303,13 +290,25 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
def TCRETURNmi64 : PseudoI<(outs),
(ins i64mem_TC:$dst, i32imm:$offset), []>;
- def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
- (ins i64i32imm_pcrel:$dst),
- "jmp\t$dst # TAILCALL", [], IIC_JMP_REL>;
+ def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs), (ins i64i32imm_pcrel:$dst),
+ "jmp\t$dst", [], IIC_JMP_REL>;
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
- "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
+ "jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
let mayLoad = 1 in
def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
- "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
+ "jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
+
+ // Win64 wants jumps leaving the function to have a REX_W prefix.
+ let hasREX_WPrefix = 1 in {
+ def TAILJMPd64_REX : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i64i32imm_pcrel:$dst),
+ "rex64 jmp\t$dst", [], IIC_JMP_REL>;
+ def TAILJMPr64_REX : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
+ "rex64 jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
+
+ let mayLoad = 1 in
+ def TAILJMPm64_REX : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
+ "rex64 jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
+ }
}
diff --git a/lib/Target/X86/X86InstrExtension.td b/lib/Target/X86/X86InstrExtension.td
index b38129a..c4b2d6d 100644
--- a/lib/Target/X86/X86InstrExtension.td
+++ b/lib/Target/X86/X86InstrExtension.td
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
let Defs = [AX], Uses = [AL] in
def CBW : I<0x98, RawFrm, (outs), (ins),
"{cbtw|cbw}", [], IIC_CBW>, OpSize16; // AX = signext(AL)
@@ -39,7 +39,7 @@ let neverHasSideEffects = 1 in {
// Sign/Zero extenders
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>,
TB, OpSize16, Sched<[WriteALU]>;
@@ -47,7 +47,7 @@ let mayLoad = 1 in
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>,
TB, OpSize16, Sched<[WriteALULd]>;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB,
@@ -65,7 +65,7 @@ def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
[(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>,
OpSize32, TB, Sched<[WriteALULd]>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>,
TB, OpSize16, Sched<[WriteALU]>;
@@ -73,7 +73,7 @@ let mayLoad = 1 in
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>,
TB, OpSize16, Sched<[WriteALULd]>;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB,
@@ -94,7 +94,7 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
// instead of GR32. This allows them to operate on h registers on x86-64.
-let neverHasSideEffects = 1, isCodeGenOnly = 1 in {
+let hasSideEffects = 0, isCodeGenOnly = 1 in {
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
(outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
@@ -139,11 +139,11 @@ def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
"movs{lq|xd}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (sext GR32:$src))], IIC_MOVSX>,
- Sched<[WriteALU]>;
+ Sched<[WriteALU]>, Requires<[In64BitMode]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
"movs{lq|xd}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (sextloadi64i32 addr:$src))], IIC_MOVSX>,
- Sched<[WriteALULd]>;
+ Sched<[WriteALULd]>, Requires<[In64BitMode]>;
// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index c0a6864..2993e42 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -69,7 +69,7 @@ multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
/* IsRVariantCommutable */ 1,
/* IsMVariantCommutable */ 1,
Op>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm r132 : fma3p_rm<opc132,
!strconcat(OpcodeStr, "132", PackTy),
MemFrag128, MemFrag256, OpTy128, OpTy256>;
@@ -81,7 +81,7 @@ let neverHasSideEffects = 1 in {
MemFrag128, MemFrag256, OpTy128, OpTy256,
/* IsRVariantCommutable */ 1,
/* IsMVariantCommutable */ 0>;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
}
// Fused Multiply-Add
@@ -155,7 +155,7 @@ multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
SDNode OpNode, RegisterClass RC, ValueType OpVT,
X86MemOperand x86memop, Operand memop, PatFrag mem_frag,
ComplexPattern mem_cpat> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm r132 : fma3s_rm<opc132, !strconcat(OpStr, "132", PackTy),
x86memop, RC, OpVT, mem_frag>;
// See the other defm of r231 for the explanation regarding the
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index d9f173e..6cd5e79 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -17,13 +17,13 @@
// FPStack specific DAG Nodes.
//===----------------------------------------------------------------------===//
-def SDTX86FpGet2 : SDTypeProfile<2, 0, [SDTCisVT<0, f80>,
+def SDTX86FpGet2 : SDTypeProfile<2, 0, [SDTCisVT<0, f80>,
SDTCisVT<1, f80>]>;
def SDTX86Fld : SDTypeProfile<1, 2, [SDTCisFP<0>,
- SDTCisPtrTy<1>,
+ SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fst : SDTypeProfile<0, 3, [SDTCisFP<0>,
- SDTCisPtrTy<1>,
+ SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fild : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
@@ -98,7 +98,7 @@ let usesCustomInserter = 1 in { // Expanded after instruction selection.
// All FP Stack operations are represented with four instructions here. The
// first three instructions, generated by the instruction selector, use "RFP32"
// "RFP64" or "RFP80" registers: traditional register files to reference 32-bit,
-// 64-bit or 80-bit floating point values. These sizes apply to the values,
+// 64-bit or 80-bit floating point values. These sizes apply to the values,
// not the registers, which are always 80 bits; RFP32, RFP64 and RFP80 can be
// copied to each other without losing information. These instructions are all
// pseudo instructions and use the "_Fp" suffix.
@@ -107,7 +107,7 @@ let usesCustomInserter = 1 in { // Expanded after instruction selection.
// The fourth instruction is defined with FPI, which is the actual instruction
// emitted by the assembler. These use "RST" registers, although frequently
// the actual register(s) used are implicit. These are always 80 bits.
-// The FP stackifier pass converts one to the other after register allocation
+// The FP stackifier pass converts one to the other after register allocation
// occurs.
//
// Note that the FpI instruction should have instruction selection info (e.g.
@@ -139,66 +139,66 @@ def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), TwoArgFP,
// These instructions cannot address 80-bit memory.
multiclass FPBinary<SDNode OpNode, Format fp, string asmstring> {
// ST(0) = ST(0) + [mem]
-def _Fp32m : FpIf32<(outs RFP32:$dst),
+def _Fp32m : FpIf32<(outs RFP32:$dst),
(ins RFP32:$src1, f32mem:$src2), OneArgFPRW,
- [(set RFP32:$dst,
+ [(set RFP32:$dst,
(OpNode RFP32:$src1, (loadf32 addr:$src2)))]>;
-def _Fp64m : FpIf64<(outs RFP64:$dst),
+def _Fp64m : FpIf64<(outs RFP64:$dst),
(ins RFP64:$src1, f64mem:$src2), OneArgFPRW,
- [(set RFP64:$dst,
+ [(set RFP64:$dst,
(OpNode RFP64:$src1, (loadf64 addr:$src2)))]>;
-def _Fp64m32: FpIf64<(outs RFP64:$dst),
+def _Fp64m32: FpIf64<(outs RFP64:$dst),
(ins RFP64:$src1, f32mem:$src2), OneArgFPRW,
- [(set RFP64:$dst,
+ [(set RFP64:$dst,
(OpNode RFP64:$src1, (f64 (extloadf32 addr:$src2))))]>;
-def _Fp80m32: FpI_<(outs RFP80:$dst),
+def _Fp80m32: FpI_<(outs RFP80:$dst),
(ins RFP80:$src1, f32mem:$src2), OneArgFPRW,
- [(set RFP80:$dst,
+ [(set RFP80:$dst,
(OpNode RFP80:$src1, (f80 (extloadf32 addr:$src2))))]>;
-def _Fp80m64: FpI_<(outs RFP80:$dst),
+def _Fp80m64: FpI_<(outs RFP80:$dst),
(ins RFP80:$src1, f64mem:$src2), OneArgFPRW,
- [(set RFP80:$dst,
+ [(set RFP80:$dst,
(OpNode RFP80:$src1, (f80 (extloadf64 addr:$src2))))]>;
-def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src),
- !strconcat("f", asmstring, "{s}\t$src")> {
- let mayLoad = 1;
+def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src),
+ !strconcat("f", asmstring, "{s}\t$src")> {
+ let mayLoad = 1;
}
-def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src),
- !strconcat("f", asmstring, "{l}\t$src")> {
- let mayLoad = 1;
+def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src),
+ !strconcat("f", asmstring, "{l}\t$src")> {
+ let mayLoad = 1;
}
// ST(0) = ST(0) + [memint]
-def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2),
+def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2),
OneArgFPRW,
[(set RFP32:$dst, (OpNode RFP32:$src1,
(X86fild addr:$src2, i16)))]>;
-def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2),
+def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2),
OneArgFPRW,
[(set RFP32:$dst, (OpNode RFP32:$src1,
(X86fild addr:$src2, i32)))]>;
-def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2),
+def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2),
OneArgFPRW,
[(set RFP64:$dst, (OpNode RFP64:$src1,
(X86fild addr:$src2, i16)))]>;
-def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2),
+def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2),
OneArgFPRW,
[(set RFP64:$dst, (OpNode RFP64:$src1,
(X86fild addr:$src2, i32)))]>;
-def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2),
+def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2),
OneArgFPRW,
[(set RFP80:$dst, (OpNode RFP80:$src1,
(X86fild addr:$src2, i16)))]>;
-def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2),
+def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2),
OneArgFPRW,
[(set RFP80:$dst, (OpNode RFP80:$src1,
(X86fild addr:$src2, i32)))]>;
-def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src),
- !strconcat("fi", asmstring, "{s}\t$src")> {
- let mayLoad = 1;
+def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src),
+ !strconcat("fi", asmstring, "{s}\t$src")> {
+ let mayLoad = 1;
}
-def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
- !strconcat("fi", asmstring, "{l}\t$src")> {
- let mayLoad = 1;
+def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
+ !strconcat("fi", asmstring, "{l}\t$src")> {
+ let mayLoad = 1;
}
}
@@ -282,7 +282,7 @@ defm SQRT: FPUnary<fsqrt,MRM_FA, "fsqrt">;
defm SIN : FPUnary<fsin, MRM_FE, "fsin">;
defm COS : FPUnary<fcos, MRM_FF, "fcos">;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def TST_Fp32 : FpIf32<(outs), (ins RFP32:$src), OneArgFP, []>;
def TST_Fp64 : FpIf64<(outs), (ins RFP64:$src), OneArgFP, []>;
def TST_Fp80 : FpI_<(outs), (ins RFP80:$src), OneArgFP, []>;
@@ -415,7 +415,7 @@ def ST_Fp80m64 : FpI_<(outs), (ins f64mem:$op, RFP80:$src), OneArgFP,
[(truncstoref64 RFP80:$src, addr:$op)]>;
// FST does not support 80-bit memory target; FSTP must be used.
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
def ST_FpP32m : FpIf32<(outs), (ins f32mem:$op, RFP32:$src), OneArgFP, []>;
def ST_FpP64m32 : FpIf64<(outs), (ins f32mem:$op, RFP64:$src), OneArgFP, []>;
def ST_FpP64m : FpIf64<(outs), (ins f64mem:$op, RFP64:$src), OneArgFP, []>;
@@ -424,7 +424,7 @@ def ST_FpP80m64 : FpI_<(outs), (ins f64mem:$op, RFP80:$src), OneArgFP, []>;
}
def ST_FpP80m : FpI_<(outs), (ins f80mem:$op, RFP80:$src), OneArgFP,
[(store RFP80:$src, addr:$op)]>;
-let mayStore = 1, neverHasSideEffects = 1 in {
+let mayStore = 1, hasSideEffects = 0 in {
def IST_Fp16m32 : FpIf32<(outs), (ins i16mem:$op, RFP32:$src), OneArgFP, []>;
def IST_Fp32m32 : FpIf32<(outs), (ins i32mem:$op, RFP32:$src), OneArgFP, []>;
def IST_Fp64m32 : FpIf32<(outs), (ins i64mem:$op, RFP32:$src), OneArgFP, []>;
@@ -500,7 +500,7 @@ def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst",
IIC_FST>;
def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst",
IIC_FST>;
-def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst),
+def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst),
"fisttp{ll}\t$dst", IIC_FST>;
}
@@ -636,12 +636,12 @@ def FCOMPP : I<0xDE, MRM_D9, (outs), (ins), "fcompp", [], IIC_FCOMPP>;
def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
"fxsave\t$dst", [], IIC_FXSAVE>, TB;
def FXSAVE64 : RI<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
- "fxsave{q|64}\t$dst", [], IIC_FXSAVE>, TB,
+ "fxsave64\t$dst", [], IIC_FXSAVE>, TB,
Requires<[In64BitMode]>;
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
"fxrstor\t$src", [], IIC_FXRSTOR>, TB;
def FXRSTOR64 : RI<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
- "fxrstor{q|64}\t$src", [], IIC_FXRSTOR>, TB,
+ "fxrstor64\t$src", [], IIC_FXRSTOR>, TB,
Requires<[In64BitMode]>;
} // SchedRW
@@ -656,12 +656,12 @@ def : Pat<(X86fld addr:$src, f80), (LD_Fp80m addr:$src)>;
// Required for CALL which return f32 / f64 / f80 values.
def : Pat<(X86fst RFP32:$src, addr:$op, f32), (ST_Fp32m addr:$op, RFP32:$src)>;
-def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op,
+def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op,
RFP64:$src)>;
def : Pat<(X86fst RFP64:$src, addr:$op, f64), (ST_Fp64m addr:$op, RFP64:$src)>;
-def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op,
+def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op,
RFP80:$src)>;
-def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op,
+def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op,
RFP80:$src)>;
def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op,
RFP80:$src)>;
diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td
index fe4ead1..56043fb 100644
--- a/lib/Target/X86/X86InstrFormats.td
+++ b/lib/Target/X86/X86InstrFormats.td
@@ -34,23 +34,27 @@ def MRM0m : Format<24>; def MRM1m : Format<25>; def MRM2m : Format<26>;
def MRM3m : Format<27>; def MRM4m : Format<28>; def MRM5m : Format<29>;
def MRM6m : Format<30>; def MRM7m : Format<31>;
def MRM_C0 : Format<32>; def MRM_C1 : Format<33>; def MRM_C2 : Format<34>;
-def MRM_C3 : Format<35>; def MRM_C4 : Format<36>; def MRM_C8 : Format<37>;
-def MRM_C9 : Format<38>; def MRM_CA : Format<39>; def MRM_CB : Format<40>;
-def MRM_CF : Format<41>; def MRM_D0 : Format<42>; def MRM_D1 : Format<43>;
-def MRM_D4 : Format<44>; def MRM_D5 : Format<45>; def MRM_D6 : Format<46>;
-def MRM_D7 : Format<47>; def MRM_D8 : Format<48>; def MRM_D9 : Format<49>;
-def MRM_DA : Format<50>; def MRM_DB : Format<51>; def MRM_DC : Format<52>;
-def MRM_DD : Format<53>; def MRM_DE : Format<54>; def MRM_DF : Format<55>;
-def MRM_E0 : Format<56>; def MRM_E1 : Format<57>; def MRM_E2 : Format<58>;
-def MRM_E3 : Format<59>; def MRM_E4 : Format<60>; def MRM_E5 : Format<61>;
-def MRM_E8 : Format<62>; def MRM_E9 : Format<63>; def MRM_EA : Format<64>;
-def MRM_EB : Format<65>; def MRM_EC : Format<66>; def MRM_ED : Format<67>;
-def MRM_EE : Format<68>; def MRM_F0 : Format<69>; def MRM_F1 : Format<70>;
-def MRM_F2 : Format<71>; def MRM_F3 : Format<72>; def MRM_F4 : Format<73>;
-def MRM_F5 : Format<74>; def MRM_F6 : Format<75>; def MRM_F7 : Format<76>;
-def MRM_F8 : Format<77>; def MRM_F9 : Format<78>; def MRM_FA : Format<79>;
-def MRM_FB : Format<80>; def MRM_FC : Format<81>; def MRM_FD : Format<82>;
-def MRM_FE : Format<83>; def MRM_FF : Format<84>;
+def MRM_C3 : Format<35>; def MRM_C4 : Format<36>; def MRM_C5 : Format<37>;
+def MRM_C6 : Format<38>; def MRM_C7 : Format<39>; def MRM_C8 : Format<40>;
+def MRM_C9 : Format<41>; def MRM_CA : Format<42>; def MRM_CB : Format<43>;
+def MRM_CC : Format<44>; def MRM_CD : Format<45>; def MRM_CE : Format<46>;
+def MRM_CF : Format<47>; def MRM_D0 : Format<48>; def MRM_D1 : Format<49>;
+def MRM_D2 : Format<50>; def MRM_D3 : Format<51>; def MRM_D4 : Format<52>;
+def MRM_D5 : Format<53>; def MRM_D6 : Format<54>; def MRM_D7 : Format<55>;
+def MRM_D8 : Format<56>; def MRM_D9 : Format<57>; def MRM_DA : Format<58>;
+def MRM_DB : Format<59>; def MRM_DC : Format<60>; def MRM_DD : Format<61>;
+def MRM_DE : Format<62>; def MRM_DF : Format<63>; def MRM_E0 : Format<64>;
+def MRM_E1 : Format<65>; def MRM_E2 : Format<66>; def MRM_E3 : Format<67>;
+def MRM_E4 : Format<68>; def MRM_E5 : Format<69>; def MRM_E6 : Format<70>;
+def MRM_E7 : Format<71>; def MRM_E8 : Format<72>; def MRM_E9 : Format<73>;
+def MRM_EA : Format<74>; def MRM_EB : Format<75>; def MRM_EC : Format<76>;
+def MRM_ED : Format<77>; def MRM_EE : Format<78>; def MRM_EF : Format<79>;
+def MRM_F0 : Format<80>; def MRM_F1 : Format<81>; def MRM_F2 : Format<82>;
+def MRM_F3 : Format<83>; def MRM_F4 : Format<84>; def MRM_F5 : Format<85>;
+def MRM_F6 : Format<86>; def MRM_F7 : Format<87>; def MRM_F8 : Format<88>;
+def MRM_F9 : Format<89>; def MRM_FA : Format<90>; def MRM_FB : Format<91>;
+def MRM_FC : Format<92>; def MRM_FD : Format<93>; def MRM_FE : Format<94>;
+def MRM_FF : Format<95>;
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
@@ -146,11 +150,22 @@ def OpSizeFixed : OperandSize<0>; // Never needs a 0x66 prefix.
def OpSize16 : OperandSize<1>; // Needs 0x66 prefix in 32-bit mode.
def OpSize32 : OperandSize<2>; // Needs 0x66 prefix in 16-bit mode.
+// Address size for encodings that change based on mode.
+class AddressSize<bits<2> val> {
+ bits<2> Value = val;
+}
+def AdSizeX : AddressSize<0>; // Address size determined using addr operand.
+def AdSize16 : AddressSize<1>; // Encodes a 16-bit address.
+def AdSize32 : AddressSize<2>; // Encodes a 32-bit address.
+def AdSize64 : AddressSize<3>; // Encodes a 64-bit address.
+
// Prefix byte classes which are used to indicate to the ad-hoc machine code
// emitter that various prefix bytes are required.
class OpSize16 { OperandSize OpSize = OpSize16; }
class OpSize32 { OperandSize OpSize = OpSize32; }
-class AdSize { bit hasAdSizePrefix = 1; }
+class AdSize16 { AddressSize AdSize = AdSize16; }
+class AdSize32 { AddressSize AdSize = AdSize32; }
+class AdSize64 { AddressSize AdSize = AdSize64; }
class REX_W { bit hasREX_WPrefix = 1; }
class LOCK { bit hasLockPrefix = 1; }
class REP { bit hasREPPrefix = 1; }
@@ -231,9 +246,11 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
// AsmString from the parser, but still disassemble.
OperandSize OpSize = OpSizeFixed; // Does this instruction's encoding change
- // based on operand size of the mode
+ // based on operand size of the mode?
bits<2> OpSizeBits = OpSize.Value;
- bit hasAdSizePrefix = 0; // Does this inst have a 0x67 prefix?
+ AddressSize AdSize = AdSizeX; // Does this instruction's encoding change
+ // based on address size of the mode?
+ bits<2> AdSizeBits = AdSize.Value;
Prefix OpPrefix = NoPrfx; // Which prefix byte does this inst have?
bits<3> OpPrefixBits = OpPrefix.Value;
@@ -284,35 +301,35 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
CD8_EltSize,
!srl(VectSize, CD8_Form{1-0}))), 0);
- // TSFlags layout should be kept in sync with X86InstrInfo.h.
+ // TSFlags layout should be kept in sync with X86BaseInfo.h.
let TSFlags{6-0} = FormBits;
let TSFlags{8-7} = OpSizeBits;
- let TSFlags{9} = hasAdSizePrefix;
- let TSFlags{12-10} = OpPrefixBits;
- let TSFlags{15-13} = OpMapBits;
- let TSFlags{16} = hasREX_WPrefix;
- let TSFlags{20-17} = ImmT.Value;
- let TSFlags{23-21} = FPForm.Value;
- let TSFlags{24} = hasLockPrefix;
- let TSFlags{25} = hasREPPrefix;
- let TSFlags{27-26} = ExeDomain.Value;
- let TSFlags{29-28} = OpEncBits;
- let TSFlags{37-30} = Opcode;
- let TSFlags{38} = hasVEX_WPrefix;
- let TSFlags{39} = hasVEX_4V;
- let TSFlags{40} = hasVEX_4VOp3;
- let TSFlags{41} = hasVEX_i8ImmReg;
- let TSFlags{42} = hasVEX_L;
- let TSFlags{43} = ignoresVEX_L;
- let TSFlags{44} = hasEVEX_K;
- let TSFlags{45} = hasEVEX_Z;
- let TSFlags{46} = hasEVEX_L2;
- let TSFlags{47} = hasEVEX_B;
+ let TSFlags{10-9} = AdSizeBits;
+ let TSFlags{13-11} = OpPrefixBits;
+ let TSFlags{16-14} = OpMapBits;
+ let TSFlags{17} = hasREX_WPrefix;
+ let TSFlags{21-18} = ImmT.Value;
+ let TSFlags{24-22} = FPForm.Value;
+ let TSFlags{25} = hasLockPrefix;
+ let TSFlags{26} = hasREPPrefix;
+ let TSFlags{28-27} = ExeDomain.Value;
+ let TSFlags{30-29} = OpEncBits;
+ let TSFlags{38-31} = Opcode;
+ let TSFlags{39} = hasVEX_WPrefix;
+ let TSFlags{40} = hasVEX_4V;
+ let TSFlags{41} = hasVEX_4VOp3;
+ let TSFlags{42} = hasVEX_i8ImmReg;
+ let TSFlags{43} = hasVEX_L;
+ let TSFlags{44} = ignoresVEX_L;
+ let TSFlags{45} = hasEVEX_K;
+ let TSFlags{46} = hasEVEX_Z;
+ let TSFlags{47} = hasEVEX_L2;
+ let TSFlags{48} = hasEVEX_B;
// If we run out of TSFlags bits, it's possible to encode this in 3 bits.
- let TSFlags{54-48} = CD8_Scale;
- let TSFlags{55} = has3DNow0F0FOpcode;
- let TSFlags{56} = hasMemOp4Prefix;
- let TSFlags{57} = hasEVEX_RC;
+ let TSFlags{55-49} = CD8_Scale;
+ let TSFlags{56} = has3DNow0F0FOpcode;
+ let TSFlags{57} = hasMemOp4Prefix;
+ let TSFlags{58} = hasEVEX_RC;
}
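Widening the address-size field from one bit (hasAdSizePrefix) to two (AdSizeBits) shifts every later TSFlags field up by one, which is the whole reason for this block of renumbering. A standalone sketch of packing and recovering the new 2-bit field at TSFlags{10-9} (constant names here are hypothetical; the offsets come from the table above):

```cpp
#include <cassert>
#include <cstdint>

constexpr unsigned AdSizeShift = 9;                    // TSFlags{10-9}
constexpr uint64_t AdSizeMask  = 0x3ull << AdSizeShift;

uint64_t setAdSize(uint64_t tsFlags, uint64_t adSize) {
  return (tsFlags & ~AdSizeMask) | ((adSize & 0x3) << AdSizeShift);
}

uint64_t getAdSize(uint64_t tsFlags) {
  return (tsFlags & AdSizeMask) >> AdSizeShift;
}

int main() {
  uint64_t flags = setAdSize(0, 3); // 3 = AdSize64 in the encoding above
  assert(getAdSize(flags) == 3);
  return 0;
}
```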
class PseudoI<dag oops, dag iops, list<dag> pattern>
@@ -327,26 +344,26 @@ class I<bits<8> o, Format f, dag outs, dag ins, string asm,
let Pattern = pattern;
let CodeSize = 3;
}
-class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary,
Domain d = GenericDomain>
: X86Inst<o, f, Imm8, outs, ins, asm, itin, d> {
let Pattern = pattern;
let CodeSize = 3;
}
-class Ii8PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii8PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm8PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
-class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm16, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
-class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm32, outs, ins, asm, itin> {
let Pattern = pattern;
@@ -359,14 +376,14 @@ class Ii32S<bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
-class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm16PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
-class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
+class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm32PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
@@ -393,14 +410,14 @@ class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern,
// Iseg16 - 16-bit segment selector, 16-bit offset
// Iseg32 - 16-bit segment selector, 32-bit offset
-class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm,
+class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm16, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
-class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
+class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: X86Inst<o, f, Imm32, outs, ins, asm, itin> {
let Pattern = pattern;
@@ -409,8 +426,9 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
// SI - SSE 1 & 2 scalar instructions
class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin> {
+ list<dag> pattern, InstrItinClass itin = NoItinerary,
+ Domain d = GenericDomain>
+ : I<o, F, outs, ins, asm, pattern, itin, d> {
let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
!if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX],
!if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
@@ -478,7 +496,7 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
}
// SSE1 Instruction Templates:
-//
+//
// SSI - SSE1 instructions with XS prefix.
// PSI - SSE1 instructions with PS prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and PS prefix.
@@ -509,7 +527,7 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX]>;
// SSE2 Instruction Templates:
-//
+//
// SDI - SSE2 instructions with XD prefix.
// SDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix.
// S2SI - SSE2 instructions with XS prefix.
@@ -573,16 +591,16 @@ class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
// SSE3 Instruction Templates:
-//
+//
// S3I - SSE3 instructions with PD prefixes.
// S3SI - SSE3 instructions with XS prefix.
// S3DI - SSE3 instructions with XD prefix.
-class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
+class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, XS,
Requires<[UseSSE3]>;
-class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
+class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XD,
Requires<[UseSSE3]>;
@@ -593,7 +611,7 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
// SSSE3 Instruction Templates:
-//
+//
// SS38I - SSSE3 instructions with T8 prefix.
// SS3AI - SSSE3 instructions with TA prefix.
// MMXSS38I - SSSE3 instructions with T8 prefix and MMX operands.
@@ -621,7 +639,7 @@ class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasSSSE3]>;
// SSE4.1 Instruction Templates:
-//
+//
// SS48I - SSE 4.1 instructions with T8 prefix.
// SS41AIi8 - SSE 4.1 instructions with TA prefix and ImmT == Imm8.
//
@@ -635,7 +653,7 @@ class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[UseSSE41]>;
// SSE4.2 Instruction Templates:
-//
+//
// SS428I - SSE 4.2 instructions with T8 prefix.
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -699,6 +717,9 @@ class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[HasAVX512]>;
+class AVX5128IBase : T8PD {
+ Domain ExeDomain = SSEPackedInt;
+}
class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8XS,
@@ -868,27 +889,27 @@ class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
// MMXID - MMX instructions with XD prefix.
// MMXIS - MMX instructions with XS prefix.
-class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
-class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,Not64BitMode]>;
-class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,In64BitMode]>;
-class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PS, REX_W, Requires<[HasMMX]>;
-class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PD, Requires<[HasMMX]>;
-class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
-class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasMMX]>;
-class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm,
+class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasMMX]>;
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 1c7215c..bf515a8 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -12,10 +12,23 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
+// MMX specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+// Low word of MMX to GPR.
+def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1,
+ [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>;
+// GPR to low word of MMX.
+def MMX_X86movw2d : SDNode<"X86ISD::MMX_MOVW2D", SDTypeProfile<1, 1,
+ [SDTCisVT<0, x86mmx>, SDTCisVT<1, i32>]>>;
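A value-level sketch of the two new nodes, modelling an x86mmx value as a plain 64-bit integer (not LLVM code; movd into an MMX register zeroes the upper half, which is what MMX_MOVW2D relies on):

```cpp
#include <cstdint>

uint32_t mmx_movd2w(uint64_t mmx) {  // MMX_MOVD2W: low 32 bits out to a GPR
  return static_cast<uint32_t>(mmx);
}

uint64_t mmx_movw2d(uint32_t gpr) {  // MMX_MOVW2D: GPR into the low word,
  return gpr;                        // upper 32 bits zeroed
}

int main() {
  return mmx_movd2w(mmx_movw2d(7)) == 7 ? 0 : 1;
}
```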
+
+//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//
def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
+def load_mvmmx : PatFrag<(ops node:$ptr),
+ (x86mmx (MMX_X86movw2d (load node:$ptr)))>;
def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
//===----------------------------------------------------------------------===//
@@ -201,10 +214,19 @@ def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>;
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<1,2>, SDTCisVT<3, i8>]>;
+def SDTFPBinOpRound : SDTypeProfile<1, 3, [ // fadd_round, fmul_round, etc.
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisInt<3>]>;
+
def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
+def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
+ SDTCisSameAs<1,2>, SDTCisSameAs<1,3>, SDTCisInt<4>]>;
def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
SDTCisVec<0>, SDTCisInt<2>]>;
+def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
+ SDTCisVec<0>, SDTCisInt<3>]>;
+def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
+ SDTCisVec<0>, SDTCisInt<3>, SDTCisInt<4>]>;
def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
@@ -256,6 +278,11 @@ def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;
def X86Addsub : SDNode<"X86ISD::ADDSUB", SDTFPBinOp>;
+def X86faddRnd : SDNode<"X86ISD::FADD_RND", SDTFPBinOpRound>;
+def X86fsubRnd : SDNode<"X86ISD::FSUB_RND", SDTFPBinOpRound>;
+def X86fmulRnd : SDNode<"X86ISD::FMUL_RND", SDTFPBinOpRound>;
+def X86fdivRnd : SDNode<"X86ISD::FDIV_RND", SDTFPBinOpRound>;
+
def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>;
@@ -263,9 +290,22 @@ def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>;
def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;
+def X86FmaddRnd : SDNode<"X86ISD::FMADD_RND", SDTFmaRound>;
+def X86FnmaddRnd : SDNode<"X86ISD::FNMADD_RND", SDTFmaRound>;
+def X86FmsubRnd : SDNode<"X86ISD::FMSUB_RND", SDTFmaRound>;
+def X86FnmsubRnd : SDNode<"X86ISD::FNMSUB_RND", SDTFmaRound>;
+def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
+def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;
+
def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
def X86rcp28 : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
-def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;
+def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;
+
+def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
+def X86rcp28s : SDNode<"X86ISD::RCP28", STDFp2SrcRm>;
+def X86RndScale : SDNode<"X86ISD::RNDSCALE", STDFp3SrcRm>;
+def X86mgather : SDNode<"X86ISD::GATHER", SDTypeProfile<1, 3,
+ [SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>]>>;
def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
@@ -278,6 +318,13 @@ def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;
+def X86compress: SDNode<"X86ISD::COMPRESS", SDTypeProfile<1, 3,
+ [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
+ SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
+def X86expand : SDNode<"X86ISD::EXPAND", SDTypeProfile<1, 3,
+ [SDTCisSameAs<0, 3>,
+ SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
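X86compress packs the mask-selected lanes of its source contiguously into the low lanes of the result, and X86expand is the inverse scatter back out to the masked positions. A standalone sketch over plain arrays (treating unselected result lanes as coming from a passthru vector is an assumption made for illustration; the operand roles follow the profiles above):

```cpp
#include <array>
#include <cstddef>

template <std::size_t N>
std::array<int, N> compress(const std::array<bool, N> &mask,
                            const std::array<int, N> &src,
                            const std::array<int, N> &passthru) {
  std::array<int, N> dst = passthru;
  std::size_t out = 0;
  for (std::size_t i = 0; i < N; ++i)
    if (mask[i])
      dst[out++] = src[i]; // selected lanes land contiguously at the bottom
  return dst;
}

int main() {
  std::array<bool, 4> m{true, false, true, false};
  std::array<int, 4> s{1, 2, 3, 4}, p{};
  auto r = compress(m, s, p); // r = {1, 3, 0, 0}
  return (r[0] == 1 && r[1] == 3) ? 0 : 1;
}
```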
+
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//
@@ -334,6 +381,15 @@ def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;
+// These are needed to match a scalar load that is used in a vector-only
+// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
+// The memory operand is required to be a 128-bit load, so it must be converted
+// from a vector to a scalar.
+def loadf32_128 : PatFrag<(ops node:$ptr),
+ (f32 (vector_extract (loadv4f32 node:$ptr), (iPTR 0)))>;
+def loadf64_128 : PatFrag<(ops node:$ptr),
+ (f64 (vector_extract (loadv2f64 node:$ptr), (iPTR 0)))>;
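What these fragments match, sketched standalone: the program performs a full 128-bit load, but the scalar user only consumes lane 0, so the packed form of the logical op can fold the load directly:

```cpp
#include <cstring>

// A 16-byte load whose only scalar use is element 0 - the shape loadf32_128
// matches so andps/andnps/orps/xorps can take the memory operand as-is.
float loadf32_128(const void *p) {
  float v[4];
  std::memcpy(v, p, sizeof(v)); // the whole vector is loaded...
  return v[0];                  // ...but only lane 0 is used
}

int main() {
  float buf[4] = {1.5f, 2.0f, 3.0f, 4.0f};
  return loadf32_128(buf) == 1.5f ? 0 : 1;
}
```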
+
// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
@@ -412,20 +468,10 @@ def alignedloadv8i64 : PatFrag<(ops node:$ptr),
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return Subtarget->hasVectorUAMem()
+ return Subtarget->hasSSEUnalignedMem()
|| cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
-def memop4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 4;
-}]>;
-
-def memop8 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 8;
-}]>;
-
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
@@ -435,17 +481,15 @@ def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
-// 256-bit memop pattern fragments
-// NOTE: all 256-bit integer vector loads are promoted to v4i64
-def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
-def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
-def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
+// These are needed to match a scalar memop that is used in a vector-only
+// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
+// The memory operand is required to be a 128-bit load, so it must be converted
+// from a vector to a scalar.
+def memopfsf32_128 : PatFrag<(ops node:$ptr),
+ (f32 (vector_extract (memopv4f32 node:$ptr), (iPTR 0)))>;
+def memopfsf64_128 : PatFrag<(ops node:$ptr),
+ (f64 (vector_extract (memopv2f64 node:$ptr), (iPTR 0)))>;
-// 512-bit memop pattern fragments
-def memopv16f32 : PatFrag<(ops node:$ptr), (v16f32 (memop4 node:$ptr))>;
-def memopv8f64 : PatFrag<(ops node:$ptr), (v8f64 (memop8 node:$ptr))>;
-def memopv16i32 : PatFrag<(ops node:$ptr), (v16i32 (memop4 node:$ptr))>;
-def memopv8i64 : PatFrag<(ops node:$ptr), (v8i64 (memop8 node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
@@ -482,6 +526,58 @@ def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
return false;
}]>;
+def mgatherv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_gather node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
+ // return (Mgt->getIndex().getValueType() == MVT::v8i32 ||
+ // Mgt->getBasePtr().getValueType() == MVT::v8i32);
+ //return false;
+ return N != 0;
+}]>;
+
+def mgatherv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_gather node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
+ // return (Mgt->getIndex().getValueType() == MVT::v8i64 ||
+ // Mgt->getBasePtr().getValueType() == MVT::v8i64);
+ //return false;
+ return N != 0;
+}]>;
+def mgatherv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_gather node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
+ // return (Mgt->getIndex().getValueType() == MVT::v16i32 ||
+ // Mgt->getBasePtr().getValueType() == MVT::v16i32);
+ //return false;
+ return N != 0;
+}]>;
+
+def mscatterv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_scatter node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
+ // return (Sc->getIndex().getValueType() == MVT::v8i32 ||
+ // Sc->getBasePtr().getValueType() == MVT::v8i32);
+ //return false;
+ return N != 0;
+}]>;
+
+def mscatterv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_scatter node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
+ // return (Sc->getIndex().getValueType() == MVT::v8i64 ||
+ // Sc->getBasePtr().getValueType() == MVT::v8i64);
+ //return false;
+ return N != 0;
+}]>;
+def mscatterv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (masked_scatter node:$src1, node:$src2, node:$src3) , [{
+ //if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
+ // return (Sc->getIndex().getValueType() == MVT::v16i32 ||
+ // Sc->getBasePtr().getValueType() == MVT::v16i32);
+ //return false;
+ return N != 0;
+}]>;
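Note that the predicate bodies above are placeholders: `return N != 0` accepts any masked gather/scatter, and the commented-out code shows the intended index-type check. A hedged sketch of that check, written against the accessors named in the comments (it assumes LLVM's SelectionDAG headers; not part of this patch):

// Intended predicate once the gather node's index type is consulted.
static bool gatherHasIndexVT(const SDNode *N, MVT IndexVT) {
  if (const auto *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return Mgt->getIndex().getValueType() == IndexVT ||
           Mgt->getBasePtr().getValueType() == IndexVT;
  return false;
}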
+
// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 7f87bdd..f5b9680 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -65,6 +65,7 @@ enum {
TB_INDEX_1 = 1,
TB_INDEX_2 = 2,
TB_INDEX_3 = 3,
+ TB_INDEX_4 = 4,
TB_INDEX_MASK = 0xf,
// Do not insert the reverse map (MemOp -> RegOp) into the table.
@@ -90,7 +91,7 @@ enum {
TB_ALIGN_MASK = 0xff << TB_ALIGN_SHIFT
};
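The new TB_INDEX_4 slot extends the packed Flags word used by every fold-table entry; the low nibble selects the table index. A tiny illustrative decoder (the mask is copied from the enum above; the helper name is not LLVM's):

// Decode the fold-table index from a packed Flags word.
enum { TB_INDEX_MASK = 0xf }; // copied from the enum above
static unsigned foldTableIndex(unsigned Flags) {
  return Flags & TB_INDEX_MASK; // 0..4: which operand table applies
}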
-struct X86OpTblEntry {
+struct X86MemoryFoldTableEntry {
uint16_t RegOp;
uint16_t MemOp;
uint16_t Flags;
@@ -105,7 +106,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64 : X86::ADJCALLSTACKUP32)),
Subtarget(STI), RI(STI) {
- static const X86OpTblEntry OpTbl2Addr[] = {
+ static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
{ X86::ADC32ri, X86::ADC32mi, 0 },
{ X86::ADC32ri8, X86::ADC32mi8, 0 },
{ X86::ADC32rr, X86::ADC32mr, 0 },
@@ -145,14 +146,10 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::AND8rr, X86::AND8mr, 0 },
{ X86::DEC16r, X86::DEC16m, 0 },
{ X86::DEC32r, X86::DEC32m, 0 },
- { X86::DEC64_16r, X86::DEC64_16m, 0 },
- { X86::DEC64_32r, X86::DEC64_32m, 0 },
{ X86::DEC64r, X86::DEC64m, 0 },
{ X86::DEC8r, X86::DEC8m, 0 },
{ X86::INC16r, X86::INC16m, 0 },
{ X86::INC32r, X86::INC32m, 0 },
- { X86::INC64_16r, X86::INC64_16m, 0 },
- { X86::INC64_32r, X86::INC64_32m, 0 },
{ X86::INC64r, X86::INC64m, 0 },
{ X86::INC8r, X86::INC8m, 0 },
{ X86::NEG16r, X86::NEG16m, 0 },
@@ -272,17 +269,17 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::XOR8rr, X86::XOR8mr, 0 }
};
- for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
- unsigned RegOp = OpTbl2Addr[i].RegOp;
- unsigned MemOp = OpTbl2Addr[i].MemOp;
- unsigned Flags = OpTbl2Addr[i].Flags;
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2Addr); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable2Addr[i].RegOp;
+ unsigned MemOp = MemoryFoldTable2Addr[i].MemOp;
+ unsigned Flags = MemoryFoldTable2Addr[i].Flags;
AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
RegOp, MemOp,
// Index 0, folded load and store, no alignment requirement.
Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
}
- static const X86OpTblEntry OpTbl0[] = {
+ static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
{ X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD },
{ X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD },
{ X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD },
@@ -336,6 +333,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MUL32r, X86::MUL32m, TB_FOLDED_LOAD },
{ X86::MUL64r, X86::MUL64m, TB_FOLDED_LOAD },
{ X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD },
+ { X86::PEXTRDrr, X86::PEXTRDmr, TB_FOLDED_STORE },
+ { X86::PEXTRQrr, X86::PEXTRQmr, TB_FOLDED_STORE },
{ X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE },
{ X86::SETAr, X86::SETAm, TB_FOLDED_STORE },
{ X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE },
@@ -354,10 +353,12 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::SETSr, X86::SETSm, TB_FOLDED_STORE },
{ X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD },
{ X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD },
+ { X86::TAILJMPr64_REX, X86::TAILJMPm64_REX, TB_FOLDED_LOAD },
{ X86::TEST16ri, X86::TEST16mi, TB_FOLDED_LOAD },
{ X86::TEST32ri, X86::TEST32mi, TB_FOLDED_LOAD },
{ X86::TEST64ri32, X86::TEST64mi32, TB_FOLDED_LOAD },
{ X86::TEST8ri, X86::TEST8mi, TB_FOLDED_LOAD },
+
// AVX 128-bit versions of foldable instructions
{ X86::VEXTRACTPSrr,X86::VEXTRACTPSmr, TB_FOLDED_STORE },
{ X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
@@ -370,6 +371,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVSS2DIrr, X86::VMOVSS2DImr, TB_FOLDED_STORE },
{ X86::VMOVUPDrr, X86::VMOVUPDmr, TB_FOLDED_STORE },
{ X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE },
+ { X86::VPEXTRDrr, X86::VPEXTRDmr, TB_FOLDED_STORE },
+ { X86::VPEXTRQrr, X86::VPEXTRQmr, TB_FOLDED_STORE },
+
// AVX 256-bit foldable instructions
{ X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
@@ -377,6 +381,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE },
{ X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE },
+
// AVX-512 foldable instructions
{ X86::VMOVPDI2DIZrr, X86::VMOVPDI2DIZmr, TB_FOLDED_STORE },
{ X86::VMOVAPDZrr, X86::VMOVAPDZmr, TB_FOLDED_STORE | TB_ALIGN_64 },
@@ -389,6 +394,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQU16Zrr, X86::VMOVDQU16Zmr, TB_FOLDED_STORE },
{ X86::VMOVDQU32Zrr, X86::VMOVDQU32Zmr, TB_FOLDED_STORE },
{ X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE },
+
// AVX-512 foldable instructions (256-bit versions)
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVAPSZ256rr, X86::VMOVAPSZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
@@ -400,6 +406,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256mr, TB_FOLDED_STORE },
{ X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256mr, TB_FOLDED_STORE },
{ X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256mr, TB_FOLDED_STORE },
+
// AVX-512 foldable instructions (128-bit versions)
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPSZ128rr, X86::VMOVAPSZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
@@ -410,18 +417,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128mr, TB_FOLDED_STORE },
{ X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128mr, TB_FOLDED_STORE },
{ X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128mr, TB_FOLDED_STORE },
- { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE }
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE },
+
+ // F16C foldable instructions
+ { X86::VCVTPS2PHrr, X86::VCVTPS2PHmr, TB_FOLDED_STORE },
+ { X86::VCVTPS2PHYrr, X86::VCVTPS2PHYmr, TB_FOLDED_STORE }
};
- for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
- unsigned RegOp = OpTbl0[i].RegOp;
- unsigned MemOp = OpTbl0[i].MemOp;
- unsigned Flags = OpTbl0[i].Flags;
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable0); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable0[i].RegOp;
+ unsigned MemOp = MemoryFoldTable0[i].MemOp;
+ unsigned Flags = MemoryFoldTable0[i].Flags;
AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
RegOp, MemOp, TB_INDEX_0 | Flags);
}
- static const X86OpTblEntry OpTbl1[] = {
+ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::CMP16rr, X86::CMP16rm, 0 },
{ X86::CMP32rr, X86::CMP32rm, 0 },
{ X86::CMP64rr, X86::CMP64rm, 0 },
@@ -448,9 +459,12 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
{ X86::CVTSS2SI64rr, X86::CVTSS2SI64rm, 0 },
{ X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 },
+ { X86::CVTDQ2PDrr, X86::CVTDQ2PDrm, TB_ALIGN_16 },
{ X86::CVTDQ2PSrr, X86::CVTDQ2PSrm, TB_ALIGN_16 },
{ X86::CVTPD2DQrr, X86::CVTPD2DQrm, TB_ALIGN_16 },
+ { X86::CVTPD2PSrr, X86::CVTPD2PSrm, TB_ALIGN_16 },
{ X86::CVTPS2DQrr, X86::CVTPS2DQrm, TB_ALIGN_16 },
+ { X86::CVTPS2PDrr, X86::CVTPS2PDrm, TB_ALIGN_16 },
{ X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 },
{ X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 },
{ X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm, 0 },
@@ -490,11 +504,31 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
{ X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
{ X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
+ { X86::PCMPESTRIrr, X86::PCMPESTRIrm, TB_ALIGN_16 },
+ { X86::PCMPESTRM128rr, X86::PCMPESTRM128rm, TB_ALIGN_16 },
+ { X86::PCMPISTRIrr, X86::PCMPISTRIrm, TB_ALIGN_16 },
+ { X86::PCMPISTRM128rr, X86::PCMPISTRM128rm, TB_ALIGN_16 },
+ { X86::PHMINPOSUWrr128, X86::PHMINPOSUWrm128, TB_ALIGN_16 },
+ { X86::PMOVSXBDrr, X86::PMOVSXBDrm, TB_ALIGN_16 },
+ { X86::PMOVSXBQrr, X86::PMOVSXBQrm, TB_ALIGN_16 },
+ { X86::PMOVSXBWrr, X86::PMOVSXBWrm, TB_ALIGN_16 },
+ { X86::PMOVSXDQrr, X86::PMOVSXDQrm, TB_ALIGN_16 },
+ { X86::PMOVSXWDrr, X86::PMOVSXWDrm, TB_ALIGN_16 },
+ { X86::PMOVSXWQrr, X86::PMOVSXWQrm, TB_ALIGN_16 },
+ { X86::PMOVZXBDrr, X86::PMOVZXBDrm, TB_ALIGN_16 },
+ { X86::PMOVZXBQrr, X86::PMOVZXBQrm, TB_ALIGN_16 },
+ { X86::PMOVZXBWrr, X86::PMOVZXBWrm, TB_ALIGN_16 },
+ { X86::PMOVZXDQrr, X86::PMOVZXDQrm, TB_ALIGN_16 },
+ { X86::PMOVZXWDrr, X86::PMOVZXWDrm, TB_ALIGN_16 },
+ { X86::PMOVZXWQrr, X86::PMOVZXWQrm, TB_ALIGN_16 },
{ X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 },
{ X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 },
{ X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
+ { X86::PTESTrr, X86::PTESTrm, TB_ALIGN_16 },
{ X86::RCPPSr, X86::RCPPSm, TB_ALIGN_16 },
{ X86::RCPPSr_Int, X86::RCPPSm_Int, TB_ALIGN_16 },
+ { X86::ROUNDPDr, X86::ROUNDPDm, TB_ALIGN_16 },
+ { X86::ROUNDPSr, X86::ROUNDPSm, TB_ALIGN_16 },
{ X86::RSQRTPSr, X86::RSQRTPSm, TB_ALIGN_16 },
{ X86::RSQRTPSr_Int, X86::RSQRTPSm_Int, TB_ALIGN_16 },
{ X86::RSQRTSSr, X86::RSQRTSSm, 0 },
@@ -512,6 +546,19 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
// FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
{ X86::UCOMISDrr, X86::UCOMISDrm, 0 },
{ X86::UCOMISSrr, X86::UCOMISSrm, 0 },
+
+ // MMX version of foldable instructions
+ { X86::MMX_CVTPD2PIirr, X86::MMX_CVTPD2PIirm, 0 },
+ { X86::MMX_CVTPI2PDirr, X86::MMX_CVTPI2PDirm, 0 },
+ { X86::MMX_CVTPS2PIirr, X86::MMX_CVTPS2PIirm, 0 },
+ { X86::MMX_CVTTPD2PIirr, X86::MMX_CVTTPD2PIirm, 0 },
+ { X86::MMX_CVTTPS2PIirr, X86::MMX_CVTTPS2PIirm, 0 },
+ { X86::MMX_MOVD64to64rr, X86::MMX_MOVQ64rm, 0 },
+ { X86::MMX_PABSBrr64, X86::MMX_PABSBrm64, 0 },
+ { X86::MMX_PABSDrr64, X86::MMX_PABSDrm64, 0 },
+ { X86::MMX_PABSWrr64, X86::MMX_PABSWrm64, 0 },
+ { X86::MMX_PSHUFWri, X86::MMX_PSHUFWmi, 0 },
+
// AVX 128-bit versions of foldable instructions
{ X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 },
{ X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 },
@@ -529,9 +576,12 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
{ X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 },
{ X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
+ { X86::VCVTDQ2PDrr, X86::VCVTDQ2PDrm, 0 },
{ X86::VCVTDQ2PSrr, X86::VCVTDQ2PSrm, 0 },
{ X86::VCVTPD2DQrr, X86::VCVTPD2DQXrm, 0 },
+ { X86::VCVTPD2PSrr, X86::VCVTPD2PSXrm, 0 },
{ X86::VCVTPS2DQrr, X86::VCVTPS2DQrm, 0 },
+ { X86::VCVTPS2PDrr, X86::VCVTPS2PDrm, 0 },
{ X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, 0 },
{ X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, 0 },
{ X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
@@ -542,8 +592,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
{ X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 },
{ X86::VMOVDQArr, X86::VMOVDQArm, TB_ALIGN_16 },
- { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm, TB_ALIGN_16 },
- { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, TB_ALIGN_16 },
+ { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm, 0 },
+ { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, 0 },
{ X86::VMOVUPDrr, X86::VMOVUPDrm, 0 },
{ X86::VMOVUPSrr, X86::VMOVUPSrm, 0 },
{ X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 },
@@ -551,50 +601,151 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPABSBrr128, X86::VPABSBrm128, 0 },
{ X86::VPABSDrr128, X86::VPABSDrm128, 0 },
{ X86::VPABSWrr128, X86::VPABSWrm128, 0 },
+ { X86::VPCMPESTRIrr, X86::VPCMPESTRIrm, 0 },
+ { X86::VPCMPESTRM128rr, X86::VPCMPESTRM128rm, 0 },
+ { X86::VPCMPISTRIrr, X86::VPCMPISTRIrm, 0 },
+ { X86::VPCMPISTRM128rr, X86::VPCMPISTRM128rm, 0 },
+ { X86::VPHMINPOSUWrr128, X86::VPHMINPOSUWrm128, 0 },
{ X86::VPERMILPDri, X86::VPERMILPDmi, 0 },
{ X86::VPERMILPSri, X86::VPERMILPSmi, 0 },
+ { X86::VPMOVSXBDrr, X86::VPMOVSXBDrm, 0 },
+ { X86::VPMOVSXBQrr, X86::VPMOVSXBQrm, 0 },
+ { X86::VPMOVSXBWrr, X86::VPMOVSXBWrm, 0 },
+ { X86::VPMOVSXDQrr, X86::VPMOVSXDQrm, 0 },
+ { X86::VPMOVSXWDrr, X86::VPMOVSXWDrm, 0 },
+ { X86::VPMOVSXWQrr, X86::VPMOVSXWQrm, 0 },
+ { X86::VPMOVZXBDrr, X86::VPMOVZXBDrm, 0 },
+ { X86::VPMOVZXBQrr, X86::VPMOVZXBQrm, 0 },
+ { X86::VPMOVZXBWrr, X86::VPMOVZXBWrm, 0 },
+ { X86::VPMOVZXDQrr, X86::VPMOVZXDQrm, 0 },
+ { X86::VPMOVZXWDrr, X86::VPMOVZXWDrm, 0 },
+ { X86::VPMOVZXWQrr, X86::VPMOVZXWQrm, 0 },
{ X86::VPSHUFDri, X86::VPSHUFDmi, 0 },
{ X86::VPSHUFHWri, X86::VPSHUFHWmi, 0 },
{ X86::VPSHUFLWri, X86::VPSHUFLWmi, 0 },
+ { X86::VPTESTrr, X86::VPTESTrm, 0 },
{ X86::VRCPPSr, X86::VRCPPSm, 0 },
{ X86::VRCPPSr_Int, X86::VRCPPSm_Int, 0 },
+ { X86::VROUNDPDr, X86::VROUNDPDm, 0 },
+ { X86::VROUNDPSr, X86::VROUNDPSm, 0 },
{ X86::VRSQRTPSr, X86::VRSQRTPSm, 0 },
{ X86::VRSQRTPSr_Int, X86::VRSQRTPSm_Int, 0 },
{ X86::VSQRTPDr, X86::VSQRTPDm, 0 },
{ X86::VSQRTPSr, X86::VSQRTPSm, 0 },
+ { X86::VTESTPDrr, X86::VTESTPDrm, 0 },
+ { X86::VTESTPSrr, X86::VTESTPSrm, 0 },
{ X86::VUCOMISDrr, X86::VUCOMISDrm, 0 },
{ X86::VUCOMISSrr, X86::VUCOMISSrm, 0 },
- { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE },
// AVX 256-bit foldable instructions
+ { X86::VCVTDQ2PDYrr, X86::VCVTDQ2PDYrm, 0 },
{ X86::VCVTDQ2PSYrr, X86::VCVTDQ2PSYrm, 0 },
{ X86::VCVTPD2DQYrr, X86::VCVTPD2DQYrm, 0 },
+ { X86::VCVTPD2PSYrr, X86::VCVTPD2PSYrm, 0 },
{ X86::VCVTPS2DQYrr, X86::VCVTPS2DQYrm, 0 },
+ { X86::VCVTPS2PDYrr, X86::VCVTPS2PDYrm, 0 },
{ X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0 },
{ X86::VCVTTPS2DQYrr, X86::VCVTTPS2DQYrm, 0 },
{ X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
{ X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
+ { X86::VMOVDDUPYrr, X86::VMOVDDUPYrm, 0 },
{ X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_32 },
+ { X86::VMOVSLDUPYrr, X86::VMOVSLDUPYrm, 0 },
+ { X86::VMOVSHDUPYrr, X86::VMOVSHDUPYrm, 0 },
{ X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 },
{ X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
{ X86::VPERMILPDYri, X86::VPERMILPDYmi, 0 },
{ X86::VPERMILPSYri, X86::VPERMILPSYmi, 0 },
+ { X86::VPTESTYrr, X86::VPTESTYrm, 0 },
{ X86::VRCPPSYr, X86::VRCPPSYm, 0 },
{ X86::VRCPPSYr_Int, X86::VRCPPSYm_Int, 0 },
+ { X86::VROUNDYPDr, X86::VROUNDYPDm, 0 },
+ { X86::VROUNDYPSr, X86::VROUNDYPSm, 0 },
{ X86::VRSQRTPSYr, X86::VRSQRTPSYm, 0 },
+ { X86::VRSQRTPSYr_Int, X86::VRSQRTPSYm_Int, 0 },
{ X86::VSQRTPDYr, X86::VSQRTPDYm, 0 },
{ X86::VSQRTPSYr, X86::VSQRTPSYm, 0 },
- { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
- { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
+ { X86::VTESTPDYrr, X86::VTESTPDYrm, 0 },
+ { X86::VTESTPSYrr, X86::VTESTPSYrm, 0 },
// AVX2 foldable instructions
+
+ // VBROADCASTS{SD}rr register instructions were an AVX2 addition while the
+ // VBROADCASTS{SD}rm memory instructions were available from AVX1.
+ // TB_NO_REVERSE prevents unfolding from introducing an illegal instruction
+ // on AVX1 targets. The VPBROADCAST instructions are all AVX2 instructions
+ // so they don't need an equivalent limitation.
+ { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE },
+ { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
{ X86::VPABSBrr256, X86::VPABSBrm256, 0 },
{ X86::VPABSDrr256, X86::VPABSDrm256, 0 },
{ X86::VPABSWrr256, X86::VPABSWrm256, 0 },
+ { X86::VPBROADCASTBrr, X86::VPBROADCASTBrm, 0 },
+ { X86::VPBROADCASTBYrr, X86::VPBROADCASTBYrm, 0 },
+ { X86::VPBROADCASTDrr, X86::VPBROADCASTDrm, 0 },
+ { X86::VPBROADCASTDYrr, X86::VPBROADCASTDYrm, 0 },
+ { X86::VPBROADCASTQrr, X86::VPBROADCASTQrm, 0 },
+ { X86::VPBROADCASTQYrr, X86::VPBROADCASTQYrm, 0 },
+ { X86::VPBROADCASTWrr, X86::VPBROADCASTWrm, 0 },
+ { X86::VPBROADCASTWYrr, X86::VPBROADCASTWYrm, 0 },
+ { X86::VPERMPDYri, X86::VPERMPDYmi, 0 },
+ { X86::VPERMQYri, X86::VPERMQYmi, 0 },
+ { X86::VPMOVSXBDYrr, X86::VPMOVSXBDYrm, 0 },
+ { X86::VPMOVSXBQYrr, X86::VPMOVSXBQYrm, 0 },
+ { X86::VPMOVSXBWYrr, X86::VPMOVSXBWYrm, 0 },
+ { X86::VPMOVSXDQYrr, X86::VPMOVSXDQYrm, 0 },
+ { X86::VPMOVSXWDYrr, X86::VPMOVSXWDYrm, 0 },
+ { X86::VPMOVSXWQYrr, X86::VPMOVSXWQYrm, 0 },
+ { X86::VPMOVZXBDYrr, X86::VPMOVZXBDYrm, 0 },
+ { X86::VPMOVZXBQYrr, X86::VPMOVZXBQYrm, 0 },
+ { X86::VPMOVZXBWYrr, X86::VPMOVZXBWYrm, 0 },
+ { X86::VPMOVZXDQYrr, X86::VPMOVZXDQYrm, 0 },
+ { X86::VPMOVZXWDYrr, X86::VPMOVZXWDYrm, 0 },
+ { X86::VPMOVZXWQYrr, X86::VPMOVZXWQYrm, 0 },
{ X86::VPSHUFDYri, X86::VPSHUFDYmi, 0 },
{ X86::VPSHUFHWYri, X86::VPSHUFHWYmi, 0 },
{ X86::VPSHUFLWYri, X86::VPSHUFLWYmi, 0 },
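Per the TB_NO_REVERSE comment above, the flag only suppresses the MemOp -> RegOp direction. A hedged sketch of how a table consumer honors it (the flag's numeric value is not shown in this hunk, so it is declared illustratively here):

// Refuse to unfold entries marked TB_NO_REVERSE: e.g. VBROADCASTSSrm is
// legal on AVX1 targets where VBROADCASTSSrr does not exist.
enum { TB_NO_REVERSE = 1 << 4 }; // illustrative value; see X86InstrInfo.h
static bool canUnfoldToRegForm(unsigned Flags) {
  return (Flags & TB_NO_REVERSE) == 0;
}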
+ // XOP foldable instructions
+ { X86::VFRCZPDrr, X86::VFRCZPDrm, 0 },
+ { X86::VFRCZPDrrY, X86::VFRCZPDrmY, 0 },
+ { X86::VFRCZPSrr, X86::VFRCZPSrm, 0 },
+ { X86::VFRCZPSrrY, X86::VFRCZPSrmY, 0 },
+ { X86::VFRCZSDrr, X86::VFRCZSDrm, 0 },
+ { X86::VFRCZSSrr, X86::VFRCZSSrm, 0 },
+ { X86::VPHADDBDrr, X86::VPHADDBDrm, 0 },
+ { X86::VPHADDBQrr, X86::VPHADDBQrm, 0 },
+ { X86::VPHADDBWrr, X86::VPHADDBWrm, 0 },
+ { X86::VPHADDDQrr, X86::VPHADDDQrm, 0 },
+ { X86::VPHADDWDrr, X86::VPHADDWDrm, 0 },
+ { X86::VPHADDWQrr, X86::VPHADDWQrm, 0 },
+ { X86::VPHADDUBDrr, X86::VPHADDUBDrm, 0 },
+ { X86::VPHADDUBQrr, X86::VPHADDUBQrm, 0 },
+ { X86::VPHADDUBWrr, X86::VPHADDUBWrm, 0 },
+ { X86::VPHADDUDQrr, X86::VPHADDUDQrm, 0 },
+ { X86::VPHADDUWDrr, X86::VPHADDUWDrm, 0 },
+ { X86::VPHADDUWQrr, X86::VPHADDUWQrm, 0 },
+ { X86::VPHSUBBWrr, X86::VPHSUBBWrm, 0 },
+ { X86::VPHSUBDQrr, X86::VPHSUBDQrm, 0 },
+ { X86::VPHSUBWDrr, X86::VPHSUBWDrm, 0 },
+ { X86::VPROTBri, X86::VPROTBmi, 0 },
+ { X86::VPROTBrr, X86::VPROTBmr, 0 },
+ { X86::VPROTDri, X86::VPROTDmi, 0 },
+ { X86::VPROTDrr, X86::VPROTDmr, 0 },
+ { X86::VPROTQri, X86::VPROTQmi, 0 },
+ { X86::VPROTQrr, X86::VPROTQmr, 0 },
+ { X86::VPROTWri, X86::VPROTWmi, 0 },
+ { X86::VPROTWrr, X86::VPROTWmr, 0 },
+ { X86::VPSHABrr, X86::VPSHABmr, 0 },
+ { X86::VPSHADrr, X86::VPSHADmr, 0 },
+ { X86::VPSHAQrr, X86::VPSHAQmr, 0 },
+ { X86::VPSHAWrr, X86::VPSHAWmr, 0 },
+ { X86::VPSHLBrr, X86::VPSHLBmr, 0 },
+ { X86::VPSHLDrr, X86::VPSHLDmr, 0 },
+ { X86::VPSHLQrr, X86::VPSHLQmr, 0 },
+ { X86::VPSHLWrr, X86::VPSHLWmr, 0 },
+
// BMI/BMI2/LZCNT/POPCNT/TBM foldable instructions
{ X86::BEXTR32rr, X86::BEXTR32rm, 0 },
{ X86::BEXTR64rr, X86::BEXTR64rm, 0 },
@@ -659,6 +810,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
{ X86::VPABSDZrr, X86::VPABSDZrm, 0 },
{ X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+ { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+
// AVX-512 foldable instructions (256-bit versions)
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
{ X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
@@ -670,6 +824,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
{ X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
{ X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+ { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+
// AVX-512 foldable instructions (128-bit versions)
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
{ X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
@@ -681,25 +838,30 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
{ X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
{ X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
+ { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+
+ // F16C foldable instructions
+ { X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
+ { X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },
// AES foldable instructions
{ X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
{ X86::AESKEYGENASSIST128rr, X86::AESKEYGENASSIST128rm, TB_ALIGN_16 },
- { X86::VAESIMCrr, X86::VAESIMCrm, TB_ALIGN_16 },
- { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, TB_ALIGN_16 }
+ { X86::VAESIMCrr, X86::VAESIMCrm, 0 },
+ { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, 0 }
};
- for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
- unsigned RegOp = OpTbl1[i].RegOp;
- unsigned MemOp = OpTbl1[i].MemOp;
- unsigned Flags = OpTbl1[i].Flags;
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable1); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable1[i].RegOp;
+ unsigned MemOp = MemoryFoldTable1[i].MemOp;
+ unsigned Flags = MemoryFoldTable1[i].Flags;
AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
RegOp, MemOp,
// Index 1, folded load
Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
}
- static const X86OpTblEntry OpTbl2[] = {
+ static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
{ X86::ADC32rr, X86::ADC32rm, 0 },
{ X86::ADC64rr, X86::ADC64rm, 0 },
{ X86::ADD16rr, X86::ADD16rm, 0 },
@@ -712,7 +874,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::ADDPDrr, X86::ADDPDrm, TB_ALIGN_16 },
{ X86::ADDPSrr, X86::ADDPSrm, TB_ALIGN_16 },
{ X86::ADDSDrr, X86::ADDSDrm, 0 },
+ { X86::ADDSDrr_Int, X86::ADDSDrm_Int, 0 },
{ X86::ADDSSrr, X86::ADDSSrm, 0 },
+ { X86::ADDSSrr_Int, X86::ADDSSrm_Int, 0 },
{ X86::ADDSUBPDrr, X86::ADDSUBPDrm, TB_ALIGN_16 },
{ X86::ADDSUBPSrr, X86::ADDSUBPSrm, TB_ALIGN_16 },
{ X86::AND16rr, X86::AND16rm, 0 },
@@ -782,7 +946,16 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::DIVPDrr, X86::DIVPDrm, TB_ALIGN_16 },
{ X86::DIVPSrr, X86::DIVPSrm, TB_ALIGN_16 },
{ X86::DIVSDrr, X86::DIVSDrm, 0 },
+ { X86::DIVSDrr_Int, X86::DIVSDrm_Int, 0 },
{ X86::DIVSSrr, X86::DIVSSrm, 0 },
+ { X86::DIVSSrr_Int, X86::DIVSSrm_Int, 0 },
+ { X86::DPPDrri, X86::DPPDrmi, TB_ALIGN_16 },
+ { X86::DPPSrri, X86::DPPSrmi, TB_ALIGN_16 },
+
+ // FIXME: We should not be folding Fs* scalar loads into vector
+ // instructions because the vector instructions require vector-sized
+ // loads. Lowering should create vector-sized instructions (the Fv*
+ // variants below) to allow load folding.
{ X86::FsANDNPDrr, X86::FsANDNPDrm, TB_ALIGN_16 },
{ X86::FsANDNPSrr, X86::FsANDNPSrm, TB_ALIGN_16 },
{ X86::FsANDPDrr, X86::FsANDPDrm, TB_ALIGN_16 },
@@ -791,6 +964,15 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::FsORPSrr, X86::FsORPSrm, TB_ALIGN_16 },
{ X86::FsXORPDrr, X86::FsXORPDrm, TB_ALIGN_16 },
{ X86::FsXORPSrr, X86::FsXORPSrm, TB_ALIGN_16 },
+
+ { X86::FvANDNPDrr, X86::FvANDNPDrm, TB_ALIGN_16 },
+ { X86::FvANDNPSrr, X86::FvANDNPSrm, TB_ALIGN_16 },
+ { X86::FvANDPDrr, X86::FvANDPDrm, TB_ALIGN_16 },
+ { X86::FvANDPSrr, X86::FvANDPSrm, TB_ALIGN_16 },
+ { X86::FvORPDrr, X86::FvORPDrm, TB_ALIGN_16 },
+ { X86::FvORPSrr, X86::FvORPSrm, TB_ALIGN_16 },
+ { X86::FvXORPDrr, X86::FvXORPDrm, TB_ALIGN_16 },
+ { X86::FvXORPSrr, X86::FvXORPSrm, TB_ALIGN_16 },
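The FIXME above is a load-size mismatch: a folded andps memory operand reads a full 16 bytes, while a scalar source only guarantees 4. The intrinsics sketch below illustrates the safe ordering that the Fv* entries preserve (illustrative only):

#include <immintrin.h>

// Safe scalar path: load exactly 4 bytes, then do the vector op.
// Folding the scalar load into ANDPS would over-read 16 bytes.
static __m128 maskScalar(const float *Scalar, __m128 Mask) {
  __m128 V = _mm_set_ss(*Scalar); // touches only 4 bytes of memory
  return _mm_and_ps(V, Mask);     // 128-bit op on the widened value
}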
{ X86::HADDPDrr, X86::HADDPDrm, TB_ALIGN_16 },
{ X86::HADDPSrr, X86::HADDPSrm, TB_ALIGN_16 },
{ X86::HSUBPDrr, X86::HSUBPDrm, TB_ALIGN_16 },
@@ -809,16 +991,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MAXPDrr, X86::MAXPDrm, TB_ALIGN_16 },
{ X86::MAXPSrr, X86::MAXPSrm, TB_ALIGN_16 },
{ X86::MAXSDrr, X86::MAXSDrm, 0 },
+ { X86::MAXSDrr_Int, X86::MAXSDrm_Int, 0 },
{ X86::MAXSSrr, X86::MAXSSrm, 0 },
+ { X86::MAXSSrr_Int, X86::MAXSSrm_Int, 0 },
{ X86::MINPDrr, X86::MINPDrm, TB_ALIGN_16 },
{ X86::MINPSrr, X86::MINPSrm, TB_ALIGN_16 },
{ X86::MINSDrr, X86::MINSDrm, 0 },
+ { X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
{ X86::MINSSrr, X86::MINSSrm, 0 },
+ { X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
{ X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
{ X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
{ X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
{ X86::MULSDrr, X86::MULSDrm, 0 },
+ { X86::MULSDrr_Int, X86::MULSDrm_Int, 0 },
{ X86::MULSSrr, X86::MULSSrm, 0 },
+ { X86::MULSSrr_Int, X86::MULSSrm_Int, 0 },
{ X86::OR16rr, X86::OR16rm, 0 },
{ X86::OR32rr, X86::OR32rm, 0 },
{ X86::OR64rr, X86::OR64rm, 0 },
@@ -842,7 +1030,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
{ X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
{ X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 },
+ { X86::PBLENDVBrr0, X86::PBLENDVBrm0, TB_ALIGN_16 },
{ X86::PBLENDWrri, X86::PBLENDWrmi, TB_ALIGN_16 },
+ { X86::PCLMULQDQrr, X86::PCLMULQDQrm, TB_ALIGN_16 },
{ X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 },
{ X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 },
{ X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 },
@@ -857,7 +1047,10 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16 },
{ X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 },
{ X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16 },
- { X86::PINSRWrri, X86::PINSRWrmi, TB_ALIGN_16 },
+ { X86::PINSRBrr, X86::PINSRBrm, 0 },
+ { X86::PINSRDrr, X86::PINSRDrm, 0 },
+ { X86::PINSRQrr, X86::PINSRQrm, 0 },
+ { X86::PINSRWrri, X86::PINSRWrmi, 0 },
{ X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 },
{ X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 },
{ X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 },
@@ -895,8 +1088,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PSRLWrr, X86::PSRLWrm, TB_ALIGN_16 },
{ X86::PSUBBrr, X86::PSUBBrm, TB_ALIGN_16 },
{ X86::PSUBDrr, X86::PSUBDrm, TB_ALIGN_16 },
+ { X86::PSUBQrr, X86::PSUBQrm, TB_ALIGN_16 },
{ X86::PSUBSBrr, X86::PSUBSBrm, TB_ALIGN_16 },
{ X86::PSUBSWrr, X86::PSUBSWrm, TB_ALIGN_16 },
+ { X86::PSUBUSBrr, X86::PSUBUSBrm, TB_ALIGN_16 },
+ { X86::PSUBUSWrr, X86::PSUBUSWrm, TB_ALIGN_16 },
{ X86::PSUBWrr, X86::PSUBWrm, TB_ALIGN_16 },
{ X86::PUNPCKHBWrr, X86::PUNPCKHBWrm, TB_ALIGN_16 },
{ X86::PUNPCKHDQrr, X86::PUNPCKHDQrm, TB_ALIGN_16 },
@@ -918,7 +1114,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::SUBPDrr, X86::SUBPDrm, TB_ALIGN_16 },
{ X86::SUBPSrr, X86::SUBPSrm, TB_ALIGN_16 },
{ X86::SUBSDrr, X86::SUBSDrm, 0 },
+ { X86::SUBSDrr_Int, X86::SUBSDrm_Int, 0 },
{ X86::SUBSSrr, X86::SUBSSrm, 0 },
+ { X86::SUBSSrr_Int, X86::SUBSSrm_Int, 0 },
// FIXME: TEST*rr -> swapped operand of TEST*mr.
{ X86::UNPCKHPDrr, X86::UNPCKHPDrm, TB_ALIGN_16 },
{ X86::UNPCKHPSrr, X86::UNPCKHPSrm, TB_ALIGN_16 },
@@ -930,6 +1128,79 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::XOR8rr, X86::XOR8rm, 0 },
{ X86::XORPDrr, X86::XORPDrm, TB_ALIGN_16 },
{ X86::XORPSrr, X86::XORPSrm, TB_ALIGN_16 },
+
+ // MMX version of foldable instructions
+ { X86::MMX_CVTPI2PSirr, X86::MMX_CVTPI2PSirm, 0 },
+ { X86::MMX_PACKSSDWirr, X86::MMX_PACKSSDWirm, 0 },
+ { X86::MMX_PACKSSWBirr, X86::MMX_PACKSSWBirm, 0 },
+ { X86::MMX_PACKUSWBirr, X86::MMX_PACKUSWBirm, 0 },
+ { X86::MMX_PADDBirr, X86::MMX_PADDBirm, 0 },
+ { X86::MMX_PADDDirr, X86::MMX_PADDDirm, 0 },
+ { X86::MMX_PADDQirr, X86::MMX_PADDQirm, 0 },
+ { X86::MMX_PADDSBirr, X86::MMX_PADDSBirm, 0 },
+ { X86::MMX_PADDSWirr, X86::MMX_PADDSWirm, 0 },
+ { X86::MMX_PADDUSBirr, X86::MMX_PADDUSBirm, 0 },
+ { X86::MMX_PADDUSWirr, X86::MMX_PADDUSWirm, 0 },
+ { X86::MMX_PADDWirr, X86::MMX_PADDWirm, 0 },
+ { X86::MMX_PALIGNR64irr, X86::MMX_PALIGNR64irm, 0 },
+ { X86::MMX_PANDNirr, X86::MMX_PANDNirm, 0 },
+ { X86::MMX_PANDirr, X86::MMX_PANDirm, 0 },
+ { X86::MMX_PAVGBirr, X86::MMX_PAVGBirm, 0 },
+ { X86::MMX_PAVGWirr, X86::MMX_PAVGWirm, 0 },
+ { X86::MMX_PCMPEQBirr, X86::MMX_PCMPEQBirm, 0 },
+ { X86::MMX_PCMPEQDirr, X86::MMX_PCMPEQDirm, 0 },
+ { X86::MMX_PCMPEQWirr, X86::MMX_PCMPEQWirm, 0 },
+ { X86::MMX_PCMPGTBirr, X86::MMX_PCMPGTBirm, 0 },
+ { X86::MMX_PCMPGTDirr, X86::MMX_PCMPGTDirm, 0 },
+ { X86::MMX_PCMPGTWirr, X86::MMX_PCMPGTWirm, 0 },
+ { X86::MMX_PHADDSWrr64, X86::MMX_PHADDSWrm64, 0 },
+ { X86::MMX_PHADDWrr64, X86::MMX_PHADDWrm64, 0 },
+ { X86::MMX_PHADDrr64, X86::MMX_PHADDrm64, 0 },
+ { X86::MMX_PHSUBDrr64, X86::MMX_PHSUBDrm64, 0 },
+ { X86::MMX_PHSUBSWrr64, X86::MMX_PHSUBSWrm64, 0 },
+ { X86::MMX_PHSUBWrr64, X86::MMX_PHSUBWrm64, 0 },
+ { X86::MMX_PINSRWirri, X86::MMX_PINSRWirmi, 0 },
+ { X86::MMX_PMADDUBSWrr64, X86::MMX_PMADDUBSWrm64, 0 },
+ { X86::MMX_PMADDWDirr, X86::MMX_PMADDWDirm, 0 },
+ { X86::MMX_PMAXSWirr, X86::MMX_PMAXSWirm, 0 },
+ { X86::MMX_PMAXUBirr, X86::MMX_PMAXUBirm, 0 },
+ { X86::MMX_PMINSWirr, X86::MMX_PMINSWirm, 0 },
+ { X86::MMX_PMINUBirr, X86::MMX_PMINUBirm, 0 },
+ { X86::MMX_PMULHRSWrr64, X86::MMX_PMULHRSWrm64, 0 },
+ { X86::MMX_PMULHUWirr, X86::MMX_PMULHUWirm, 0 },
+ { X86::MMX_PMULHWirr, X86::MMX_PMULHWirm, 0 },
+ { X86::MMX_PMULLWirr, X86::MMX_PMULLWirm, 0 },
+ { X86::MMX_PMULUDQirr, X86::MMX_PMULUDQirm, 0 },
+ { X86::MMX_PORirr, X86::MMX_PORirm, 0 },
+ { X86::MMX_PSADBWirr, X86::MMX_PSADBWirm, 0 },
+ { X86::MMX_PSHUFBrr64, X86::MMX_PSHUFBrm64, 0 },
+ { X86::MMX_PSIGNBrr64, X86::MMX_PSIGNBrm64, 0 },
+ { X86::MMX_PSIGNDrr64, X86::MMX_PSIGNDrm64, 0 },
+ { X86::MMX_PSIGNWrr64, X86::MMX_PSIGNWrm64, 0 },
+ { X86::MMX_PSLLDrr, X86::MMX_PSLLDrm, 0 },
+ { X86::MMX_PSLLQrr, X86::MMX_PSLLQrm, 0 },
+ { X86::MMX_PSLLWrr, X86::MMX_PSLLWrm, 0 },
+ { X86::MMX_PSRADrr, X86::MMX_PSRADrm, 0 },
+ { X86::MMX_PSRAWrr, X86::MMX_PSRAWrm, 0 },
+ { X86::MMX_PSRLDrr, X86::MMX_PSRLDrm, 0 },
+ { X86::MMX_PSRLQrr, X86::MMX_PSRLQrm, 0 },
+ { X86::MMX_PSRLWrr, X86::MMX_PSRLWrm, 0 },
+ { X86::MMX_PSUBBirr, X86::MMX_PSUBBirm, 0 },
+ { X86::MMX_PSUBDirr, X86::MMX_PSUBDirm, 0 },
+ { X86::MMX_PSUBQirr, X86::MMX_PSUBQirm, 0 },
+ { X86::MMX_PSUBSBirr, X86::MMX_PSUBSBirm, 0 },
+ { X86::MMX_PSUBSWirr, X86::MMX_PSUBSWirm, 0 },
+ { X86::MMX_PSUBUSBirr, X86::MMX_PSUBUSBirm, 0 },
+ { X86::MMX_PSUBUSWirr, X86::MMX_PSUBUSWirm, 0 },
+ { X86::MMX_PSUBWirr, X86::MMX_PSUBWirm, 0 },
+ { X86::MMX_PUNPCKHBWirr, X86::MMX_PUNPCKHBWirm, 0 },
+ { X86::MMX_PUNPCKHDQirr, X86::MMX_PUNPCKHDQirm, 0 },
+ { X86::MMX_PUNPCKHWDirr, X86::MMX_PUNPCKHWDirm, 0 },
+ { X86::MMX_PUNPCKLBWirr, X86::MMX_PUNPCKLBWirm, 0 },
+ { X86::MMX_PUNPCKLDQirr, X86::MMX_PUNPCKLDQirm, 0 },
+ { X86::MMX_PUNPCKLWDirr, X86::MMX_PUNPCKLWDirm, 0 },
+ { X86::MMX_PXORirr, X86::MMX_PXORirm, 0 },
+
// AVX 128-bit versions of foldable instructions
{ X86::VCVTSD2SSrr, X86::VCVTSD2SSrm, 0 },
{ X86::Int_VCVTSD2SSrr, X86::Int_VCVTSD2SSrm, 0 },
@@ -943,13 +1214,16 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::Int_VCVTSI2SSrr, X86::Int_VCVTSI2SSrm, 0 },
{ X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 },
{ X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
+ { X86::VRCPSSr, X86::VRCPSSm, 0 },
{ X86::VRSQRTSSr, X86::VRSQRTSSm, 0 },
{ X86::VSQRTSDr, X86::VSQRTSDm, 0 },
{ X86::VSQRTSSr, X86::VSQRTSSm, 0 },
{ X86::VADDPDrr, X86::VADDPDrm, 0 },
{ X86::VADDPSrr, X86::VADDPSrm, 0 },
{ X86::VADDSDrr, X86::VADDSDrm, 0 },
+ { X86::VADDSDrr_Int, X86::VADDSDrm_Int, 0 },
{ X86::VADDSSrr, X86::VADDSSrm, 0 },
+ { X86::VADDSSrr_Int, X86::VADDSSrm_Int, 0 },
{ X86::VADDSUBPDrr, X86::VADDSUBPDrm, 0 },
{ X86::VADDSUBPSrr, X86::VADDSUBPSrm, 0 },
{ X86::VANDNPDrr, X86::VANDNPDrm, 0 },
@@ -967,15 +1241,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VDIVPDrr, X86::VDIVPDrm, 0 },
{ X86::VDIVPSrr, X86::VDIVPSrm, 0 },
{ X86::VDIVSDrr, X86::VDIVSDrm, 0 },
+ { X86::VDIVSDrr_Int, X86::VDIVSDrm_Int, 0 },
{ X86::VDIVSSrr, X86::VDIVSSrm, 0 },
- { X86::VFsANDNPDrr, X86::VFsANDNPDrm, TB_ALIGN_16 },
- { X86::VFsANDNPSrr, X86::VFsANDNPSrm, TB_ALIGN_16 },
- { X86::VFsANDPDrr, X86::VFsANDPDrm, TB_ALIGN_16 },
- { X86::VFsANDPSrr, X86::VFsANDPSrm, TB_ALIGN_16 },
- { X86::VFsORPDrr, X86::VFsORPDrm, TB_ALIGN_16 },
- { X86::VFsORPSrr, X86::VFsORPSrm, TB_ALIGN_16 },
- { X86::VFsXORPDrr, X86::VFsXORPDrm, TB_ALIGN_16 },
- { X86::VFsXORPSrr, X86::VFsXORPSrm, TB_ALIGN_16 },
+ { X86::VDIVSSrr_Int, X86::VDIVSSrm_Int, 0 },
+ { X86::VDPPDrri, X86::VDPPDrmi, 0 },
+ { X86::VDPPSrri, X86::VDPPSrmi, 0 },
+ // Do not fold VFs* loads because there are no scalar load variants for
+  // these instructions. When folded, the load is required to be 128 bits, so
+ // the load size would not match.
+ { X86::VFvANDNPDrr, X86::VFvANDNPDrm, 0 },
+ { X86::VFvANDNPSrr, X86::VFvANDNPSrm, 0 },
+ { X86::VFvANDPDrr, X86::VFvANDPDrm, 0 },
+ { X86::VFvANDPSrr, X86::VFvANDPSrm, 0 },
+ { X86::VFvORPDrr, X86::VFvORPDrm, 0 },
+ { X86::VFvORPSrr, X86::VFvORPSrm, 0 },
+ { X86::VFvXORPDrr, X86::VFvXORPDrm, 0 },
+ { X86::VFvXORPSrr, X86::VFvXORPSrm, 0 },
{ X86::VHADDPDrr, X86::VHADDPDrm, 0 },
{ X86::VHADDPSrr, X86::VHADDPSrm, 0 },
{ X86::VHSUBPDrr, X86::VHSUBPDrm, 0 },
@@ -985,16 +1266,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMAXPDrr, X86::VMAXPDrm, 0 },
{ X86::VMAXPSrr, X86::VMAXPSrm, 0 },
{ X86::VMAXSDrr, X86::VMAXSDrm, 0 },
+ { X86::VMAXSDrr_Int, X86::VMAXSDrm_Int, 0 },
{ X86::VMAXSSrr, X86::VMAXSSrm, 0 },
+ { X86::VMAXSSrr_Int, X86::VMAXSSrm_Int, 0 },
{ X86::VMINPDrr, X86::VMINPDrm, 0 },
{ X86::VMINPSrr, X86::VMINPSrm, 0 },
{ X86::VMINSDrr, X86::VMINSDrm, 0 },
+ { X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
{ X86::VMINSSrr, X86::VMINSSrm, 0 },
+ { X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
{ X86::VMPSADBWrri, X86::VMPSADBWrmi, 0 },
{ X86::VMULPDrr, X86::VMULPDrm, 0 },
{ X86::VMULPSrr, X86::VMULPSrm, 0 },
{ X86::VMULSDrr, X86::VMULSDrm, 0 },
+ { X86::VMULSDrr_Int, X86::VMULSDrm_Int, 0 },
{ X86::VMULSSrr, X86::VMULSSrm, 0 },
+ { X86::VMULSSrr_Int, X86::VMULSSrm_Int, 0 },
{ X86::VORPDrr, X86::VORPDrm, 0 },
{ X86::VORPSrr, X86::VORPSrm, 0 },
{ X86::VPACKSSDWrr, X86::VPACKSSDWrm, 0 },
@@ -1014,7 +1301,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPANDrr, X86::VPANDrm, 0 },
{ X86::VPAVGBrr, X86::VPAVGBrm, 0 },
{ X86::VPAVGWrr, X86::VPAVGWrm, 0 },
+ { X86::VPBLENDVBrr, X86::VPBLENDVBrm, 0 },
{ X86::VPBLENDWrri, X86::VPBLENDWrmi, 0 },
+ { X86::VPCLMULQDQrr, X86::VPCLMULQDQrm, 0 },
{ X86::VPCMPEQBrr, X86::VPCMPEQBrm, 0 },
{ X86::VPCMPEQDrr, X86::VPCMPEQDrm, 0 },
{ X86::VPCMPEQQrr, X86::VPCMPEQQrm, 0 },
@@ -1031,6 +1320,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPHSUBWrr, X86::VPHSUBWrm, 0 },
{ X86::VPERMILPDrr, X86::VPERMILPDrm, 0 },
{ X86::VPERMILPSrr, X86::VPERMILPSrm, 0 },
+ { X86::VPINSRBrr, X86::VPINSRBrm, 0 },
+ { X86::VPINSRDrr, X86::VPINSRDrm, 0 },
+ { X86::VPINSRQrr, X86::VPINSRQrm, 0 },
{ X86::VPINSRWrri, X86::VPINSRWrmi, 0 },
{ X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, 0 },
{ X86::VPMADDWDrr, X86::VPMADDWDrm, 0 },
@@ -1069,8 +1361,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPSRLWrr, X86::VPSRLWrm, 0 },
{ X86::VPSUBBrr, X86::VPSUBBrm, 0 },
{ X86::VPSUBDrr, X86::VPSUBDrm, 0 },
+ { X86::VPSUBQrr, X86::VPSUBQrm, 0 },
{ X86::VPSUBSBrr, X86::VPSUBSBrm, 0 },
{ X86::VPSUBSWrr, X86::VPSUBSWrm, 0 },
+ { X86::VPSUBUSBrr, X86::VPSUBUSBrm, 0 },
+ { X86::VPSUBUSWrr, X86::VPSUBUSWrm, 0 },
{ X86::VPSUBWrr, X86::VPSUBWrm, 0 },
{ X86::VPUNPCKHBWrr, X86::VPUNPCKHBWrm, 0 },
{ X86::VPUNPCKHDQrr, X86::VPUNPCKHDQrm, 0 },
@@ -1086,13 +1381,16 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VSUBPDrr, X86::VSUBPDrm, 0 },
{ X86::VSUBPSrr, X86::VSUBPSrm, 0 },
{ X86::VSUBSDrr, X86::VSUBSDrm, 0 },
+ { X86::VSUBSDrr_Int, X86::VSUBSDrm_Int, 0 },
{ X86::VSUBSSrr, X86::VSUBSSrm, 0 },
+ { X86::VSUBSSrr_Int, X86::VSUBSSrm_Int, 0 },
{ X86::VUNPCKHPDrr, X86::VUNPCKHPDrm, 0 },
{ X86::VUNPCKHPSrr, X86::VUNPCKHPSrm, 0 },
{ X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, 0 },
{ X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, 0 },
{ X86::VXORPDrr, X86::VXORPDrm, 0 },
{ X86::VXORPSrr, X86::VXORPSrm, 0 },
+
// AVX 256-bit foldable instructions
{ X86::VADDPDYrr, X86::VADDPDYrm, 0 },
{ X86::VADDPSYrr, X86::VADDPSYrm, 0 },
@@ -1110,6 +1408,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VCMPPSYrri, X86::VCMPPSYrmi, 0 },
{ X86::VDIVPDYrr, X86::VDIVPDYrm, 0 },
{ X86::VDIVPSYrr, X86::VDIVPSYrm, 0 },
+ { X86::VDPPSYrri, X86::VDPPSYrmi, 0 },
{ X86::VHADDPDYrr, X86::VHADDPDYrm, 0 },
{ X86::VHADDPSYrr, X86::VHADDPSYrm, 0 },
{ X86::VHSUBPDYrr, X86::VHSUBPDYrm, 0 },
@@ -1136,6 +1435,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrm, 0 },
{ X86::VXORPDYrr, X86::VXORPDYrm, 0 },
{ X86::VXORPSYrr, X86::VXORPSYrm, 0 },
+
// AVX2 foldable instructions
{ X86::VINSERTI128rr, X86::VINSERTI128rm, 0 },
{ X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, 0 },
@@ -1157,6 +1457,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPAVGWYrr, X86::VPAVGWYrm, 0 },
{ X86::VPBLENDDrri, X86::VPBLENDDrmi, 0 },
{ X86::VPBLENDDYrri, X86::VPBLENDDYrmi, 0 },
+ { X86::VPBLENDVBYrr, X86::VPBLENDVBYrm, 0 },
{ X86::VPBLENDWYrri, X86::VPBLENDWYrmi, 0 },
{ X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, 0 },
{ X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, 0 },
@@ -1168,9 +1469,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, 0 },
{ X86::VPERM2I128rr, X86::VPERM2I128rm, 0 },
{ X86::VPERMDYrr, X86::VPERMDYrm, 0 },
- { X86::VPERMPDYri, X86::VPERMPDYmi, 0 },
{ X86::VPERMPSYrr, X86::VPERMPSYrm, 0 },
- { X86::VPERMQYri, X86::VPERMQYmi, 0 },
{ X86::VPHADDDYrr, X86::VPHADDDYrm, 0 },
{ X86::VPHADDSWrr256, X86::VPHADDSWrm256, 0 },
{ X86::VPHADDWYrr, X86::VPHADDWYrm, 0 },
@@ -1225,8 +1524,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPSRLVQYrr, X86::VPSRLVQYrm, 0 },
{ X86::VPSUBBYrr, X86::VPSUBBYrm, 0 },
{ X86::VPSUBDYrr, X86::VPSUBDYrm, 0 },
+ { X86::VPSUBQYrr, X86::VPSUBQYrm, 0 },
{ X86::VPSUBSBYrr, X86::VPSUBSBYrm, 0 },
{ X86::VPSUBSWYrr, X86::VPSUBSWYrm, 0 },
+ { X86::VPSUBUSBYrr, X86::VPSUBUSBYrm, 0 },
+ { X86::VPSUBUSWYrr, X86::VPSUBUSWYrm, 0 },
{ X86::VPSUBWYrr, X86::VPSUBWYrm, 0 },
{ X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, 0 },
{ X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, 0 },
@@ -1237,41 +1539,81 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, 0 },
{ X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, 0 },
{ X86::VPXORYrr, X86::VPXORYrm, 0 },
- // FIXME: add AVX 256-bit foldable instructions
// FMA4 foldable patterns
- { X86::VFMADDSS4rr, X86::VFMADDSS4mr, 0 },
- { X86::VFMADDSD4rr, X86::VFMADDSD4mr, 0 },
- { X86::VFMADDPS4rr, X86::VFMADDPS4mr, TB_ALIGN_16 },
- { X86::VFMADDPD4rr, X86::VFMADDPD4mr, TB_ALIGN_16 },
- { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, TB_ALIGN_32 },
- { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, TB_ALIGN_32 },
- { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, 0 },
- { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, 0 },
- { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, TB_ALIGN_16 },
- { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, TB_ALIGN_16 },
- { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, TB_ALIGN_32 },
- { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, TB_ALIGN_32 },
- { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, 0 },
- { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, 0 },
- { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, TB_ALIGN_16 },
- { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, TB_ALIGN_16 },
- { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, TB_ALIGN_32 },
- { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, TB_ALIGN_32 },
- { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, 0 },
- { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, 0 },
- { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, TB_ALIGN_16 },
- { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, TB_ALIGN_16 },
- { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, TB_ALIGN_32 },
- { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, TB_ALIGN_32 },
- { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, TB_ALIGN_16 },
- { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, TB_ALIGN_16 },
- { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, TB_ALIGN_32 },
- { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, TB_ALIGN_32 },
- { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, TB_ALIGN_16 },
- { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, TB_ALIGN_16 },
- { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, TB_ALIGN_32 },
- { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_32 },
+ { X86::VFMADDSS4rr, X86::VFMADDSS4mr, 0 },
+ { X86::VFMADDSD4rr, X86::VFMADDSD4mr, 0 },
+ { X86::VFMADDPS4rr, X86::VFMADDPS4mr, 0 },
+ { X86::VFMADDPD4rr, X86::VFMADDPD4mr, 0 },
+ { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, 0 },
+ { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, 0 },
+ { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, 0 },
+ { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, 0 },
+ { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, 0 },
+ { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, 0 },
+ { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, 0 },
+ { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, 0 },
+ { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, 0 },
+ { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, 0 },
+ { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, 0 },
+ { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, 0 },
+ { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, 0 },
+ { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, 0 },
+ { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, 0 },
+ { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, 0 },
+ { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, 0 },
+ { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, 0 },
+ { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, 0 },
+ { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, 0 },
+ { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, 0 },
+ { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, 0 },
+ { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, 0 },
+ { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, 0 },
+ { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, 0 },
+ { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, 0 },
+ { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, 0 },
+ { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, 0 },
+
+ // XOP foldable instructions
+ { X86::VPCMOVrr, X86::VPCMOVmr, 0 },
+ { X86::VPCMOVrrY, X86::VPCMOVmrY, 0 },
+ { X86::VPCOMBri, X86::VPCOMBmi, 0 },
+ { X86::VPCOMDri, X86::VPCOMDmi, 0 },
+ { X86::VPCOMQri, X86::VPCOMQmi, 0 },
+ { X86::VPCOMWri, X86::VPCOMWmi, 0 },
+ { X86::VPCOMUBri, X86::VPCOMUBmi, 0 },
+ { X86::VPCOMUDri, X86::VPCOMUDmi, 0 },
+ { X86::VPCOMUQri, X86::VPCOMUQmi, 0 },
+ { X86::VPCOMUWri, X86::VPCOMUWmi, 0 },
+ { X86::VPERMIL2PDrr, X86::VPERMIL2PDmr, 0 },
+ { X86::VPERMIL2PDrrY, X86::VPERMIL2PDmrY, 0 },
+ { X86::VPERMIL2PSrr, X86::VPERMIL2PSmr, 0 },
+ { X86::VPERMIL2PSrrY, X86::VPERMIL2PSmrY, 0 },
+ { X86::VPMACSDDrr, X86::VPMACSDDrm, 0 },
+ { X86::VPMACSDQHrr, X86::VPMACSDQHrm, 0 },
+ { X86::VPMACSDQLrr, X86::VPMACSDQLrm, 0 },
+ { X86::VPMACSSDDrr, X86::VPMACSSDDrm, 0 },
+ { X86::VPMACSSDQHrr, X86::VPMACSSDQHrm, 0 },
+ { X86::VPMACSSDQLrr, X86::VPMACSSDQLrm, 0 },
+ { X86::VPMACSSWDrr, X86::VPMACSSWDrm, 0 },
+ { X86::VPMACSSWWrr, X86::VPMACSSWWrm, 0 },
+ { X86::VPMACSWDrr, X86::VPMACSWDrm, 0 },
+ { X86::VPMACSWWrr, X86::VPMACSWWrm, 0 },
+ { X86::VPMADCSSWDrr, X86::VPMADCSSWDrm, 0 },
+ { X86::VPMADCSWDrr, X86::VPMADCSWDrm, 0 },
+ { X86::VPPERMrr, X86::VPPERMmr, 0 },
+ { X86::VPROTBrr, X86::VPROTBrm, 0 },
+ { X86::VPROTDrr, X86::VPROTDrm, 0 },
+ { X86::VPROTQrr, X86::VPROTQrm, 0 },
+ { X86::VPROTWrr, X86::VPROTWrm, 0 },
+ { X86::VPSHABrr, X86::VPSHABrm, 0 },
+ { X86::VPSHADrr, X86::VPSHADrm, 0 },
+ { X86::VPSHAQrr, X86::VPSHAQrm, 0 },
+ { X86::VPSHAWrr, X86::VPSHAWrm, 0 },
+ { X86::VPSHLBrr, X86::VPSHLBrm, 0 },
+ { X86::VPSHLDrr, X86::VPSHLDrm, 0 },
+ { X86::VPSHLQrr, X86::VPSHLQrm, 0 },
+ { X86::VPSHLWrr, X86::VPSHLWrm, 0 },
// BMI/BMI2 foldable instructions
{ X86::ANDN32rr, X86::ANDN32rm, 0 },
@@ -1321,16 +1663,29 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VALIGNQrri, X86::VALIGNQrmi, 0 },
{ X86::VALIGNDrri, X86::VALIGNDrmi, 0 },
{ X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 },
+ { X86::VBROADCASTSSZrkz, X86::VBROADCASTSSZmkz, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZrkz, X86::VBROADCASTSDZmkz, TB_NO_REVERSE },
+
+ // AVX-512{F,VL} foldable instructions
+ { X86::VBROADCASTSSZ256rkz, X86::VBROADCASTSSZ256mkz, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256rkz, X86::VBROADCASTSDZ256mkz, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ128rkz, X86::VBROADCASTSSZ128mkz, TB_NO_REVERSE },
+
+  // AVX-512{F,VL} foldable arithmetic instructions
+ { X86::VADDPDZ128rr, X86::VADDPDZ128rm, 0 },
+ { X86::VADDPDZ256rr, X86::VADDPDZ256rm, 0 },
+ { X86::VADDPSZ128rr, X86::VADDPSZ128rm, 0 },
+ { X86::VADDPSZ256rr, X86::VADDPSZ256rm, 0 },
// AES foldable instructions
{ X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },
{ X86::AESDECrr, X86::AESDECrm, TB_ALIGN_16 },
{ X86::AESENCLASTrr, X86::AESENCLASTrm, TB_ALIGN_16 },
{ X86::AESENCrr, X86::AESENCrm, TB_ALIGN_16 },
- { X86::VAESDECLASTrr, X86::VAESDECLASTrm, TB_ALIGN_16 },
- { X86::VAESDECrr, X86::VAESDECrm, TB_ALIGN_16 },
- { X86::VAESENCLASTrr, X86::VAESENCLASTrm, TB_ALIGN_16 },
- { X86::VAESENCrr, X86::VAESENCrm, TB_ALIGN_16 },
+ { X86::VAESDECLASTrr, X86::VAESDECLASTrm, 0 },
+ { X86::VAESDECrr, X86::VAESDECrm, 0 },
+ { X86::VAESENCLASTrr, X86::VAESENCLASTrm, 0 },
+ { X86::VAESENCrr, X86::VAESENCrm, 0 },
// SHA foldable instructions
{ X86::SHA1MSG1rr, X86::SHA1MSG1rm, TB_ALIGN_16 },
@@ -1339,20 +1694,20 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::SHA1RNDS4rri, X86::SHA1RNDS4rmi, TB_ALIGN_16 },
{ X86::SHA256MSG1rr, X86::SHA256MSG1rm, TB_ALIGN_16 },
{ X86::SHA256MSG2rr, X86::SHA256MSG2rm, TB_ALIGN_16 },
- { X86::SHA256RNDS2rr, X86::SHA256RNDS2rm, TB_ALIGN_16 },
+ { X86::SHA256RNDS2rr, X86::SHA256RNDS2rm, TB_ALIGN_16 }
};
- for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
- unsigned RegOp = OpTbl2[i].RegOp;
- unsigned MemOp = OpTbl2[i].MemOp;
- unsigned Flags = OpTbl2[i].Flags;
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable2[i].RegOp;
+ unsigned MemOp = MemoryFoldTable2[i].MemOp;
+ unsigned Flags = MemoryFoldTable2[i].Flags;
AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable,
RegOp, MemOp,
// Index 2, folded load
Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
}
- static const X86OpTblEntry OpTbl3[] = {
+ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = {
// FMA foldable instructions
{ X86::VFMADDSSr231r, X86::VFMADDSSr231m, TB_ALIGN_NONE },
{ X86::VFMADDSDr231r, X86::VFMADDSDr231m, TB_ALIGN_NONE },
@@ -1493,6 +1848,16 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_16 },
{ X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_32 },
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_32 },
+
+ // XOP foldable instructions
+ { X86::VPCMOVrr, X86::VPCMOVrm, 0 },
+ { X86::VPCMOVrrY, X86::VPCMOVrmY, 0 },
+ { X86::VPERMIL2PDrr, X86::VPERMIL2PDrm, 0 },
+ { X86::VPERMIL2PDrrY, X86::VPERMIL2PDrmY, 0 },
+ { X86::VPERMIL2PSrr, X86::VPERMIL2PSrm, 0 },
+ { X86::VPERMIL2PSrrY, X86::VPERMIL2PSrmY, 0 },
+ { X86::VPPERMrr, X86::VPPERMrm, 0 },
+
// AVX-512 VPERMI instructions with 3 source operands.
{ X86::VPERMI2Drr, X86::VPERMI2Drm, 0 },
{ X86::VPERMI2Qrr, X86::VPERMI2Qrm, 0 },
@@ -1501,19 +1866,114 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VBLENDMPDZrr, X86::VBLENDMPDZrm, 0 },
{ X86::VBLENDMPSZrr, X86::VBLENDMPSZrm, 0 },
{ X86::VPBLENDMDZrr, X86::VPBLENDMDZrm, 0 },
- { X86::VPBLENDMQZrr, X86::VPBLENDMQZrm, 0 }
+ { X86::VPBLENDMQZrr, X86::VPBLENDMQZrm, 0 },
+ { X86::VBROADCASTSSZrk, X86::VBROADCASTSSZmk, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZrk, X86::VBROADCASTSDZmk, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ256rk, X86::VBROADCASTSSZ256mk, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256rk, X86::VBROADCASTSDZ256mk, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ128rk, X86::VBROADCASTSSZ128mk, TB_NO_REVERSE },
+ // AVX-512 arithmetic instructions
+ { X86::VADDPSZrrkz, X86::VADDPSZrmkz, 0 },
+ { X86::VADDPDZrrkz, X86::VADDPDZrmkz, 0 },
+ { X86::VSUBPSZrrkz, X86::VSUBPSZrmkz, 0 },
+ { X86::VSUBPDZrrkz, X86::VSUBPDZrmkz, 0 },
+ { X86::VMULPSZrrkz, X86::VMULPSZrmkz, 0 },
+ { X86::VMULPDZrrkz, X86::VMULPDZrmkz, 0 },
+ { X86::VDIVPSZrrkz, X86::VDIVPSZrmkz, 0 },
+ { X86::VDIVPDZrrkz, X86::VDIVPDZrmkz, 0 },
+ { X86::VMINPSZrrkz, X86::VMINPSZrmkz, 0 },
+ { X86::VMINPDZrrkz, X86::VMINPDZrmkz, 0 },
+ { X86::VMAXPSZrrkz, X86::VMAXPSZrmkz, 0 },
+ { X86::VMAXPDZrrkz, X86::VMAXPDZrmkz, 0 },
+ // AVX-512{F,VL} arithmetic instructions 256-bit
+ { X86::VADDPSZ256rrkz, X86::VADDPSZ256rmkz, 0 },
+ { X86::VADDPDZ256rrkz, X86::VADDPDZ256rmkz, 0 },
+ { X86::VSUBPSZ256rrkz, X86::VSUBPSZ256rmkz, 0 },
+ { X86::VSUBPDZ256rrkz, X86::VSUBPDZ256rmkz, 0 },
+ { X86::VMULPSZ256rrkz, X86::VMULPSZ256rmkz, 0 },
+ { X86::VMULPDZ256rrkz, X86::VMULPDZ256rmkz, 0 },
+ { X86::VDIVPSZ256rrkz, X86::VDIVPSZ256rmkz, 0 },
+ { X86::VDIVPDZ256rrkz, X86::VDIVPDZ256rmkz, 0 },
+ { X86::VMINPSZ256rrkz, X86::VMINPSZ256rmkz, 0 },
+ { X86::VMINPDZ256rrkz, X86::VMINPDZ256rmkz, 0 },
+ { X86::VMAXPSZ256rrkz, X86::VMAXPSZ256rmkz, 0 },
+ { X86::VMAXPDZ256rrkz, X86::VMAXPDZ256rmkz, 0 },
+ // AVX-512{F,VL} arithmetic instructions 128-bit
+ { X86::VADDPSZ128rrkz, X86::VADDPSZ128rmkz, 0 },
+ { X86::VADDPDZ128rrkz, X86::VADDPDZ128rmkz, 0 },
+ { X86::VSUBPSZ128rrkz, X86::VSUBPSZ128rmkz, 0 },
+ { X86::VSUBPDZ128rrkz, X86::VSUBPDZ128rmkz, 0 },
+ { X86::VMULPSZ128rrkz, X86::VMULPSZ128rmkz, 0 },
+ { X86::VMULPDZ128rrkz, X86::VMULPDZ128rmkz, 0 },
+ { X86::VDIVPSZ128rrkz, X86::VDIVPSZ128rmkz, 0 },
+ { X86::VDIVPDZ128rrkz, X86::VDIVPDZ128rmkz, 0 },
+ { X86::VMINPSZ128rrkz, X86::VMINPSZ128rmkz, 0 },
+ { X86::VMINPDZ128rrkz, X86::VMINPDZ128rmkz, 0 },
+ { X86::VMAXPSZ128rrkz, X86::VMAXPSZ128rmkz, 0 },
+ { X86::VMAXPDZ128rrkz, X86::VMAXPDZ128rmkz, 0 }
};
- for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
- unsigned RegOp = OpTbl3[i].RegOp;
- unsigned MemOp = OpTbl3[i].MemOp;
- unsigned Flags = OpTbl3[i].Flags;
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable3); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable3[i].RegOp;
+ unsigned MemOp = MemoryFoldTable3[i].MemOp;
+ unsigned Flags = MemoryFoldTable3[i].Flags;
AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
RegOp, MemOp,
// Index 3, folded load
Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
}
+ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = {
+ // AVX-512 foldable instructions
+ { X86::VADDPSZrrk, X86::VADDPSZrmk, 0 },
+ { X86::VADDPDZrrk, X86::VADDPDZrmk, 0 },
+ { X86::VSUBPSZrrk, X86::VSUBPSZrmk, 0 },
+ { X86::VSUBPDZrrk, X86::VSUBPDZrmk, 0 },
+ { X86::VMULPSZrrk, X86::VMULPSZrmk, 0 },
+ { X86::VMULPDZrrk, X86::VMULPDZrmk, 0 },
+ { X86::VDIVPSZrrk, X86::VDIVPSZrmk, 0 },
+ { X86::VDIVPDZrrk, X86::VDIVPDZrmk, 0 },
+ { X86::VMINPSZrrk, X86::VMINPSZrmk, 0 },
+ { X86::VMINPDZrrk, X86::VMINPDZrmk, 0 },
+ { X86::VMAXPSZrrk, X86::VMAXPSZrmk, 0 },
+ { X86::VMAXPDZrrk, X86::VMAXPDZrmk, 0 },
+ // AVX-512{F,VL} foldable instructions 256-bit
+ { X86::VADDPSZ256rrk, X86::VADDPSZ256rmk, 0 },
+ { X86::VADDPDZ256rrk, X86::VADDPDZ256rmk, 0 },
+ { X86::VSUBPSZ256rrk, X86::VSUBPSZ256rmk, 0 },
+ { X86::VSUBPDZ256rrk, X86::VSUBPDZ256rmk, 0 },
+ { X86::VMULPSZ256rrk, X86::VMULPSZ256rmk, 0 },
+ { X86::VMULPDZ256rrk, X86::VMULPDZ256rmk, 0 },
+ { X86::VDIVPSZ256rrk, X86::VDIVPSZ256rmk, 0 },
+ { X86::VDIVPDZ256rrk, X86::VDIVPDZ256rmk, 0 },
+ { X86::VMINPSZ256rrk, X86::VMINPSZ256rmk, 0 },
+ { X86::VMINPDZ256rrk, X86::VMINPDZ256rmk, 0 },
+ { X86::VMAXPSZ256rrk, X86::VMAXPSZ256rmk, 0 },
+ { X86::VMAXPDZ256rrk, X86::VMAXPDZ256rmk, 0 },
+ // AVX-512{F,VL} foldable instructions 128-bit
+ { X86::VADDPSZ128rrk, X86::VADDPSZ128rmk, 0 },
+ { X86::VADDPDZ128rrk, X86::VADDPDZ128rmk, 0 },
+ { X86::VSUBPSZ128rrk, X86::VSUBPSZ128rmk, 0 },
+ { X86::VSUBPDZ128rrk, X86::VSUBPDZ128rmk, 0 },
+ { X86::VMULPSZ128rrk, X86::VMULPSZ128rmk, 0 },
+ { X86::VMULPDZ128rrk, X86::VMULPDZ128rmk, 0 },
+ { X86::VDIVPSZ128rrk, X86::VDIVPSZ128rmk, 0 },
+ { X86::VDIVPDZ128rrk, X86::VDIVPDZ128rmk, 0 },
+ { X86::VMINPSZ128rrk, X86::VMINPSZ128rmk, 0 },
+ { X86::VMINPDZ128rrk, X86::VMINPDZ128rmk, 0 },
+ { X86::VMAXPSZ128rrk, X86::VMAXPSZ128rmk, 0 },
+ { X86::VMAXPDZ128rrk, X86::VMAXPDZ128rmk, 0 }
+ };
+
+ for (unsigned i = 0, e = array_lengthof(MemoryFoldTable4); i != e; ++i) {
+ unsigned RegOp = MemoryFoldTable4[i].RegOp;
+ unsigned MemOp = MemoryFoldTable4[i].MemOp;
+ unsigned Flags = MemoryFoldTable4[i].Flags;
+ AddTableEntry(RegOp2MemOpTable4, MemOp2RegOpTable,
+ RegOp, MemOp,
+ // Index 4, folded load
+ Flags | TB_INDEX_4 | TB_FOLDED_LOAD);
+ }
}
void
@@ -1579,7 +2039,59 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
return false;
}
-/// isFrameOperand - Return true and the FrameIndex if the specified
+int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
+
+ if (MI->getOpcode() == getCallFrameSetupOpcode() ||
+ MI->getOpcode() == getCallFrameDestroyOpcode()) {
+ unsigned StackAlign = TFI->getStackAlignment();
+ int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
+ StackAlign;
+
+ SPAdj -= MI->getOperand(1).getImm();
+
+ if (MI->getOpcode() == getCallFrameSetupOpcode())
+ return SPAdj;
+ else
+ return -SPAdj;
+ }
+
+ // To know whether a call adjusts the stack, we need information
+ // that is bound to the following ADJCALLSTACKUP pseudo.
+ // Look for the next ADJCALLSTACKUP that follows the call.
+ if (MI->isCall()) {
+ const MachineBasicBlock* MBB = MI->getParent();
+ auto I = ++MachineBasicBlock::const_iterator(MI);
+ for (auto E = MBB->end(); I != E; ++I) {
+ if (I->getOpcode() == getCallFrameDestroyOpcode() ||
+ I->isCall())
+ break;
+ }
+
+ // If we could not find a frame destroy opcode, then it has already
+ // been simplified, so we don't care.
+ if (I->getOpcode() != getCallFrameDestroyOpcode())
+ return 0;
+
+ return -(I->getOperand(1).getImm());
+ }
+
+  // Currently, handle only the PUSHes we can reasonably expect to see
+  // in call sequences.
+ switch (MI->getOpcode()) {
+ default:
+ return 0;
+ case X86::PUSH32i8:
+ case X86::PUSH32r:
+ case X86::PUSH32rmm:
+ case X86::PUSH32rmr:
+ case X86::PUSHi32:
+ return 4;
+ }
+}
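The setup/destroy arithmetic above rounds the call-frame size up to the stack alignment before negating it for the destroy side. A standalone worked example of that rounding (helper and values are illustrative):

#include <cassert>

// Re-statement of the rounding in getSPAdjust above.
static int roundedCallFrameSize(int Imm, unsigned StackAlign) {
  return (Imm + StackAlign - 1) / StackAlign * StackAlign;
}

int main() {
  assert(roundedCallFrameSize(20, 16) == 32); // 20 bytes rounds up to 32
  assert(roundedCallFrameSize(32, 16) == 32); // already aligned
  return 0;
}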
+
+/// Return true and the FrameIndex if the specified
/// operand and follow operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
int &FrameIndex) const {
@@ -1706,8 +2218,7 @@ unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
return 0;
}
-/// regIsPICBase - Return true if register is PIC base (i.e.g defined by
-/// X86::MOVPC32r.
+/// Return true if register is PIC base, e.g. defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
// Don't waste compile time scanning use-def chains of physregs.
if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
@@ -1903,8 +2414,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
-/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
-/// is not marked dead.
+/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
@@ -1916,8 +2426,7 @@ static bool hasLiveCondCodeDef(MachineInstr *MI) {
return false;
}
-/// getTruncatedShiftCount - check whether the shift count for a machine operand
-/// is non-zero.
+/// Check whether the shift count for a machine operand is non-zero.
inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
unsigned ShiftAmtOperandIdx) {
// The shift count is six bits with the REX.W prefix and five bits without.
@@ -1926,7 +2435,7 @@ inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
return Imm & ShiftCountMask;
}
-/// isTruncatedShiftCountForLEA - check whether the given shift count is appropriate
+/// Check whether the given shift count is appropriate, i.e. whether it
/// can be represented by a LEA instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
// Left shift instructions can be transformed into load-effective-address
@@ -2008,10 +2517,9 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
return true;
}
-/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
-/// 16-bit LEA is disabled, use 32-bit LEA to form 3-address code by promoting
-/// to a 32-bit superregister and then truncating back down to a 16-bit
-/// subregister.
+/// Helper for convertToThreeAddress when 16-bit LEA is disabled; uses 32-bit
+/// LEA to form 3-address code by promoting to a 32-bit superregister and then
+/// truncating back down to a 16-bit subregister.
MachineInstr *
X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
@@ -2058,11 +2566,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
break;
}
case X86::INC16r:
- case X86::INC64_16r:
addRegOffset(MIB, leaInReg, true, 1);
break;
case X86::DEC16r:
- case X86::DEC64_16r:
addRegOffset(MIB, leaInReg, true, -1);
break;
case X86::ADD16ri:
@@ -2120,7 +2626,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
return ExtMI;
}
-/// convertToThreeAddress - This method must be implemented by targets that
+/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
@@ -2156,6 +2662,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned MIOpc = MI->getOpcode();
switch (MIOpc) {
+ default: return nullptr;
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
@@ -2210,185 +2717,175 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
break;
}
- default: {
+ case X86::INC64r:
+ case X86::INC32r: {
+ assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
+ unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
+ : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return nullptr;
- switch (MIOpc) {
- default: return nullptr;
- case X86::INC64r:
- case X86::INC32r:
- case X86::INC64_32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
- : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- bool isKill, isUndef;
- unsigned SrcReg;
- MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
- if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
- SrcReg, isKill, isUndef, ImplicitOp))
- return nullptr;
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
- if (ImplicitOp.getReg() != 0)
- MIB.addOperand(ImplicitOp);
+ NewMI = addOffset(MIB, 1);
+ break;
+ }
+ case X86::INC16r:
+ if (DisableLEA16)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
+ assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src), 1);
+ break;
+ case X86::DEC64r:
+ case X86::DEC32r: {
+ assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
+ unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
+ : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addOffset(MIB, 1);
- break;
- }
- case X86::INC16r:
- case X86::INC64_16r:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
- : nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), 1);
- break;
- case X86::DEC64r:
- case X86::DEC32r:
- case X86::DEC64_32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
- : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
-
- bool isKill, isUndef;
- unsigned SrcReg;
- MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
- if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
- SrcReg, isKill, isUndef, ImplicitOp))
- return nullptr;
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
- if (ImplicitOp.getReg() != 0)
- MIB.addOperand(ImplicitOp);
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
- NewMI = addOffset(MIB, -1);
+ NewMI = addOffset(MIB, -1);
- break;
- }
- case X86::DEC16r:
- case X86::DEC64_16r:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
- : nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), -1);
- break;
- case X86::ADD64rr:
- case X86::ADD64rr_DB:
- case X86::ADD32rr:
- case X86::ADD32rr_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Opc;
- if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
- Opc = X86::LEA64r;
- else
- Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
+ break;
+ }
+ case X86::DEC16r:
+ if (DisableLEA16)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
+ assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src), -1);
+ break;
+ case X86::ADD64rr:
+ case X86::ADD64rr_DB:
+ case X86::ADD32rr:
+ case X86::ADD32rr_DB: {
+ assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ unsigned Opc;
+ if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
+ Opc = X86::LEA64r;
+ else
+ Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- bool isKill, isUndef;
- unsigned SrcReg;
- MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
- if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
- SrcReg, isKill, isUndef, ImplicitOp))
- return nullptr;
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return nullptr;
- const MachineOperand &Src2 = MI->getOperand(2);
- bool isKill2, isUndef2;
- unsigned SrcReg2;
- MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
- if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
- SrcReg2, isKill2, isUndef2, ImplicitOp2))
- return nullptr;
+ const MachineOperand &Src2 = MI->getOperand(2);
+ bool isKill2, isUndef2;
+ unsigned SrcReg2;
+ MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
+ SrcReg2, isKill2, isUndef2, ImplicitOp2))
+ return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest);
- if (ImplicitOp.getReg() != 0)
- MIB.addOperand(ImplicitOp);
- if (ImplicitOp2.getReg() != 0)
- MIB.addOperand(ImplicitOp2);
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest);
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+ if (ImplicitOp2.getReg() != 0)
+ MIB.addOperand(ImplicitOp2);
- NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
+ NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
- // Preserve undefness of the operands.
- NewMI->getOperand(1).setIsUndef(isUndef);
- NewMI->getOperand(3).setIsUndef(isUndef2);
+ // Preserve undefness of the operands.
+ NewMI->getOperand(1).setIsUndef(isUndef);
+ NewMI->getOperand(3).setIsUndef(isUndef2);
- if (LV && Src2.isKill())
- LV->replaceKillInstruction(SrcReg2, MI, NewMI);
- break;
- }
- case X86::ADD16rr:
- case X86::ADD16rr_DB: {
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
- : nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
- NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest),
- Src.getReg(), Src.isKill(), Src2, isKill2);
-
- // Preserve undefness of the operands.
- bool isUndef = MI->getOperand(1).isUndef();
- bool isUndef2 = MI->getOperand(2).isUndef();
- NewMI->getOperand(1).setIsUndef(isUndef);
- NewMI->getOperand(3).setIsUndef(isUndef2);
-
- if (LV && isKill2)
- LV->replaceKillInstruction(Src2, MI, NewMI);
- break;
- }
- case X86::ADD64ri32:
- case X86::ADD64ri8:
- case X86::ADD64ri32_DB:
- case X86::ADD64ri8_DB:
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
- break;
- case X86::ADD32ri:
- case X86::ADD32ri8:
- case X86::ADD32ri_DB:
- case X86::ADD32ri8_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
-
- bool isKill, isUndef;
- unsigned SrcReg;
- MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
- if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
- SrcReg, isKill, isUndef, ImplicitOp))
- return nullptr;
+ if (LV && Src2.isKill())
+ LV->replaceKillInstruction(SrcReg2, MI, NewMI);
+ break;
+ }
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB: {
+ if (DisableLEA16)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
+ assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ unsigned Src2 = MI->getOperand(2).getReg();
+ bool isKill2 = MI->getOperand(2).isKill();
+ NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest),
+ Src.getReg(), Src.isKill(), Src2, isKill2);
+
+ // Preserve undefness of the operands.
+ bool isUndef = MI->getOperand(1).isUndef();
+ bool isUndef2 = MI->getOperand(2).isUndef();
+ NewMI->getOperand(1).setIsUndef(isUndef);
+ NewMI->getOperand(3).setIsUndef(isUndef2);
+
+ if (LV && isKill2)
+ LV->replaceKillInstruction(Src2, MI, NewMI);
+ break;
+ }
+ case X86::ADD64ri32:
+ case X86::ADD64ri8:
+ case X86::ADD64ri32_DB:
+ case X86::ADD64ri8_DB:
+ assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest).addOperand(Src),
+ MI->getOperand(2).getImm());
+ break;
+ case X86::ADD32ri:
+ case X86::ADD32ri8:
+ case X86::ADD32ri_DB:
+ case X86::ADD32ri8_DB: {
+ assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
- if (ImplicitOp.getReg() != 0)
- MIB.addOperand(ImplicitOp);
+ bool isKill, isUndef;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
+ SrcReg, isKill, isUndef, ImplicitOp))
+ return nullptr;
- NewMI = addOffset(MIB, MI->getOperand(2).getImm());
- break;
- }
- case X86::ADD16ri:
- case X86::ADD16ri8:
- case X86::ADD16ri_DB:
- case X86::ADD16ri8_DB:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
- : nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
- break;
- }
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ if (ImplicitOp.getReg() != 0)
+ MIB.addOperand(ImplicitOp);
+
+ NewMI = addOffset(MIB, MI->getOperand(2).getImm());
+ break;
}
+ case X86::ADD16ri:
+ case X86::ADD16ri8:
+ case X86::ADD16ri_DB:
+ case X86::ADD16ri8_DB:
+ if (DisableLEA16)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
+ assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src),
+ MI->getOperand(2).getImm());
+ break;
}
if (!NewMI) return nullptr;
@@ -2404,8 +2901,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return NewMI;
}
-/// commuteInstruction - We have a few instructions that must be hacked on to
-/// commute them.
+/// We have a few instructions that must be hacked on to commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
@@ -2473,6 +2969,71 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
MI->getOperand(3).setImm(Mask ^ Imm);
return TargetInstrInfo::commuteInstruction(MI, NewMI);
}
+ case X86::PCLMULQDQrr:
+ case X86::VPCLMULQDQrr: {
+ // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
+ // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
+ unsigned Imm = MI->getOperand(3).getImm();
+ unsigned Src1Hi = Imm & 0x01;
+ unsigned Src2Hi = Imm & 0x10;
+ if (NewMI) {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MI = MF.CloneMachineInstr(MI);
+ NewMI = false;
+ }
+ MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
+ return TargetInstrInfo::commuteInstruction(MI, NewMI);
+ }
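
The immediate rewrite above is easiest to see in isolation: bit 0 selects the half of SRC1, bit 4 the half of SRC2, so commuting the sources just exchanges those two bits. Extracted as a pure function:

    // E.g. Imm = 0x01 (SRC1 high half, SRC2 low half) becomes 0x10 once
    // the sources are swapped; all other immediate bits are zero here.
    static unsigned commutePCLMULImm(unsigned Imm) {
      unsigned Src1Hi = Imm & 0x01;
      unsigned Src2Hi = Imm & 0x10;
      return (Src1Hi << 4) | (Src2Hi >> 4);
    }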
+ case X86::CMPPDrri:
+ case X86::CMPPSrri:
+ case X86::VCMPPDrri:
+ case X86::VCMPPSrri:
+ case X86::VCMPPDYrri:
+ case X86::VCMPPSYrri: {
+ // Float comparison can be safely commuted for
+ // Ordered/Unordered/Equal/NotEqual tests.
+ unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ switch (Imm) {
+ case 0x00: // EQUAL
+ case 0x03: // UNORDERED
+ case 0x04: // NOT EQUAL
+ case 0x07: // ORDERED
+ if (NewMI) {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MI = MF.CloneMachineInstr(MI);
+ NewMI = false;
+ }
+ return TargetInstrInfo::commuteInstruction(MI, NewMI);
+ default:
+ return nullptr;
+ }
+ }
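
The four predicates admitted here are exactly the ones symmetric in their operands, so the commute needs no immediate change. The admission test as a standalone sketch:

    // cmpps/cmppd imm[2:0]: 0 = EQ, 3 = UNORD, 4 = NEQ, 7 = ORD; each
    // holds under operand swap (a == b iff b == a, and so on).
    static bool cmppPredicateCommutes(unsigned Imm) {
      switch (Imm & 0x7) {
      case 0x00: case 0x03: case 0x04: case 0x07:
        return true;
      default:
        return false;
      }
    }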
+ case X86::VPCOMBri: case X86::VPCOMUBri:
+ case X86::VPCOMDri: case X86::VPCOMUDri:
+ case X86::VPCOMQri: case X86::VPCOMUQri:
+ case X86::VPCOMWri: case X86::VPCOMUWri: {
+ // Flip comparison mode immediate (if necessary).
+ unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ switch (Imm) {
+ case 0x00: Imm = 0x02; break; // LT -> GT
+ case 0x01: Imm = 0x03; break; // LE -> GE
+ case 0x02: Imm = 0x00; break; // GT -> LT
+ case 0x03: Imm = 0x01; break; // GE -> LE
+ case 0x04: // EQ
+ case 0x05: // NE
+ case 0x06: // FALSE
+ case 0x07: // TRUE
+ default:
+ break;
+ }
+ if (NewMI) {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MI = MF.CloneMachineInstr(MI);
+ NewMI = false;
+ }
+ MI->getOperand(3).setImm(Imm);
+ return TargetInstrInfo::commuteInstruction(MI, NewMI);
+ }
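
The XOP flip follows ordinary relational algebra: swapping operands turns a < b into b > a, so LT/GT and LE/GE trade places while EQ, NE, FALSE and TRUE stay put. The same table as a pure function:

    static unsigned flipVPCOMImm(unsigned Imm) {
      switch (Imm & 0x7) {
      case 0x00: return 0x02; // LT  -> GT
      case 0x01: return 0x03; // LE  -> GE
      case 0x02: return 0x00; // GT  -> LT
      case 0x03: return 0x01; // GE  -> LE
      default:   return Imm & 0x7; // EQ/NE/FALSE/TRUE are symmetric
      }
    }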
case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
@@ -2557,20 +3118,26 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
switch (MI->getOpcode()) {
- case X86::BLENDPDrri:
- case X86::BLENDPSrri:
- case X86::PBLENDWrri:
- case X86::VBLENDPDrri:
- case X86::VBLENDPSrri:
- case X86::VBLENDPDYrri:
- case X86::VBLENDPSYrri:
- case X86::VPBLENDDrri:
- case X86::VPBLENDDYrri:
- case X86::VPBLENDWrri:
- case X86::VPBLENDWYrri:
- SrcOpIdx1 = 1;
- SrcOpIdx2 = 2;
- return true;
+ case X86::CMPPDrri:
+ case X86::CMPPSrri:
+ case X86::VCMPPDrri:
+ case X86::VCMPPSrri:
+ case X86::VCMPPDYrri:
+ case X86::VCMPPSYrri: {
+ // Float comparison can be safely commuted for
+ // Ordered/Unordered/Equal/NotEqual tests.
+ unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ switch (Imm) {
+ case 0x00: // EQUAL
+ case 0x03: // UNORDERED
+ case 0x04: // NOT EQUAL
+ case 0x07: // ORDERED
+ SrcOpIdx1 = 1;
+ SrcOpIdx2 = 2;
+ return true;
+ }
+ return false;
+ }
case X86::VFMADDPDr231r:
case X86::VFMADDPSr231r:
case X86::VFMADDSDr231r:
@@ -2606,26 +3173,26 @@ bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) {
switch (BrOpc) {
default: return X86::COND_INVALID;
- case X86::JE_4: return X86::COND_E;
- case X86::JNE_4: return X86::COND_NE;
- case X86::JL_4: return X86::COND_L;
- case X86::JLE_4: return X86::COND_LE;
- case X86::JG_4: return X86::COND_G;
- case X86::JGE_4: return X86::COND_GE;
- case X86::JB_4: return X86::COND_B;
- case X86::JBE_4: return X86::COND_BE;
- case X86::JA_4: return X86::COND_A;
- case X86::JAE_4: return X86::COND_AE;
- case X86::JS_4: return X86::COND_S;
- case X86::JNS_4: return X86::COND_NS;
- case X86::JP_4: return X86::COND_P;
- case X86::JNP_4: return X86::COND_NP;
- case X86::JO_4: return X86::COND_O;
- case X86::JNO_4: return X86::COND_NO;
+ case X86::JE_1: return X86::COND_E;
+ case X86::JNE_1: return X86::COND_NE;
+ case X86::JL_1: return X86::COND_L;
+ case X86::JLE_1: return X86::COND_LE;
+ case X86::JG_1: return X86::COND_G;
+ case X86::JGE_1: return X86::COND_GE;
+ case X86::JB_1: return X86::COND_B;
+ case X86::JBE_1: return X86::COND_BE;
+ case X86::JA_1: return X86::COND_A;
+ case X86::JAE_1: return X86::COND_AE;
+ case X86::JS_1: return X86::COND_S;
+ case X86::JNS_1: return X86::COND_NS;
+ case X86::JP_1: return X86::COND_P;
+ case X86::JNP_1: return X86::COND_NP;
+ case X86::JO_1: return X86::COND_O;
+ case X86::JNO_1: return X86::COND_NO;
}
}
-/// getCondFromSETOpc - return condition code of a SET opcode.
+/// Return condition code of a SET opcode.
static X86::CondCode getCondFromSETOpc(unsigned Opc) {
switch (Opc) {
default: return X86::COND_INVALID;
@@ -2648,7 +3215,7 @@ static X86::CondCode getCondFromSETOpc(unsigned Opc) {
}
}
-/// getCondFromCmovOpc - return condition code of a CMov opcode.
+/// Return condition code of a CMov opcode.
X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
switch (Opc) {
default: return X86::COND_INVALID;
@@ -2706,26 +3273,26 @@ X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
- case X86::COND_E: return X86::JE_4;
- case X86::COND_NE: return X86::JNE_4;
- case X86::COND_L: return X86::JL_4;
- case X86::COND_LE: return X86::JLE_4;
- case X86::COND_G: return X86::JG_4;
- case X86::COND_GE: return X86::JGE_4;
- case X86::COND_B: return X86::JB_4;
- case X86::COND_BE: return X86::JBE_4;
- case X86::COND_A: return X86::JA_4;
- case X86::COND_AE: return X86::JAE_4;
- case X86::COND_S: return X86::JS_4;
- case X86::COND_NS: return X86::JNS_4;
- case X86::COND_P: return X86::JP_4;
- case X86::COND_NP: return X86::JNP_4;
- case X86::COND_O: return X86::JO_4;
- case X86::COND_NO: return X86::JNO_4;
+ case X86::COND_E: return X86::JE_1;
+ case X86::COND_NE: return X86::JNE_1;
+ case X86::COND_L: return X86::JL_1;
+ case X86::COND_LE: return X86::JLE_1;
+ case X86::COND_G: return X86::JG_1;
+ case X86::COND_GE: return X86::JGE_1;
+ case X86::COND_B: return X86::JB_1;
+ case X86::COND_BE: return X86::JBE_1;
+ case X86::COND_A: return X86::JA_1;
+ case X86::COND_AE: return X86::JAE_1;
+ case X86::COND_S: return X86::JS_1;
+ case X86::COND_NS: return X86::JNS_1;
+ case X86::COND_P: return X86::JP_1;
+ case X86::COND_NP: return X86::JNP_1;
+ case X86::COND_O: return X86::JO_1;
+ case X86::COND_NO: return X86::JNO_1;
}
}
-/// GetOppositeBranchCondition - Return the inverse of the specified condition,
+/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
switch (CC) {
@@ -2749,9 +3316,8 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
}
}
-/// getSwappedCondition - assume the flags are set by MI(a,b), return
-/// the condition code if we modify the instructions such that flags are
-/// set by MI(b,a).
+/// Assuming the flags are set by MI(a,b), return the condition code if we
+/// modify the instructions such that flags are set by MI(b,a).
static X86::CondCode getSwappedCondition(X86::CondCode CC) {
switch (CC) {
default: return X86::COND_INVALID;
@@ -2768,7 +3334,7 @@ static X86::CondCode getSwappedCondition(X86::CondCode CC) {
}
}
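
A concrete instance of the swap: flags produced by CMP a, b satisfy the "less" condition exactly when flags produced by CMP b, a satisfy "greater". At the value level:

    // COND_L over (a, b) agrees with COND_G over (b, a) for all inputs,
    // which is the per-condition property the table above encodes.
    static bool lessDirect(int A, int B)  { return A < B; }
    static bool lessSwapped(int A, int B) { return B > A; } // same result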
-/// getSETFromCond - Return a set opcode for the given condition and
+/// Return a set opcode for the given condition and
/// whether it has a memory operand.
unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
static const uint16_t Opc[16][2] = {
@@ -2794,7 +3360,7 @@ unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
return Opc[CC][HasMemoryOperand ? 1 : 0];
}
-/// getCMovFromCond - Return a cmov opcode for the given condition,
+/// Return a cmov opcode for the given condition,
/// register size in bytes, and operand type.
unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
bool HasMemoryOperand) {
@@ -2879,7 +3445,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return true;
// Handle unconditional branches.
- if (I->getOpcode() == X86::JMP_4) {
+ if (I->getOpcode() == X86::JMP_1) {
UnCondBrIter = I;
if (!AllowModify) {
@@ -2941,7 +3507,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
.addMBB(UnCondBrIter->getOperand(0).getMBB());
- BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
.addMBB(TargetBB);
OldInst->eraseFromParent();
@@ -3006,7 +3572,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
--I;
if (I->isDebugValue())
continue;
- if (I->getOpcode() != X86::JMP_4 &&
+ if (I->getOpcode() != X86::JMP_1 &&
getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
break;
// Remove the branch.
@@ -3031,7 +3597,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
return 1;
}
@@ -3041,16 +3607,16 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
switch (CC) {
case X86::COND_NP_OR_E:
// Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
++Count;
- BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB);
++Count;
break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
- BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB);
++Count;
- BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB);
++Count;
break;
default: {
@@ -3061,7 +3627,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
}
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
++Count;
}
return Count;
@@ -3117,7 +3683,7 @@ void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
-/// isHReg - Test if the given register is a physical h register.
+/// Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
}
@@ -3389,11 +3955,9 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
"Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
- bool isAligned = (MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment() >= Alignment) ||
- RI.canRealignStack(MF);
+ bool isAligned =
+ (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
+ RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
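
The condensed condition above is worth restating as a plain predicate (parameter names invented for illustration):

    // A spill slot counts as aligned when the function's stack alignment
    // already covers the spill size (floored at 16 bytes) or when the
    // register allocator is allowed to realign the stack.
    static bool spillSlotIsAligned(unsigned SpillSize, unsigned StackAlign,
                                   bool CanRealignStack) {
      unsigned Needed = SpillSize > 16 ? SpillSize : 16;
      return StackAlign >= Needed || CanRealignStack;
    }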
@@ -3428,11 +3992,9 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
- bool isAligned = (MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment() >= Alignment) ||
- RI.canRealignStack(MF);
+ bool isAligned =
+ (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
+ RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
@@ -3528,7 +4090,7 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
return false;
}
-/// isRedundantFlagInstr - check whether the first instruction, whose only
+/// Check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// This function can be extended later on.
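
The CMPrr/SUBrr case in miniature: CMP is a subtraction that discards its result, so a prior SUB on the same operands already left EFLAGS in the required state. A toy predicate for the same-order case (the real code also accepts swapped operands together with getSwappedCondition):

    static bool cmpMatchesPriorSub(unsigned CmpA, unsigned CmpB,
                                   unsigned SubA, unsigned SubB) {
      // Identical operand order implies identical flag results, so the
      // compare can be deleted in favor of the earlier subtraction.
      return CmpA == SubA && CmpB == SubB;
    }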
@@ -3571,7 +4133,7 @@ inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
return false;
}
-/// isDefConvertible - check whether the definition can be converted
+/// Check whether the definition can be converted
/// to remove a comparison against zero.
inline static bool isDefConvertible(MachineInstr *MI) {
switch (MI->getOpcode()) {
@@ -3601,14 +4163,12 @@ inline static bool isDefConvertible(MachineInstr *MI) {
case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
- case X86::DEC64_32r: case X86::DEC64_16r:
case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
- case X86::INC64_32r: case X86::INC64_16r:
case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
@@ -3659,8 +4219,7 @@ inline static bool isDefConvertible(MachineInstr *MI) {
}
}
-/// isUseDefConvertible - check whether the use can be converted
-/// to remove a comparison against zero.
+/// Check whether the use can be converted to remove a comparison against zero.
static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
switch (MI->getOpcode()) {
default: return X86::COND_INVALID;
@@ -3679,7 +4238,7 @@ static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
}
}
-/// optimizeCompareInstr - Check if there exists an earlier instruction that
+/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
bool X86InstrInfo::
@@ -3970,7 +4529,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
return true;
}
-/// optimizeLoadInstr - Try to remove the load by folding it to a register
+/// Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if the load defines a virtual
/// register, the virtual register is used once in the same BB, and the
/// instructions in-between do not load or store, and have no side effects.
@@ -4025,9 +4584,9 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
return nullptr;
}
-/// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
-/// instruction with two undef reads of the register being defined. This is
-/// used for mapping:
+/// Expand a single-def pseudo instruction to a two-addr
+/// instruction with two undef reads of the register being defined.
+/// This is used for mapping:
/// %xmm4 = V_SET0
/// to:
/// %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
@@ -4099,7 +4658,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::TEST8ri_NOREX:
MI->setDesc(get(X86::TEST8ri));
return true;
- case X86::KSET0B:
+ case X86::KSET0B:
case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
case X86::KSET1B:
case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
@@ -4179,7 +4738,7 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI, unsigned i,
+ MachineInstr *MI, unsigned OpNum,
const SmallVectorImpl<MachineOperand> &MOs,
unsigned Size, unsigned Align,
bool AllowCommute) const {
@@ -4188,12 +4747,11 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
bool isCallRegIndirect = Subtarget.callRegIndirect();
bool isTwoAddrFold = false;
- // Atom favors register form of call. So, we do not fold loads into calls
- // when X86Subtarget is Atom.
+ // For CPUs that favor the register form of a call,
+ // do not fold loads into calls.
if (isCallRegIndirect &&
- (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r)) {
+ (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r))
return nullptr;
- }
unsigned NumOps = MI->getDesc().getNumOperands();
bool isTwoAddr = NumOps > 1 &&
@@ -4209,13 +4767,13 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
- if (isTwoAddr && NumOps >= 2 && i < 2 &&
+ if (isTwoAddr && NumOps >= 2 && OpNum < 2 &&
MI->getOperand(0).isReg() &&
MI->getOperand(1).isReg() &&
MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
- } else if (i == 0) { // If operand 0
+ } else if (OpNum == 0) {
if (MI->getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
if (NewMI)
@@ -4223,12 +4781,14 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
}
OpcodeTablePtr = &RegOp2MemOpTable0;
- } else if (i == 1) {
+ } else if (OpNum == 1) {
OpcodeTablePtr = &RegOp2MemOpTable1;
- } else if (i == 2) {
+ } else if (OpNum == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
- } else if (i == 3) {
+ } else if (OpNum == 3) {
OpcodeTablePtr = &RegOp2MemOpTable3;
+ } else if (OpNum == 4) {
+ OpcodeTablePtr = &RegOp2MemOpTable4;
}
// If table selected...
@@ -4243,7 +4803,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
- unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize();
+ unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -4262,7 +4822,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
if (isTwoAddrFold)
NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
else
- NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this);
+ NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this);
if (NarrowToMOV32rm) {
// If this is the special case where we use a MOV32rm to load a 32-bit
@@ -4281,7 +4841,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// If the instruction and target operand are commutable, commute the
// instruction and try again.
if (AllowCommute) {
- unsigned OriginalOpIdx = i, CommuteOpIdx1, CommuteOpIdx2;
+ unsigned OriginalOpIdx = OpNum, CommuteOpIdx1, CommuteOpIdx2;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
bool HasDef = MI->getDesc().getNumDefs();
unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
@@ -4339,11 +4899,11 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// No fusion
if (PrintFailedFusing && !MI->isCopy())
- dbgs() << "We failed to fuse operand " << i << " in " << *MI;
+ dbgs() << "We failed to fuse operand " << OpNum << " in " << *MI;
return nullptr;
}
-/// hasPartialRegUpdate - Return true for all instructions that only update
+/// Return true for all instructions that only update
/// the first 32 or 64 bits of the destination register and leave the rest
/// unmodified. This can be used to avoid folding loads if the instructions
/// only update part of the destination register, and the non-updated part is
@@ -4362,30 +4922,50 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
static bool hasPartialRegUpdate(unsigned Opcode) {
switch (Opcode) {
case X86::CVTSI2SSrr:
+ case X86::CVTSI2SSrm:
case X86::CVTSI2SS64rr:
+ case X86::CVTSI2SS64rm:
case X86::CVTSI2SDrr:
+ case X86::CVTSI2SDrm:
case X86::CVTSI2SD64rr:
+ case X86::CVTSI2SD64rm:
case X86::CVTSD2SSrr:
+ case X86::CVTSD2SSrm:
case X86::Int_CVTSD2SSrr:
+ case X86::Int_CVTSD2SSrm:
case X86::CVTSS2SDrr:
+ case X86::CVTSS2SDrm:
case X86::Int_CVTSS2SDrr:
+ case X86::Int_CVTSS2SDrm:
case X86::RCPSSr:
+ case X86::RCPSSm:
case X86::RCPSSr_Int:
+ case X86::RCPSSm_Int:
case X86::ROUNDSDr:
+ case X86::ROUNDSDm:
case X86::ROUNDSDr_Int:
case X86::ROUNDSSr:
+ case X86::ROUNDSSm:
case X86::ROUNDSSr_Int:
case X86::RSQRTSSr:
+ case X86::RSQRTSSm:
case X86::RSQRTSSr_Int:
+ case X86::RSQRTSSm_Int:
case X86::SQRTSSr:
+ case X86::SQRTSSm:
case X86::SQRTSSr_Int:
+ case X86::SQRTSSm_Int:
+ case X86::SQRTSDr:
+ case X86::SQRTSDm:
+ case X86::SQRTSDr_Int:
+ case X86::SQRTSDm_Int:
return true;
}
return false;
}
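
Why these opcodes are singled out, as a value-level analogue (struct and helper invented for illustration): the scalar form writes only the low lane and inherits the rest, so it cannot issue ahead of whatever last wrote the register, a false dependency this predicate lets the compiler avoid.

    #include <cstdint>
    #include <cstring>
    struct Xmm { uint32_t Lane[4]; };
    // cvtsi2ss-like behavior: only Lane[0] is produced; Lane[1..3] are
    // copied from the old register value, which is the dependency.
    static Xmm cvtsi2ssModel(Xmm Old, int X) {
      Xmm R = Old; // inheriting the upper lanes is the false dependency
      float F = (float)X;
      std::memcpy(&R.Lane[0], &F, sizeof F); // write low 32 bits only
      return R;
    }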
-/// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle
+/// Inform the ExeDepsFix pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
@@ -4415,28 +4995,52 @@ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
static bool hasUndefRegUpdate(unsigned Opcode) {
switch (Opcode) {
case X86::VCVTSI2SSrr:
+ case X86::VCVTSI2SSrm:
case X86::Int_VCVTSI2SSrr:
+ case X86::Int_VCVTSI2SSrm:
case X86::VCVTSI2SS64rr:
+ case X86::VCVTSI2SS64rm:
case X86::Int_VCVTSI2SS64rr:
+ case X86::Int_VCVTSI2SS64rm:
case X86::VCVTSI2SDrr:
+ case X86::VCVTSI2SDrm:
case X86::Int_VCVTSI2SDrr:
+ case X86::Int_VCVTSI2SDrm:
case X86::VCVTSI2SD64rr:
+ case X86::VCVTSI2SD64rm:
case X86::Int_VCVTSI2SD64rr:
+ case X86::Int_VCVTSI2SD64rm:
case X86::VCVTSD2SSrr:
+ case X86::VCVTSD2SSrm:
case X86::Int_VCVTSD2SSrr:
+ case X86::Int_VCVTSD2SSrm:
case X86::VCVTSS2SDrr:
+ case X86::VCVTSS2SDrm:
case X86::Int_VCVTSS2SDrr:
+ case X86::Int_VCVTSS2SDrm:
case X86::VRCPSSr:
+ case X86::VRCPSSm:
+ case X86::VRCPSSm_Int:
case X86::VROUNDSDr:
+ case X86::VROUNDSDm:
case X86::VROUNDSDr_Int:
case X86::VROUNDSSr:
+ case X86::VROUNDSSm:
case X86::VROUNDSSr_Int:
case X86::VRSQRTSSr:
+ case X86::VRSQRTSSm:
+ case X86::VRSQRTSSm_Int:
case X86::VSQRTSSr:
-
- // AVX-512
+ case X86::VSQRTSSm:
+ case X86::VSQRTSSm_Int:
+ case X86::VSQRTSDr:
+ case X86::VSQRTSDm:
+ case X86::VSQRTSDm_Int:
+ // AVX-512
case X86::VCVTSD2SSZrr:
+ case X86::VCVTSD2SSZrm:
case X86::VCVTSS2SDZrr:
+ case X86::VCVTSS2SDZrm:
return true;
}
@@ -4509,8 +5113,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
@@ -4520,10 +5123,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
// If the function stack isn't realigned we don't want to fold instructions
// that need increased alignment.
if (!RI.needsStackRealignment(MF))
- Alignment = std::min(Alignment, MF.getTarget()
- .getSubtargetImpl()
- ->getFrameLowering()
- ->getStackAlignment());
+ Alignment =
+ std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
@@ -4587,8 +5188,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
@@ -4743,7 +5343,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
- } else if (OpNum == 0) { // If operand 0
+ } else if (OpNum == 0) {
if (Opc == X86::MOV32r0)
return true;
@@ -4986,7 +5586,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
NewNodes.push_back(Store);
// Preserve memory reference information.
- cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
+ cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
}
return true;
@@ -5181,26 +5781,26 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
switch(Second->getOpcode()) {
default:
return false;
- case X86::JE_4:
- case X86::JNE_4:
- case X86::JL_4:
- case X86::JLE_4:
- case X86::JG_4:
- case X86::JGE_4:
+ case X86::JE_1:
+ case X86::JNE_1:
+ case X86::JL_1:
+ case X86::JLE_1:
+ case X86::JG_1:
+ case X86::JGE_1:
FuseKind = FuseInc;
break;
- case X86::JB_4:
- case X86::JBE_4:
- case X86::JA_4:
- case X86::JAE_4:
+ case X86::JB_1:
+ case X86::JBE_1:
+ case X86::JA_1:
+ case X86::JAE_1:
FuseKind = FuseCmp;
break;
- case X86::JS_4:
- case X86::JNS_4:
- case X86::JP_4:
- case X86::JNP_4:
- case X86::JO_4:
- case X86::JNO_4:
+ case X86::JS_1:
+ case X86::JNS_1:
+ case X86::JP_1:
+ case X86::JNP_1:
+ case X86::JO_1:
+ case X86::JNO_1:
FuseKind = FuseTest;
break;
}
@@ -5313,14 +5913,10 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
return FuseKind == FuseCmp || FuseKind == FuseInc;
case X86::INC16r:
case X86::INC32r:
- case X86::INC64_16r:
- case X86::INC64_32r:
case X86::INC64r:
case X86::INC8r:
case X86::DEC16r:
case X86::DEC32r:
- case X86::DEC64_16r:
- case X86::DEC64_32r:
case X86::DEC64r:
case X86::DEC8r:
return FuseKind == FuseInc;
@@ -5345,7 +5941,7 @@ isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}
-/// getGlobalBaseReg - Return a virtual register initialized with the
+/// Return a virtual register initialized with
/// the global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
@@ -5478,7 +6074,7 @@ void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
MI->setDesc(get(table[Domain-1]));
}
-/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+/// Return the noop instruction to use for a noop.
void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
@@ -5489,7 +6085,7 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
// getUnconditionalBranch and getTrap.
void X86InstrInfo::getUnconditionalBranch(
MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
- Branch.setOpcode(X86::JMP_4);
+ Branch.setOpcode(X86::JMP_1);
Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
}
@@ -5595,7 +6191,7 @@ hasHighOperandLatency(const InstrItineraryData *ItinData,
}
namespace {
- /// CGBR - Create Global Base Reg pass. This initializes the PIC
+ /// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
struct CGBR : public MachineFunctionPass {
static char ID;
@@ -5604,10 +6200,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
// Don't do anything if this is 64-bit as 64-bit PIC
// uses RIP relative addressing.
- if (TM->getSubtarget<X86Subtarget>().is64Bit())
+ if (STI.is64Bit())
return false;
// Only emit a global base reg in PIC mode.
@@ -5626,10 +6223,10 @@ namespace {
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+ const X86InstrInfo *TII = STI.getInstrInfo();
unsigned PC;
- if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
+ if (STI.isPICStyleGOT())
PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
else
PC = GlobalBaseReg;
@@ -5640,7 +6237,7 @@ namespace {
// If we're using vanilla 'GOT' PIC style, we should use relative addressing
// not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
- if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
+ if (STI.isPICStyleGOT()) {
// Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
.addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
@@ -5721,10 +6318,9 @@ namespace {
MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const X86TargetMachine *TM =
- static_cast<const X86TargetMachine *>(&MF->getTarget());
- const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
- const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+ const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
+ const bool is64Bit = STI.is64Bit();
+ const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
@@ -5742,10 +6338,9 @@ namespace {
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const X86TargetMachine *TM =
- static_cast<const X86TargetMachine *>(&MF->getTarget());
- const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
- const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+ const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
+ const bool is64Bit = STI.is64Bit();
+ const X86InstrInfo *TII = STI.getInstrInfo();
// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 57b1958..4d15467 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -152,6 +152,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
RegOp2MemOpTableType RegOp2MemOpTable1;
RegOp2MemOpTableType RegOp2MemOpTable2;
RegOp2MemOpTableType RegOp2MemOpTable3;
+ RegOp2MemOpTableType RegOp2MemOpTable4;
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
@@ -174,6 +175,11 @@ public:
///
const X86RegisterInfo &getRegisterInfo() const { return RI; }
+ /// getSPAdjust - This returns the stack pointer adjustment made by
+ /// this instruction. For x86, we need to handle more complex call
+ /// sequences involving PUSHes.
+ int getSPAdjust(const MachineInstr *MI) const override;
+
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
/// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 3dbf819..9881caf 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -32,7 +32,8 @@ def SDTX86Cmov : SDTypeProfile<1, 4,
// Unary and binary operator instructions that set EFLAGS as a side-effect.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
- [SDTCisInt<0>, SDTCisVT<1, i32>]>;
+ [SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
[SDTCisSameAs<0, 2>,
@@ -188,11 +189,15 @@ def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
def X86rdtscp : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdpmc : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
+def X86RecoverFrameAlloc : SDNode<"ISD::FRAME_ALLOC_RECOVER",
+ SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisInt<1>]>>;
+
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
@@ -261,121 +266,75 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
}
-def X86Mem8AsmOperand : AsmOperandClass {
- let Name = "Mem8"; let RenderMethod = "addMemOperands";
-}
-def X86Mem16AsmOperand : AsmOperandClass {
- let Name = "Mem16"; let RenderMethod = "addMemOperands";
-}
-def X86Mem32AsmOperand : AsmOperandClass {
- let Name = "Mem32"; let RenderMethod = "addMemOperands";
-}
-def X86Mem64AsmOperand : AsmOperandClass {
- let Name = "Mem64"; let RenderMethod = "addMemOperands";
-}
-def X86Mem80AsmOperand : AsmOperandClass {
- let Name = "Mem80"; let RenderMethod = "addMemOperands";
-}
-def X86Mem128AsmOperand : AsmOperandClass {
- let Name = "Mem128"; let RenderMethod = "addMemOperands";
-}
-def X86Mem256AsmOperand : AsmOperandClass {
- let Name = "Mem256"; let RenderMethod = "addMemOperands";
-}
-def X86Mem512AsmOperand : AsmOperandClass {
- let Name = "Mem512"; let RenderMethod = "addMemOperands";
-}
-
-// Gather mem operands
-def X86MemVX32Operand : AsmOperandClass {
- let Name = "MemVX32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVY32Operand : AsmOperandClass {
- let Name = "MemVY32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVZ32Operand : AsmOperandClass {
- let Name = "MemVZ32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVX64Operand : AsmOperandClass {
- let Name = "MemVX64"; let RenderMethod = "addMemOperands";
-}
-def X86MemVY64Operand : AsmOperandClass {
- let Name = "MemVY64"; let RenderMethod = "addMemOperands";
-}
-def X86MemVZ64Operand : AsmOperandClass {
- let Name = "MemVZ64"; let RenderMethod = "addMemOperands";
+let RenderMethod = "addMemOperands" in {
+ def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
+ def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
+ def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
+ def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
+ def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
+ def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
+ def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
+ def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
+ // Gather mem operands
+ def X86MemVX32Operand : AsmOperandClass { let Name = "MemVX32"; }
+ def X86MemVY32Operand : AsmOperandClass { let Name = "MemVY32"; }
+ def X86MemVZ32Operand : AsmOperandClass { let Name = "MemVZ32"; }
+ def X86MemVX64Operand : AsmOperandClass { let Name = "MemVX64"; }
+ def X86MemVY64Operand : AsmOperandClass { let Name = "MemVY64"; }
+ def X86MemVZ64Operand : AsmOperandClass { let Name = "MemVZ64"; }
}
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
let SuperClasses = [X86MemAsmOperand];
}
-class X86MemOperand<string printMethod> : Operand<iPTR> {
+
+class X86MemOperand<string printMethod,
+ AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
let PrintMethod = printMethod;
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = parserMatchClass;
+ let OperandType = "OPERAND_MEMORY";
}
-let OperandType = "OPERAND_MEMORY" in {
+// Gather mem operands
+class X86VMemOperand<RegisterClass RC, string printMethod,
+ AsmOperandClass parserMatchClass>
+ : X86MemOperand<printMethod, parserMatchClass> {
+ let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, i8imm);
+}
+
+def anymem : X86MemOperand<"printanymem">;
+
def opaque32mem : X86MemOperand<"printopaquemem">;
def opaque48mem : X86MemOperand<"printopaquemem">;
def opaque80mem : X86MemOperand<"printopaquemem">;
def opaque512mem : X86MemOperand<"printopaquemem">;
-def i8mem : X86MemOperand<"printi8mem"> {
- let ParserMatchClass = X86Mem8AsmOperand; }
-def i16mem : X86MemOperand<"printi16mem"> {
- let ParserMatchClass = X86Mem16AsmOperand; }
-def i32mem : X86MemOperand<"printi32mem"> {
- let ParserMatchClass = X86Mem32AsmOperand; }
-def i64mem : X86MemOperand<"printi64mem"> {
- let ParserMatchClass = X86Mem64AsmOperand; }
-def i128mem : X86MemOperand<"printi128mem"> {
- let ParserMatchClass = X86Mem128AsmOperand; }
-def i256mem : X86MemOperand<"printi256mem"> {
- let ParserMatchClass = X86Mem256AsmOperand; }
-def i512mem : X86MemOperand<"printi512mem"> {
- let ParserMatchClass = X86Mem512AsmOperand; }
-def f32mem : X86MemOperand<"printf32mem"> {
- let ParserMatchClass = X86Mem32AsmOperand; }
-def f64mem : X86MemOperand<"printf64mem"> {
- let ParserMatchClass = X86Mem64AsmOperand; }
-def f80mem : X86MemOperand<"printf80mem"> {
- let ParserMatchClass = X86Mem80AsmOperand; }
-def f128mem : X86MemOperand<"printf128mem"> {
- let ParserMatchClass = X86Mem128AsmOperand; }
-def f256mem : X86MemOperand<"printf256mem">{
- let ParserMatchClass = X86Mem256AsmOperand; }
-def f512mem : X86MemOperand<"printf512mem">{
- let ParserMatchClass = X86Mem512AsmOperand; }
-def v512mem : Operand<iPTR> {
- let PrintMethod = "printf512mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
- let ParserMatchClass = X86Mem512AsmOperand; }
+def i8mem : X86MemOperand<"printi8mem", X86Mem8AsmOperand>;
+def i16mem : X86MemOperand<"printi16mem", X86Mem16AsmOperand>;
+def i32mem : X86MemOperand<"printi32mem", X86Mem32AsmOperand>;
+def i64mem : X86MemOperand<"printi64mem", X86Mem64AsmOperand>;
+def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
+def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
+def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
+def f32mem : X86MemOperand<"printf32mem", X86Mem32AsmOperand>;
+def f64mem : X86MemOperand<"printf64mem", X86Mem64AsmOperand>;
+def f80mem : X86MemOperand<"printf80mem", X86Mem80AsmOperand>;
+def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
+def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
+def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
+
+def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
// Gather mem operands
-def vx32mem : X86MemOperand<"printi32mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
- let ParserMatchClass = X86MemVX32Operand; }
-def vy32mem : X86MemOperand<"printi32mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
- let ParserMatchClass = X86MemVY32Operand; }
-def vx64mem : X86MemOperand<"printi64mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
- let ParserMatchClass = X86MemVX64Operand; }
-def vy64mem : X86MemOperand<"printi64mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
- let ParserMatchClass = X86MemVY64Operand; }
-def vy64xmem : X86MemOperand<"printi64mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR256X, i32imm, i8imm);
- let ParserMatchClass = X86MemVY64Operand; }
-def vz32mem : X86MemOperand<"printi32mem">{
- let MIOperandInfo = (ops ptr_rc, i16imm, VR512, i32imm, i8imm);
- let ParserMatchClass = X86MemVZ32Operand; }
-def vz64mem : X86MemOperand<"printi64mem">{
- let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
- let ParserMatchClass = X86MemVZ64Operand; }
-}
+def vx32mem : X86VMemOperand<VR128, "printi32mem", X86MemVX32Operand>;
+def vy32mem : X86VMemOperand<VR256, "printi32mem", X86MemVY32Operand>;
+def vx64mem : X86VMemOperand<VR128, "printi64mem", X86MemVX64Operand>;
+def vy64mem : X86VMemOperand<VR256, "printi64mem", X86MemVY64Operand>;
+def vy64xmem : X86VMemOperand<VR256X, "printi64mem", X86MemVY64Operand>;
+def vz32mem : X86VMemOperand<VR512, "printi32mem", X86MemVZ32Operand>;
+def vz64mem : X86VMemOperand<VR512, "printi64mem", X86MemVZ64Operand>;
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
// plain GR64, so that it doesn't potentially require a REX prefix.
@@ -424,125 +383,180 @@ def brtarget8 : Operand<OtherVT>;
}
-def X86SrcIdx8Operand : AsmOperandClass {
- let Name = "SrcIdx8";
- let RenderMethod = "addSrcIdxOperands";
- let SuperClasses = [X86Mem8AsmOperand];
-}
-def X86SrcIdx16Operand : AsmOperandClass {
- let Name = "SrcIdx16";
- let RenderMethod = "addSrcIdxOperands";
- let SuperClasses = [X86Mem16AsmOperand];
-}
-def X86SrcIdx32Operand : AsmOperandClass {
- let Name = "SrcIdx32";
- let RenderMethod = "addSrcIdxOperands";
- let SuperClasses = [X86Mem32AsmOperand];
-}
-def X86SrcIdx64Operand : AsmOperandClass {
- let Name = "SrcIdx64";
- let RenderMethod = "addSrcIdxOperands";
- let SuperClasses = [X86Mem64AsmOperand];
-}
-def X86DstIdx8Operand : AsmOperandClass {
- let Name = "DstIdx8";
- let RenderMethod = "addDstIdxOperands";
- let SuperClasses = [X86Mem8AsmOperand];
-}
-def X86DstIdx16Operand : AsmOperandClass {
- let Name = "DstIdx16";
- let RenderMethod = "addDstIdxOperands";
- let SuperClasses = [X86Mem16AsmOperand];
-}
-def X86DstIdx32Operand : AsmOperandClass {
- let Name = "DstIdx32";
- let RenderMethod = "addDstIdxOperands";
- let SuperClasses = [X86Mem32AsmOperand];
-}
-def X86DstIdx64Operand : AsmOperandClass {
- let Name = "DstIdx64";
- let RenderMethod = "addDstIdxOperands";
- let SuperClasses = [X86Mem64AsmOperand];
-}
-def X86MemOffs8AsmOperand : AsmOperandClass {
- let Name = "MemOffs8";
- let RenderMethod = "addMemOffsOperands";
- let SuperClasses = [X86Mem8AsmOperand];
-}
-def X86MemOffs16AsmOperand : AsmOperandClass {
- let Name = "MemOffs16";
- let RenderMethod = "addMemOffsOperands";
- let SuperClasses = [X86Mem16AsmOperand];
-}
-def X86MemOffs32AsmOperand : AsmOperandClass {
- let Name = "MemOffs32";
- let RenderMethod = "addMemOffsOperands";
- let SuperClasses = [X86Mem32AsmOperand];
-}
-def X86MemOffs64AsmOperand : AsmOperandClass {
- let Name = "MemOffs64";
- let RenderMethod = "addMemOffsOperands";
- let SuperClasses = [X86Mem64AsmOperand];
-}
-let OperandType = "OPERAND_MEMORY" in {
-def srcidx8 : Operand<iPTR> {
- let ParserMatchClass = X86SrcIdx8Operand;
- let MIOperandInfo = (ops ptr_rc, i8imm);
- let PrintMethod = "printSrcIdx8"; }
-def srcidx16 : Operand<iPTR> {
- let ParserMatchClass = X86SrcIdx16Operand;
- let MIOperandInfo = (ops ptr_rc, i8imm);
- let PrintMethod = "printSrcIdx16"; }
-def srcidx32 : Operand<iPTR> {
- let ParserMatchClass = X86SrcIdx32Operand;
- let MIOperandInfo = (ops ptr_rc, i8imm);
- let PrintMethod = "printSrcIdx32"; }
-def srcidx64 : Operand<iPTR> {
- let ParserMatchClass = X86SrcIdx64Operand;
+// Special parser to detect 16-bit mode and select a 16-bit displacement.
+def X86AbsMem16AsmOperand : AsmOperandClass {
+ let Name = "AbsMem16";
+ let RenderMethod = "addAbsMemOperands";
+ let SuperClasses = [X86AbsMemAsmOperand];
+}
+
+// Branch targets have OtherVT type and print as pc-relative values.
+let OperandType = "OPERAND_PCREL",
+ PrintMethod = "printPCRelImm" in {
+let ParserMatchClass = X86AbsMem16AsmOperand in
+ def brtarget16 : Operand<OtherVT>;
+let ParserMatchClass = X86AbsMemAsmOperand in
+ def brtarget32 : Operand<OtherVT>;
+}
+
+let RenderMethod = "addSrcIdxOperands" in {
+ def X86SrcIdx8Operand : AsmOperandClass {
+ let Name = "SrcIdx8";
+ let SuperClasses = [X86Mem8AsmOperand];
+ }
+ def X86SrcIdx16Operand : AsmOperandClass {
+ let Name = "SrcIdx16";
+ let SuperClasses = [X86Mem16AsmOperand];
+ }
+ def X86SrcIdx32Operand : AsmOperandClass {
+ let Name = "SrcIdx32";
+ let SuperClasses = [X86Mem32AsmOperand];
+ }
+ def X86SrcIdx64Operand : AsmOperandClass {
+ let Name = "SrcIdx64";
+ let SuperClasses = [X86Mem64AsmOperand];
+ }
+} // RenderMethod = "addSrcIdxOperands"
+
+let RenderMethod = "addDstIdxOperands" in {
+ def X86DstIdx8Operand : AsmOperandClass {
+ let Name = "DstIdx8";
+ let SuperClasses = [X86Mem8AsmOperand];
+ }
+ def X86DstIdx16Operand : AsmOperandClass {
+ let Name = "DstIdx16";
+ let SuperClasses = [X86Mem16AsmOperand];
+ }
+ def X86DstIdx32Operand : AsmOperandClass {
+ let Name = "DstIdx32";
+ let SuperClasses = [X86Mem32AsmOperand];
+ }
+ def X86DstIdx64Operand : AsmOperandClass {
+ let Name = "DstIdx64";
+ let SuperClasses = [X86Mem64AsmOperand];
+ }
+} // RenderMethod = "addDstIdxOperands"
+
+let RenderMethod = "addMemOffsOperands" in {
+ def X86MemOffs16_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_8";
+ let SuperClasses = [X86Mem8AsmOperand];
+ }
+ def X86MemOffs16_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_16";
+ let SuperClasses = [X86Mem16AsmOperand];
+ }
+ def X86MemOffs16_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_32";
+ let SuperClasses = [X86Mem32AsmOperand];
+ }
+ def X86MemOffs32_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_8";
+ let SuperClasses = [X86Mem8AsmOperand];
+ }
+ def X86MemOffs32_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_16";
+ let SuperClasses = [X86Mem16AsmOperand];
+ }
+ def X86MemOffs32_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_32";
+ let SuperClasses = [X86Mem32AsmOperand];
+ }
+ def X86MemOffs32_64AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_64";
+ let SuperClasses = [X86Mem64AsmOperand];
+ }
+ def X86MemOffs64_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_8";
+ let SuperClasses = [X86Mem8AsmOperand];
+ }
+ def X86MemOffs64_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_16";
+ let SuperClasses = [X86Mem16AsmOperand];
+ }
+ def X86MemOffs64_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_32";
+ let SuperClasses = [X86Mem32AsmOperand];
+ }
+ def X86MemOffs64_64AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_64";
+ let SuperClasses = [X86Mem64AsmOperand];
+ }
+} // RenderMethod = "addMemOffsOperands"
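// Naming convention used here: MemOffs<A>_<D> pairs an <A>-bit absolute
// offset with a <D>-bit data size, so e.g. MemOffs32_8 backs the offset32_8
// operand of MOV8ao32 further down (32-bit address, 8-bit load into AL).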
+
+class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
+ : X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops ptr_rc, i8imm);
- let PrintMethod = "printSrcIdx64"; }
-def dstidx8 : Operand<iPTR> {
- let ParserMatchClass = X86DstIdx8Operand;
- let MIOperandInfo = (ops ptr_rc);
- let PrintMethod = "printDstIdx8"; }
-def dstidx16 : Operand<iPTR> {
- let ParserMatchClass = X86DstIdx16Operand;
- let MIOperandInfo = (ops ptr_rc);
- let PrintMethod = "printDstIdx16"; }
-def dstidx32 : Operand<iPTR> {
- let ParserMatchClass = X86DstIdx32Operand;
- let MIOperandInfo = (ops ptr_rc);
- let PrintMethod = "printDstIdx32"; }
-def dstidx64 : Operand<iPTR> {
- let ParserMatchClass = X86DstIdx64Operand;
+}
+
+class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
+ : X86MemOperand<printMethod, parserMatchClass> {
let MIOperandInfo = (ops ptr_rc);
- let PrintMethod = "printDstIdx64"; }
-def offset8 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs8AsmOperand;
- let MIOperandInfo = (ops i64imm, i8imm);
- let PrintMethod = "printMemOffs8"; }
-def offset16 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs16AsmOperand;
- let MIOperandInfo = (ops i64imm, i8imm);
- let PrintMethod = "printMemOffs16"; }
-def offset32 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs32AsmOperand;
- let MIOperandInfo = (ops i64imm, i8imm);
- let PrintMethod = "printMemOffs32"; }
-def offset64 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs64AsmOperand;
- let MIOperandInfo = (ops i64imm, i8imm);
- let PrintMethod = "printMemOffs64"; }
}
+def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
+def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
+def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
+def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
+def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
+def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
+def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
+def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
+
+class X86MemOffsOperand<Operand immOperand, string printMethod,
+ AsmOperandClass parserMatchClass>
+ : X86MemOperand<printMethod, parserMatchClass> {
+ let MIOperandInfo = (ops immOperand, i8imm);
+}
+
+def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
+ X86MemOffs16_8AsmOperand>;
+def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
+ X86MemOffs16_16AsmOperand>;
+def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
+ X86MemOffs16_32AsmOperand>;
+def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
+ X86MemOffs32_8AsmOperand>;
+def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
+ X86MemOffs32_16AsmOperand>;
+def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
+ X86MemOffs32_32AsmOperand>;
+def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
+ X86MemOffs32_64AsmOperand>;
+def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
+ X86MemOffs64_8AsmOperand>;
+def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
+ X86MemOffs64_16AsmOperand>;
+def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
+ X86MemOffs64_32AsmOperand>;
+def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
+ X86MemOffs64_64AsmOperand>;
def SSECC : Operand<i8> {
- let PrintMethod = "printSSECC";
+ let PrintMethod = "printSSEAVXCC";
let OperandType = "OPERAND_IMMEDIATE";
}
+def i8immZExt3 : ImmLeaf<i8, [{
+ return Imm >= 0 && Imm < 8;
+}]>;
+
def AVXCC : Operand<i8> {
- let PrintMethod = "printAVXCC";
+ let PrintMethod = "printSSEAVXCC";
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
+def i8immZExt5 : ImmLeaf<i8, [{
+ return Imm >= 0 && Imm < 32;
+}]>;
+
+def AVX512ICC : Operand<i8> {
+ let PrintMethod = "printSSEAVXCC";
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
+def XOPCC : Operand<i8> {
+ let PrintMethod = "printXOPCC";
let OperandType = "OPERAND_IMMEDIATE";
}
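// Illustrative sketch (not in this commit; SomeNode/SOMEINSN are placeholders):
// an ImmLeaf is a predicate on a literal immediate, evaluated at instruction
// selection time. A pattern written against one, e.g.
//
//   def : Pat<(SomeNode VR128:$x, (i8 i8immZExt3:$cc)),
//             (SOMEINSN VR128:$x, imm:$cc)>;
//
// only fires when the constant passes the C++ check (here 0 <= Imm < 8). The
// compare multiclasses below thread i8immZExt3 through the SSE patterns and
// i8immZExt5 through the AVX ones.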
@@ -599,6 +613,14 @@ def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
ImmSExti64i32AsmOperand];
}
+// Unsigned immediate used by SSE/AVX instructions. Accepts either a value
+// in [0, 0xFF] or its sign-extended 64-bit form in
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF].
+def ImmUnsignedi8AsmOperand : AsmOperandClass {
+ let Name = "ImmUnsignedi8";
+ let RenderMethod = "addImmOperands";
+}
+
// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm : Operand<i16> {
@@ -617,6 +639,27 @@ def i64i32imm : Operand<i64> {
let OperandType = "OPERAND_IMMEDIATE";
}
+// 64-bits but only 8 bits are significant.
+def i64i8imm : Operand<i64> {
+ let ParserMatchClass = ImmSExti64i8AsmOperand;
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
+// Unsigned 8-bit immediate used by SSE/AVX instructions.
+def u8imm : Operand<i8> {
+ let PrintMethod = "printU8Imm";
+ let ParserMatchClass = ImmUnsignedi8AsmOperand;
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
+// 32-bit immediate but only 8-bits are significant and they are unsigned.
+// Used by some SSE/AVX instructions that use intrinsics.
+def i32u8imm : Operand<i32> {
+ let PrintMethod = "printU8Imm";
+ let ParserMatchClass = ImmUnsignedi8AsmOperand;
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
@@ -625,21 +668,15 @@ def i64i32imm_pcrel : Operand<i64> {
let OperandType = "OPERAND_PCREL";
}
-// 64-bits but only 8 bits are significant.
-def i64i8imm : Operand<i64> {
- let ParserMatchClass = ImmSExti64i8AsmOperand;
- let OperandType = "OPERAND_IMMEDIATE";
-}
-
def lea64_32mem : Operand<i32> {
- let PrintMethod = "printi32mem";
+ let PrintMethod = "printanymem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
- let PrintMethod = "printi64mem";
+ let PrintMethod = "printanymem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
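// The switch to printanymem drops the size directive when printing these LEA
// operands, presumably because the operand is only an address computation and
// no access width applies; e.g. Intel syntax prints
// "lea eax, [rdx + rcx*8 + 4]" rather than "lea eax, dword ptr [rdx + ...]".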
@@ -676,6 +713,9 @@ def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
def tls64baseaddr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
+def vectoraddr : ComplexPattern<iPTR, 5, "SelectAddr", [],[SDNPWantParent]>;
+//def vectoraddr : ComplexPattern<iPTR, 5, "SelectVectorAddr", [],[SDNPWantParent]>;
+
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def HasCMov : Predicate<"Subtarget->hasCMov()">;
@@ -706,14 +746,19 @@ def HasAVX512 : Predicate<"Subtarget->hasAVX512()">,
def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
-def HasCDI : Predicate<"Subtarget->hasCDI()">;
-def HasPFI : Predicate<"Subtarget->hasPFI()">;
-def HasERI : Predicate<"Subtarget->hasERI()">;
-def HasDQI : Predicate<"Subtarget->hasDQI()">;
+def HasCDI : Predicate<"Subtarget->hasCDI()">,
+ AssemblerPredicate<"FeatureCDI", "AVX-512 CD ISA">;
+def HasPFI : Predicate<"Subtarget->hasPFI()">,
+ AssemblerPredicate<"FeaturePFI", "AVX-512 PF ISA">;
+def HasERI : Predicate<"Subtarget->hasERI()">,
+ AssemblerPredicate<"FeatureERI", "AVX-512 ER ISA">;
+def HasDQI : Predicate<"Subtarget->hasDQI()">,
+ AssemblerPredicate<"FeatureDQI", "AVX-512 DQ ISA">;
def NoDQI : Predicate<"!Subtarget->hasDQI()">;
-def HasBWI : Predicate<"Subtarget->hasBWI()">;
+def HasBWI : Predicate<"Subtarget->hasBWI()">,
+ AssemblerPredicate<"FeatureBWI", "AVX-512 BW ISA">;
def HasVLX : Predicate<"Subtarget->hasVLX()">,
- AssemblerPredicate<"FeatureVLX", "AVX-512 VLX ISA">;
+ AssemblerPredicate<"FeatureVLX", "AVX-512 VL ISA">;
def NoVLX : Predicate<"!Subtarget->hasVLX()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
@@ -736,10 +781,8 @@ def HasHLE : Predicate<"Subtarget->hasHLE()">;
def HasTSX : Predicate<"Subtarget->hasRTM() || Subtarget->hasHLE()">;
def HasADX : Predicate<"Subtarget->hasADX()">;
def HasSHA : Predicate<"Subtarget->hasSHA()">;
-def HasSGX : Predicate<"Subtarget->hasSGX()">;
def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
-def HasSMAP : Predicate<"Subtarget->hasSMAP()">;
def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
@@ -757,6 +800,9 @@ def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
def In32BitMode : Predicate<"Subtarget->is32Bit()">,
AssemblerPredicate<"Mode32Bit", "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
+def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
+def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
+def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
@@ -773,6 +819,7 @@ def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
+def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -803,6 +850,11 @@ def X86_COND_O : PatLeaf<(i8 13)>;
def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
def X86_COND_S : PatLeaf<(i8 15)>;
+// Predicate used to help when pattern matching LZCNT/TZCNT.
+def X86_COND_E_OR_NE : ImmLeaf<i8, [{
+ return (Imm == X86::COND_E) || (Imm == X86::COND_NE);
+}]>;
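// Mechanically (illustrative): this leaf accepts the condition-code constant
// when it is either X86::COND_E or X86::COND_NE, so a single pattern such as
//
//   (X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E_OR_NE), ...)
//
// matches whichever of the two canonical cmov forms the DAG combiner
// produced; the LZCNT/TZCNT pattern lists below rely on this.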
+
let FastIselShouldIgnore = 1 in { // FastIsel should ignore all simm8 instrs.
def i16immSExt8 : ImmLeaf<i16, [{ return Imm == (int8_t)Imm; }]>;
def i32immSExt8 : ImmLeaf<i32, [{ return Imm == (int8_t)Imm; }]>;
@@ -905,7 +957,7 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
//
// Nop
-let neverHasSideEffects = 1, SchedRW = [WriteZero] in {
+let hasSideEffects = 0, SchedRW = [WriteZero] in {
def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>;
def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
"nop{w}\t$zero", [], IIC_NOP>, TB, OpSize16;
@@ -919,12 +971,12 @@ def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
"enter\t$len, $lvl", [], IIC_ENTER>, Sched<[WriteMicrocoded]>;
let SchedRW = [WriteALU] in {
-let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
+let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
def LEAVE : I<0xC9, RawFrm,
(outs), (ins), "leave", [], IIC_LEAVE>,
Requires<[Not64BitMode]>;
-let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
+let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
def LEAVE64 : I<0xC9, RawFrm,
(outs), (ins), "leave", [], IIC_LEAVE>,
Requires<[In64BitMode]>;
@@ -934,7 +986,7 @@ def LEAVE64 : I<0xC9, RawFrm,
// Miscellaneous Instructions.
//
-let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
+let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
IIC_POP_REG16>, OpSize16;
@@ -948,11 +1000,6 @@ def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>;
def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [],
IIC_POP_MEM>, OpSize32, Requires<[Not64BitMode]>;
-
-def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>,
- OpSize16;
-def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>,
- OpSize32, Requires<[Not64BitMode]>;
} // mayLoad, SchedRW
let mayStore = 1, SchedRW = [WriteStore] in {
@@ -981,16 +1028,26 @@ def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
"push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
Requires<[Not64BitMode]>;
+} // mayStore, SchedRW
+}
+
+let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
+ SchedRW = [WriteLoad] in {
+def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>,
+ OpSize16;
+def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>,
+ OpSize32, Requires<[Not64BitMode]>;
+}
+let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, hasSideEffects=0,
+ SchedRW = [WriteStore] in {
def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>,
OpSize16;
def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>,
OpSize32, Requires<[Not64BitMode]>;
-
-} // mayStore, SchedRW
}
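// Making EFLAGS an explicit def of POPF and an explicit use of PUSHF records
// the flag dependency in the MI representation, e.g. (illustrative):
//
//   popfw          ; now Defs = [ESP, EFLAGS]
//   jne  .LBB0_1   ; reads EFLAGS, so it cannot be scheduled above the popfw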
-let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
+let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>;
@@ -1009,7 +1066,7 @@ def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [],
} // mayStore, SchedRW
}
-let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1,
+let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteStore] in {
def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
"push{q}\t$imm", [], IIC_PUSH_IMM>, Requires<[In64BitMode]>;
@@ -1021,22 +1078,22 @@ def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
Requires<[In64BitMode]>;
}
-let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
+let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>,
OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
-let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
+let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, hasSideEffects=0 in
def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
- mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
+ mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", [], IIC_POP_A>,
OpSize32, Requires<[Not64BitMode]>;
def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", [], IIC_POP_A>,
OpSize16, Requires<[Not64BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
- mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
+ mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", [], IIC_PUSH_A>,
OpSize32, Requires<[Not64BitMode]>;
def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", [], IIC_PUSH_A>,
@@ -1166,7 +1223,7 @@ def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
// Move Instructions.
//
let SchedRW = [WriteMove] in {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
@@ -1225,62 +1282,67 @@ def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
let hasSideEffects = 0 in {
-/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
-/// 32-bit offset from the segment base. These are only valid in x86-32 mode.
+/// Memory offset versions of moves. The immediate is an address-mode-sized
+/// offset from the segment base.
let SchedRW = [WriteALU] in {
let mayLoad = 1 in {
let Defs = [AL] in
-def MOV8o8a : Ii32 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
- "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
- Requires<[In32BitMode]>;
+def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
+ "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
+ AdSize32;
let Defs = [AX] in
-def MOV16o16a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
- OpSize16, Requires<[In32BitMode]>;
+def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
+ "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize32;
let Defs = [EAX] in
-def MOV32o32a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
- "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
- OpSize32, Requires<[In32BitMode]>;
+def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
+ "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
+ OpSize32, AdSize32;
+let Defs = [RAX] in
+def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
+ "mov{q}\t{$src, %rax|rax, $src}", [], IIC_MOV_MEM>,
+ AdSize32;
let Defs = [AL] in
-def MOV8o8a_16 : Ii16 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
- "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
- AdSize, Requires<[In16BitMode]>;
+def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
+ "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, AdSize16;
let Defs = [AX] in
-def MOV16o16a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
- OpSize16, AdSize, Requires<[In16BitMode]>;
+def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
+ "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize16;
let Defs = [EAX] in
-def MOV32o32a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
- "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
- AdSize, OpSize32, Requires<[In16BitMode]>;
+def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
+ "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
+ AdSize16, OpSize32;
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8ao8 : Ii32 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
- "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
- Requires<[In32BitMode]>;
+def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs offset32_8:$dst), (ins),
+ "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize32;
let Uses = [AX] in
-def MOV16ao16 : Ii32 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
- OpSize16, Requires<[In32BitMode]>;
+def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_16:$dst), (ins),
+ "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize32;
let Uses = [EAX] in
-def MOV32ao32 : Ii32 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
- "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
- OpSize32, Requires<[In32BitMode]>;
+def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_32:$dst), (ins),
+ "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
+ OpSize32, AdSize32;
+let Uses = [RAX] in
+def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs offset32_64:$dst), (ins),
+ "mov{q}\t{%rax, $dst|$dst, rax}", [], IIC_MOV_MEM>,
+ AdSize32;
let Uses = [AL] in
-def MOV8ao8_16 : Ii16 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
- "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
- AdSize, Requires<[In16BitMode]>;
+def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs offset16_8:$dst), (ins),
+ "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize16;
let Uses = [AX] in
-def MOV16ao16_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
- OpSize16, AdSize, Requires<[In16BitMode]>;
+def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_16:$dst), (ins),
+ "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize16;
let Uses = [EAX] in
-def MOV32ao32_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
- "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
- OpSize32, AdSize, Requires<[In16BitMode]>;
+def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_32:$dst), (ins),
+ "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
+ OpSize32, AdSize16;
}
}
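// Encoding view (illustrative, AT&T syntax): the moffs width now follows the
// AdSize attribute, with the encoder expected to emit the 0x67 address-size
// prefix whenever it differs from the mode default. In 32-bit mode:
//
//   mov 0x00001234, %al       ; A0 34 12 00 00   (MOV8ao32, no prefix)
//   addr16 mov 0x1234, %al    ; 67 A0 34 12      (MOV8ao16, 0x67 prefix)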
@@ -1288,40 +1350,34 @@ def MOV32ao32_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
// and use the movabs mnemonic to indicate this specific form.
let mayLoad = 1 in {
let Defs = [AL] in
-def MOV64o8a : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
- "movabs{b}\t{$src, %al|al, $src}", []>,
- Requires<[In64BitMode]>;
+def MOV8ao64 : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
+ "movabs{b}\t{$src, %al|al, $src}", []>, AdSize64;
let Defs = [AX] in
-def MOV64o16a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
- "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16,
- Requires<[In64BitMode]>;
+def MOV16ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
+ "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16, AdSize64;
let Defs = [EAX] in
-def MOV64o32a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+def MOV32ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
"movabs{l}\t{$src, %eax|eax, $src}", []>, OpSize32,
- Requires<[In64BitMode]>;
+ AdSize64;
let Defs = [RAX] in
-def MOV64o64a : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64:$src),
- "movabs{q}\t{$src, %rax|rax, $src}", []>,
- Requires<[In64BitMode]>;
+def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
+ "movabs{q}\t{$src, %rax|rax, $src}", []>, AdSize64;
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV64ao8 : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
- "movabs{b}\t{%al, $dst|$dst, al}", []>,
- Requires<[In64BitMode]>;
+def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset64_8:$dst), (ins),
+ "movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64;
let Uses = [AX] in
-def MOV64ao16 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
- "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16,
- Requires<[In64BitMode]>;
+def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_16:$dst), (ins),
+ "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64;
let Uses = [EAX] in
-def MOV64ao32 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_32:$dst), (ins),
"movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
- Requires<[In64BitMode]>;
+ AdSize64;
let Uses = [RAX] in
-def MOV64ao64 : RIi64<0xA3, RawFrmMemOffs, (outs offset64:$dst), (ins),
- "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
- Requires<[In64BitMode]>;
+def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs offset64_64:$dst), (ins),
+ "movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64;
}
} // hasSideEffects = 0
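// Usage sketch (illustrative): in 64-bit mode these forms take a full 8-byte
// absolute offset, e.g.
//
//   movabsb 0x1122334455667788, %al   ; A0 88 77 66 55 44 33 22 11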
@@ -1371,17 +1427,17 @@ def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
// that they can be used for copying and storing h registers, which can't be
// encoded when a REX prefix is present.
let isCodeGenOnly = 1 in {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MOV8rr_NOREX : I<0x88, MRMDestReg,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>,
Sched<[WriteMove]>;
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, hasSideEffects = 0 in
def MOV8mr_NOREX : I<0x88, MRMDestMem,
(outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
IIC_MOV_MEM>, Sched<[WriteStore]>;
-let mayLoad = 1, neverHasSideEffects = 1,
+let mayLoad = 1, hasSideEffects = 0,
canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
(outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
@@ -1395,7 +1451,7 @@ let SchedRW = [WriteALU] in {
let Defs = [EFLAGS], Uses = [AH] in
def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf",
[(set EFLAGS, (X86sahf AH))], IIC_AHF>;
-let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
+let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", [],
IIC_AHF>; // AH = flags
} // SchedRW
@@ -1981,42 +2037,42 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
}
let Predicates = [HasLZCNT] in {
- def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E),
- (X86cmp GR16:$src, (i16 0))),
+ def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
+ (X86cmp GR16:$src, (i16 0))),
(LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E),
+ def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
(X86cmp GR32:$src, (i32 0))),
(LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E),
+ def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
(X86cmp GR64:$src, (i64 0))),
(LZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E_OR_NE),
(X86cmp GR16:$src, (i16 0))),
(LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E_OR_NE),
(X86cmp GR32:$src, (i32 0))),
(LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E_OR_NE),
(X86cmp GR64:$src, (i64 0))),
(LZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E),
- (X86cmp (loadi16 addr:$src), (i16 0))),
+ def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
(LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E),
- (X86cmp (loadi32 addr:$src), (i32 0))),
+ def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
(LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E),
- (X86cmp (loadi64 addr:$src), (i64 0))),
+ def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
(LZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E),
- (X86cmp (loadi16 addr:$src), (i16 0))),
+ def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
(LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E),
- (X86cmp (loadi32 addr:$src), (i32 0))),
+ def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
(LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E),
- (X86cmp (loadi64 addr:$src), (i64 0))),
+ def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
(LZCNT64rm addr:$src)>;
}
@@ -2097,42 +2153,42 @@ let Predicates = [HasBMI] in {
}
let Predicates = [HasBMI] in {
- def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E),
+ def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
(X86cmp GR16:$src, (i16 0))),
(TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E),
+ def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
(X86cmp GR32:$src, (i32 0))),
(TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E),
+ def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
(X86cmp GR64:$src, (i64 0))),
(TZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E_OR_NE),
(X86cmp GR16:$src, (i16 0))),
(TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E_OR_NE),
(X86cmp GR32:$src, (i32 0))),
(TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E),
+ def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E_OR_NE),
(X86cmp GR64:$src, (i64 0))),
(TZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E),
- (X86cmp (loadi16 addr:$src), (i16 0))),
+ def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
(TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E),
- (X86cmp (loadi32 addr:$src), (i32 0))),
+ def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
(TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E),
- (X86cmp (loadi64 addr:$src), (i64 0))),
+ def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
(TZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E),
- (X86cmp (loadi16 addr:$src), (i16 0))),
+ def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
(TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E),
- (X86cmp (loadi32 addr:$src), (i32 0))),
+ def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
(TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E),
- (X86cmp (loadi64 addr:$src), (i64 0))),
+ def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
(TZCNT64rm addr:$src)>;
}
@@ -2167,11 +2223,11 @@ let Predicates = [HasBMI2], Defs = [EFLAGS] in {
def CountTrailingOnes : SDNodeXForm<imm, [{
// Count the trailing ones in the immediate.
- return getI8Imm(CountTrailingOnes_64(N->getZExtValue()));
+ return getI8Imm(countTrailingOnes(N->getZExtValue()));
}]>;
def BZHIMask : ImmLeaf<i64, [{
- return isMask_64(Imm) && (CountTrailingOnes_64(Imm) > 32);
+ return isMask_64(Imm) && (countTrailingOnes<uint64_t>(Imm) > 32);
}]>;
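// Worked example (illustrative): Imm = 0x000000FFFFFFFFFF is a mask with 40
// trailing ones, so isMask_64(Imm) holds and countTrailingOnes(Imm) = 40 > 32;
// BZHIMask matches, and the CountTrailingOnes transform above yields the i8
// control value 40 for the 64-bit BZHI patterns guarded by HasBMI2 below.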
let Predicates = [HasBMI2] in {
@@ -2361,6 +2417,16 @@ let Predicates = [HasTBM] in {
} // HasTBM
//===----------------------------------------------------------------------===//
+// Memory Instructions
+//
+
+def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
+ "clflushopt\t$src", []>, PD;
+def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src", []>, PD;
+def PCOMMIT : I<0xAE, MRM_F8, (outs), (ins), "pcommit", []>, PD;
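// Usage sketch (not from this commit): both cache-line instructions take a
// byte address and operate on the cache line containing it, while pcommit
// takes no operands, e.g.
//
//   clflushopt (%rdi)
//   clwb       8(%rsi)
//   pcommit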
+
+
+//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//
@@ -2513,6 +2579,12 @@ def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
def : MnemonicAlias<"fucomip", "fucompi", "att">;
def : MnemonicAlias<"fwait", "wait">;
+def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
+def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
+def : MnemonicAlias<"xsaveq", "xsave64", "att">;
+def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
+def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
+
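// Effect (illustrative): the "q"-suffixed AT&T spellings map to the REX.W
// forms, e.g. "fxsaveq (%rdi)" assembles identically to "fxsave64 (%rdi)".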
class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
string VariantName>
@@ -2700,28 +2772,28 @@ def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// this is compatible with what GAS does.
def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
-def : InstAlias<"lcall *$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
-def : InstAlias<"ljmp *$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall {*}$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"ljmp {*}$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
-def : InstAlias<"lcall *$dst", (FARCALL16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
-def : InstAlias<"ljmp *$dst", (FARJMP16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"lcall {*}$dst", (FARCALL16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"ljmp {*}$dst", (FARJMP16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
-def : InstAlias<"call *$dst", (CALL64m i16mem:$dst), 0>, Requires<[In64BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP64m i16mem:$dst), 0>, Requires<[In64BitMode]>;
-def : InstAlias<"call *$dst", (CALL32m i16mem:$dst), 0>, Requires<[In32BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP32m i16mem:$dst), 0>, Requires<[In32BitMode]>;
-def : InstAlias<"call *$dst", (CALL16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"call {*}$dst", (CALL64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"call {*}$dst", (CALL32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"call {*}$dst", (CALL16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
// "imul <imm>, B" is an alias for "imul <imm>, B, B".
-def : InstAlias<"imulw $imm, $r", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm)>;
-def : InstAlias<"imulw $imm, $r", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm)>;
-def : InstAlias<"imull $imm, $r", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm)>;
-def : InstAlias<"imull $imm, $r", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm)>;
-def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>;
-def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>;
+def : InstAlias<"imulw {$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
+def : InstAlias<"imulw {$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
+def : InstAlias<"imull {$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
+def : InstAlias<"imull {$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
+def : InstAlias<"imulq {$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
+def : InstAlias<"imulq {$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
@@ -2745,34 +2817,34 @@ def : InstAlias<"jmpl $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
// Force a suffixless mov with a segment and mem operand to prefer the 'l' form
// of the move. All segment/mem forms are equivalent; this one has the shortest
// encoding.
-def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem), 0>;
-def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg), 0>;
+def : InstAlias<"mov {$mem, $seg|$seg, $mem}", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem), 0>;
+def : InstAlias<"mov {$seg, $mem|$mem, $seg}", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg), 0>;
// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
-def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
+def : InstAlias<"movq {$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
// Match 'movq GR64, MMX' as an alias for movd.
-def : InstAlias<"movq $src, $dst",
+def : InstAlias<"movq {$src, $dst|$dst, $src}",
(MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
-def : InstAlias<"movq $src, $dst",
+def : InstAlias<"movq {$src, $dst|$dst, $src}",
(MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases
-def : InstAlias<"movsx $src, $dst", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX32rr8 GR32:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX32rr16 GR32:$dst, GR16:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr8 GR64:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr16 GR64:$dst, GR16:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr32 GR64:$dst, GR32:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0>;
// movzx aliases
-def : InstAlias<"movzx $src, $dst", (MOVZX16rr8 GR16:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX32rr8 GR32:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX32rr16 GR32:$dst, GR16:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX64rr8_Q GR64:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX64rr8_Q GR64:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 9001fba..eaa7894 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -125,9 +125,9 @@ let Constraints = "$src1 = $dst" in {
(bitconvert (load_mmx addr:$src2))))],
itins.rm>, Sched<[WriteVecShiftLd, ReadAfterLd]>;
def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
- (ins VR64:$src1, i32i8imm:$src2),
+ (ins VR64:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))], itins.ri>,
+ [(set VR64:$dst, (IntId2 VR64:$src1, imm:$src2))], itins.ri>,
Sched<[WriteVecShift]>;
}
}
@@ -170,12 +170,12 @@ multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
/// PALIGN MMX instructions (require SSSE3).
multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
def R64irr : MMXSS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2, i8imm:$src3),
- !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ (ins VR64:$src1, VR64:$src2, u8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR64:$dst, (IntId VR64:$src1, VR64:$src2, (i8 imm:$src3)))]>,
Sched<[WriteShuffle]>;
def R64irm : MMXSS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
+ (ins VR64:$src1, i64mem:$src2, u8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR64:$dst, (IntId VR64:$src1,
(bitconvert (load_mmx addr:$src2)), (i8 imm:$src3)))]>,
@@ -220,23 +220,29 @@ def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms",
// Data Transfer Instructions
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
+ [(set VR64:$dst,
(x86mmx (scalar_to_vector GR32:$src)))],
IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>;
-let canFoldAsLoad = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
(x86mmx (scalar_to_vector (loadi32 addr:$src))))],
IIC_MMX_MOV_MM_RM>, Sched<[WriteLoad]>;
+
+let Predicates = [HasMMX] in {
+ let AddedComplexity = 15 in
+ def : Pat<(x86mmx (MMX_X86movw2d GR32:$src)),
+ (MMX_MOVD64rr GR32:$src)>;
+ let AddedComplexity = 20 in
+ def : Pat<(x86mmx (MMX_X86movw2d (loadi32 addr:$src))),
+ (MMX_MOVD64rm addr:$src)>;
+}
+
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>,
Sched<[WriteStore]>;
-// Low word of MMX to GPR.
-def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1,
- [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>;
def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
@@ -248,16 +254,21 @@ def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
[(set VR64:$dst, (bitconvert GR64:$src))],
IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
+def MMX_MOVD64to64rm : MMXRI<0x6E, MRMSrcMem, (outs VR64:$dst),
+ (ins i64mem:$src), "movd\t{$src, $dst|$dst, $src}",
+ [], IIC_MMX_MOVQ_RM>, Sched<[WriteLoad]>;
+
// These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as
// movd.
let SchedRW = [WriteMove] in {
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
(outs GR64:$dst), (ins VR64:$src),
- "movd\t{$src, $dst|$dst, $src}",
+ "movd\t{$src, $dst|$dst, $src}",
[(set GR64:$dst,
(bitconvert VR64:$src))], IIC_MMX_MOV_REG_MM>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", [],
IIC_MMX_MOVQ_RR>;
@@ -268,6 +279,12 @@ def MMX_MOVQ64rr_REV : MMXI<0x7F, MRMDestReg, (outs VR64:$dst), (ins VR64:$src),
}
} // SchedRW
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
+def MMX_MOVD64from64rm : MMXRI<0x7E, MRMDestMem,
+ (outs i64mem:$dst), (ins VR64:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [], IIC_MMX_MOV_REG_MM>, Sched<[WriteStore]>;
+
let SchedRW = [WriteLoad] in {
let canFoldAsLoad = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
@@ -453,6 +470,13 @@ defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
int_x86_mmx_psrl_q, int_x86_mmx_psrli_q,
MMX_SHIFT_ITINS>;
+def : Pat<(int_x86_mmx_psrl_w VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSRLWrm VR64:$src1, addr:$src2)>;
+def : Pat<(int_x86_mmx_psrl_d VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSRLDrm VR64:$src1, addr:$src2)>;
+def : Pat<(int_x86_mmx_psrl_q VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSRLQrm VR64:$src1, addr:$src2)>;
+
defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
int_x86_mmx_psll_w, int_x86_mmx_pslli_w,
MMX_SHIFT_ITINS>;
@@ -463,6 +487,13 @@ defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
int_x86_mmx_psll_q, int_x86_mmx_pslli_q,
MMX_SHIFT_ITINS>;
+def : Pat<(int_x86_mmx_psll_w VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSLLWrm VR64:$src1, addr:$src2)>;
+def : Pat<(int_x86_mmx_psll_d VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSLLDrm VR64:$src1, addr:$src2)>;
+def : Pat<(int_x86_mmx_psll_q VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSLLQrm VR64:$src1, addr:$src2)>;
+
defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
int_x86_mmx_psra_w, int_x86_mmx_psrai_w,
MMX_SHIFT_ITINS>;
@@ -470,6 +501,11 @@ defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
int_x86_mmx_psra_d, int_x86_mmx_psrai_d,
MMX_SHIFT_ITINS>;
+def : Pat<(int_x86_mmx_psra_w VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSRAWrm VR64:$src1, addr:$src2)>;
+def : Pat<(int_x86_mmx_psra_d VR64:$src1, (load_mvmmx addr:$src2)),
+ (MMX_PSRADrm VR64:$src1, addr:$src2)>;
+
// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b,
MMX_INTALU_ITINS>;
@@ -486,19 +522,19 @@ defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d,
MMX_INTALU_ITINS>;
// -- Unpack Instructions
-defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw",
+defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw",
int_x86_mmx_punpckhbw,
MMX_UNPCK_H_ITINS>;
-defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd",
+defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd",
int_x86_mmx_punpckhwd,
MMX_UNPCK_H_ITINS>;
-defm MMX_PUNPCKHDQ : MMXI_binop_rm_int<0x6A, "punpckhdq",
+defm MMX_PUNPCKHDQ : MMXI_binop_rm_int<0x6A, "punpckhdq",
int_x86_mmx_punpckhdq,
MMX_UNPCK_H_ITINS>;
-defm MMX_PUNPCKLBW : MMXI_binop_rm_int<0x60, "punpcklbw",
+defm MMX_PUNPCKLBW : MMXI_binop_rm_int<0x60, "punpcklbw",
int_x86_mmx_punpcklbw,
MMX_UNPCK_L_ITINS>;
-defm MMX_PUNPCKLWD : MMXI_binop_rm_int<0x61, "punpcklwd",
+defm MMX_PUNPCKLWD : MMXI_binop_rm_int<0x61, "punpcklwd",
int_x86_mmx_punpcklwd,
MMX_UNPCK_L_ITINS>;
defm MMX_PUNPCKLDQ : MMXI_binop_rm_int<0x62, "punpckldq",
@@ -518,13 +554,13 @@ defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", int_x86_ssse3_pshuf_b,
MMX_PSHUF_ITINS>;
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
+ (outs VR64:$dst), (ins VR64:$src1, u8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
(int_x86_sse_pshuf_w VR64:$src1, imm:$src2))],
IIC_MMX_PSHUF>, Sched<[WriteShuffle]>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
- (outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
+ (outs VR64:$dst), (ins i64mem:$src1, u8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
(int_x86_sse_pshuf_w (load_mmx addr:$src1),
@@ -559,27 +595,27 @@ let Constraints = "$src1 = $dst" in {
// Extract / Insert
def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
- (outs GR32orGR64:$dst), (ins VR64:$src1, i32i8imm:$src2),
+ (outs GR32orGR64:$dst), (ins VR64:$src1, i32u8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (int_x86_mmx_pextr_w VR64:$src1,
- (iPTR imm:$src2)))],
+ imm:$src2))],
IIC_MMX_PEXTR>, Sched<[WriteShuffle]>;
let Constraints = "$src1 = $dst" in {
def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
- (outs VR64:$dst),
- (ins VR64:$src1, GR32orGR64:$src2, i32i8imm:$src3),
+ (outs VR64:$dst),
+ (ins VR64:$src1, GR32orGR64:$src2, i32u8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
- GR32orGR64:$src2, (iPTR imm:$src3)))],
+ GR32orGR64:$src2, imm:$src3))],
IIC_MMX_PINSRW>, Sched<[WriteShuffle]>;
def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem,
(outs VR64:$dst),
- (ins VR64:$src1, i16mem:$src2, i32i8imm:$src3),
+ (ins VR64:$src1, i16mem:$src2, i32u8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
(i32 (anyext (loadi16 addr:$src2))),
- (iPTR imm:$src3)))],
+ imm:$src3))],
IIC_MMX_PINSRW>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
diff --git a/lib/Target/X86/X86InstrSGX.td b/lib/Target/X86/X86InstrSGX.td
index 47c5dc5..84119ad 100644
--- a/lib/Target/X86/X86InstrSGX.td
+++ b/lib/Target/X86/X86InstrSGX.td
@@ -17,8 +17,8 @@
// ENCLS - Execute an Enclave System Function of Specified Leaf Number
def ENCLS : I<0x01, MRM_CF, (outs), (ins),
- "encls", []>, TB, Requires<[HasSGX]>;
+ "encls", []>, TB;
// ENCLU - Execute an Enclave User Function of Specified Leaf Number
def ENCLU : I<0x01, MRM_D7, (outs), (ins),
- "enclu", []>, TB, Requires<[HasSGX]>;
+ "enclu", []>, TB;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index cc896f0..d2929d2 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -548,13 +548,13 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
X86MemOperand x86memop, string base_opc,
- string asm_opr> {
+ string asm_opr, Domain d = GenericDomain> {
def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, RC:$src2),
!strconcat(base_opc, asm_opr),
[(set VR128:$dst, (vt (OpNode VR128:$src1,
(scalar_to_vector RC:$src2))))],
- IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
+ IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;
// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
@@ -565,49 +565,55 @@ multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
}
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
- X86MemOperand x86memop, string OpcodeStr> {
+ X86MemOperand x86memop, string OpcodeStr,
+ Domain d = GenericDomain> {
// AVX
defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
VEX_4V, VEX_LIG;
def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
+ [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
VEX, VEX_LIG, Sched<[WriteStore]>;
// SSE1 & 2
let Constraints = "$src1 = $dst" in {
defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
- "\t{$src2, $dst|$dst, $src2}">;
+ "\t{$src2, $dst|$dst, $src2}", d>;
}
def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
+ [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
Sched<[WriteStore]>;
}
// Loading from memory automatically zeroing upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
- PatFrag mem_pat, string OpcodeStr> {
+ PatFrag mem_pat, string OpcodeStr,
+ Domain d = GenericDomain> {
def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))],
- IIC_SSE_MOV_S_RM>, VEX, VEX_LIG, Sched<[WriteLoad]>;
+ IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))],
- IIC_SSE_MOV_S_RM>, Sched<[WriteLoad]>;
+ IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
}
-defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss">, XS;
-defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd">, XD;
+defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
+ SSEPackedSingle>, XS;
+defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
+ SSEPackedDouble>, XD;
let canFoldAsLoad = 1, isReMaterializable = 1 in {
- defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
+ defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
+ SSEPackedSingle>, XS;
let AddedComplexity = 20 in
- defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
+ defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
+ SSEPackedDouble>, XD;
}
// Patterns
@@ -809,7 +815,7 @@ multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
string asm, Domain d,
OpndItins itins,
bit IsReMaterializable = 1> {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
Sched<[WriteFShuffle]>;
@@ -1332,6 +1338,8 @@ let Predicates = [HasAVX] in {
(bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
(VMOVHPSrm VR128:$src1, addr:$src2)>;
+ // VMOVHPD patterns
+
// FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here, the problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
@@ -1344,6 +1352,11 @@ let Predicates = [HasAVX] in {
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
(VMOVHPDrm VR128:$src1, addr:$src2)>;
+
+ def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
+ (iPTR 0))), addr:$dst),
+ (VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [UseSSE1] in {
@@ -1357,6 +1370,8 @@ let Predicates = [UseSSE1] in {
}
let Predicates = [UseSSE2] in {
+ // MOVHPD patterns
+
// FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here, the problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
@@ -1369,6 +1384,11 @@ let Predicates = [UseSSE2] in {
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
(MOVHPDrm VR128:$src1, addr:$src2)>;
+
+ def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
+ (iPTR 0))), addr:$dst),
+ (MOVHPDmr addr:$dst, VR128:$src)>;
}
//===----------------------------------------------------------------------===//
@@ -1477,7 +1497,7 @@ multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm, Domain d,
OpndItins itins> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
[], itins.rr, d>, Sched<[itins.Sched]>;
let mayLoad = 1 in
@@ -1488,7 +1508,7 @@ let neverHasSideEffects = 1 in {
multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm> {
-let neverHasSideEffects = 1, Predicates = [UseAVX] in {
+let hasSideEffects = 0, Predicates = [UseAVX] in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
Sched<[WriteCvtI2F]>;
@@ -1497,7 +1517,7 @@ let neverHasSideEffects = 1, Predicates = [UseAVX] in {
(ins DstRC:$src1, x86memop:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
Sched<[WriteCvtI2FLd, ReadAfterLd]>;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
}
let Predicates = [UseAVX] in {
@@ -1804,7 +1824,7 @@ def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
/// SSE 2 Only
// Convert scalar double to scalar single
-let neverHasSideEffects = 1, Predicates = [UseAVX] in {
+let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR64:$src1, FR64:$src2),
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
@@ -1869,7 +1889,7 @@ def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
-let neverHasSideEffects = 1, Predicates = [UseAVX] in {
+let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR32:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -2191,7 +2211,7 @@ def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
-let neverHasSideEffects = 1, mayLoad = 1 in
+let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[]>, VEX, Sched<[WriteCvtI2FLd]>;
@@ -2213,7 +2233,7 @@ def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
Sched<[WriteCvtI2F]>;
}
-let neverHasSideEffects = 1, mayLoad = 1 in
+let hasSideEffects = 0, mayLoad = 1 in
def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>;
@@ -2319,26 +2339,26 @@ let Predicates = [UseSSE2] in {
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
Operand CC, SDNode OpNode, ValueType VT,
PatFrag ld_frag, string asm, string asm_alt,
- OpndItins itins> {
+ OpndItins itins, ImmLeaf immLeaf> {
def rr : SIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
- [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
+ [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
itins.rr>, Sched<[itins.Sched]>;
def rm : SIi8<0xC2, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
[(set RC:$dst, (OpNode (VT RC:$src1),
- (ld_frag addr:$src2), imm:$cc))],
+ (ld_frag addr:$src2), immLeaf:$cc))],
itins.rm>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
+ (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
let mayLoad = 1 in
def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
+ (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
IIC_SSE_ALU_F32S_RM>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
@@ -2347,38 +2367,37 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
"cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSE_ALU_F32S>,
- XS, VEX_4V, VEX_LIG;
+ SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
"cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSE_ALU_F32S>, // same latency as 32 bit compare
+ SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
XD, VEX_4V, VEX_LIG;
let Constraints = "$src1 = $dst" in {
defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
"cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
- "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
- XS;
+ "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
+ i8immZExt3>, XS;
defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
"cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
"cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSE_ALU_F64S>,
- XD;
+ SSE_ALU_F64S, i8immZExt3>, XD;
}
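The switch from a bare imm to an ImmLeaf parameter encodes the valid predicate range in the pattern: the plain SSE cmpss/cmpsd forms accept predicates 0..7 (i8immZExt3), while the VEX-encoded forms accept the extended 0..31 range (i8immZExt5). Illustrative C (not part of the patch):

    #include <immintrin.h>

    // SSE form: the predicate is 3 bits (0..7); LT is encoding 1.
    __m128 lt_sse(__m128 a, __m128 b) { return _mm_cmplt_ss(a, b); }

    #ifdef __AVX__
    // VEX form: the immediate may use the full 0..31 range,
    // e.g. _CMP_GE_OQ == 29.
    __m128 ge_avx(__m128 a, __m128 b) { return _mm_cmp_ss(a, b, _CMP_GE_OQ); }
    #endif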
multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
- Intrinsic Int, string asm, OpndItins itins> {
+ Intrinsic Int, string asm, OpndItins itins,
+ ImmLeaf immLeaf> {
def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src, CC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
- VR128:$src, imm:$cc))],
+ VR128:$src, immLeaf:$cc))],
itins.rr>,
Sched<[itins.Sched]>;
def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, x86memop:$src, CC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
- (load addr:$src), imm:$cc))],
+ (load addr:$src), immLeaf:$cc))],
itins.rm>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
@@ -2387,19 +2406,19 @@ let isCodeGenOnly = 1 in {
// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
- SSE_ALU_F32S>,
+ SSE_ALU_F32S, i8immZExt5>,
XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
- SSE_ALU_F32S>, // same latency as f32
+ SSE_ALU_F32S, i8immZExt5>, // same latency as f32
XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $dst|$dst, $src}",
- SSE_ALU_F32S>, XS;
+ SSE_ALU_F32S, i8immZExt3>, XS;
defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $dst|$dst, $src}",
- SSE_ALU_F64S>,
+ SSE_ALU_F64S, i8immZExt3>,
XD;
}
}
@@ -2473,26 +2492,28 @@ let Defs = [EFLAGS] in {
// sse12_cmp_packed - SSE 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
Operand CC, Intrinsic Int, string asm,
- string asm_alt, Domain d,
- OpndItins itins = SSE_ALU_F32P> {
+ string asm_alt, Domain d, ImmLeaf immLeaf,
+ PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
+ let isCommutable = 1 in
def rri : PIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
- [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
+ [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
itins.rr, d>,
Sched<[WriteFAdd]>;
def rmi : PIi8<0xC2, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
- [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
+ [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
itins.rm, d>,
Sched<[WriteFAddLd, ReadAfterLd]>;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : PIi8<0xC2, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
+ let mayLoad = 1 in
def rmi_alt : PIi8<0xC2, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
asm_alt, [], itins.rm, d>,
Sched<[WriteFAddLd, ReadAfterLd]>;
}
@@ -2501,61 +2522,61 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, PS, VEX_4V;
+ SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, PD, VEX_4V;
+ SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, PS, VEX_4V, VEX_L;
+ SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, PD, VEX_4V, VEX_L;
+ SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedSingle, SSE_ALU_F32P>, PS;
+ SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedDouble, SSE_ALU_F64P>, PD;
+ SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
}
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
(VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
(VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
-def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
(VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
(VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE1] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
(CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE2] in {
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
(CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}
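These X86cmpp patterns map integer-typed compare results onto cmpps/cmppd; the load operands are now explicitly typed (loadv4f32 for AVX vs. memopv4f32 for SSE), so AVX folds unaligned loads while SSE keeps its alignment requirement. A small C example of the mask idiom the node represents (illustrative):

    #include <xmmintrin.h>

    // cmpps $1 yields an all-ones/all-zeros lane mask, used here for a
    // branchless per-lane max.
    __m128 lane_max(__m128 a, __m128 b) {
        __m128 m = _mm_cmplt_ps(a, b);                   // lanes where a < b
        return _mm_or_ps(_mm_and_ps(m, b), _mm_andnot_ps(m, a));
    }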
@@ -2568,12 +2589,12 @@ multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
ValueType vt, string asm, PatFrag mem_frag,
Domain d> {
def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
+ (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
[(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
(i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
Sched<[WriteFShuffleLd, ReadAfterLd]>;
def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
+ (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
[(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
(i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
Sched<[WriteFShuffle]>;
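The i8imm to u8imm change reflects that the shuffle selector is an unsigned byte, so the assembler should neither sign-extend it nor reject values with bit 7 set. Illustrative use of the selector encoding in C:

    #include <xmmintrin.h>

    // shufps imm8: bits [1:0] and [3:2] pick lanes from $src1, bits [5:4]
    // and [7:6] pick lanes from $src2. _MM_SHUFFLE(3,2,1,0) == 0xE4.
    __m128 low_from_a_high_from_b(__m128 a, __m128 b) {
        return _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
    }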
@@ -2729,24 +2750,6 @@ let Predicates = [HasAVX1Only] in {
(VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}
-let Predicates = [HasAVX] in {
- // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
- // problem is during lowering, where it's not possible to recognize the load
- // fold cause it has two uses through a bitcast. One use disappears at isel
- // time and the fold opportunity reappears.
- def : Pat<(v2f64 (X86Movddup VR128:$src)),
- (VUNPCKLPDrr VR128:$src, VR128:$src)>;
-}
-
-let Predicates = [UseSSE2] in {
- // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
- // problem is during lowering, where it's not possible to recognize the load
- // fold cause it has two uses through a bitcast. One use disappears at isel
- // time and the fold opportunity reappears.
- def : Pat<(v2f64 (X86Movddup VR128:$src)),
- (UNPCKLPDrr VR128:$src, VR128:$src)>;
-}
-
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//
@@ -2838,7 +2841,7 @@ multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
ValueType OpVT128, ValueType OpVT256,
OpndItins itins, bit IsCommutable = 0> {
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX] in
defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
@@ -2846,7 +2849,7 @@ let Constraints = "$src1 = $dst" in
defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
memopv2i64, i128mem, itins, IsCommutable, 1>;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX] in
defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
OpVT256, VR256, loadv4i64, i256mem, itins,
IsCommutable, 0>, VEX_4V, VEX_L;
@@ -2867,40 +2870,73 @@ defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//
-/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
-///
-multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
- SDNode OpNode, OpndItins itins> {
+// Multiclass for scalars using the X86 logical operation aliases for FP.
+multiclass sse12_fp_packed_scalar_logical_alias<
+ bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
+ defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
+ FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
+ PS, VEX_4V;
+
+ defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
+ FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
+ PD, VEX_4V;
+
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
+ f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;
+
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
+ f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
+ }
+}
+
+let isCodeGenOnly = 1 in {
+ defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
+ SSE_BIT_ITINS_P>;
+ defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
+ SSE_BIT_ITINS_P>;
+ defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
+ SSE_BIT_ITINS_P>;
+
+ let isCommutable = 0 in
+ defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
+ SSE_BIT_ITINS_P>;
+}
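These FsAND/FsOR/FsXOR/FsANDN aliases exist so scalar FP bit operations, as produced by fabs/fneg/copysign lowering, can be selected to the packed logical opcodes on the FR32/FR64 register classes. A hedged C example of code that exercises X86fand (illustrative):

    #include <math.h>

    // fabsf lowers to an FP 'and' that clears the sign bit; on x86 this is
    // typically selected as andps (vandps with AVX) against a constant pool
    // entry holding 0x7fffffff:
    //     andps .LCPI0_0(%rip), %xmm0
    float abs_f32(float x) {
        return fabsf(x);
    }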
+
+// Multiclass for vectors using the X86 logical operation aliases for FP.
+multiclass sse12_fp_packed_vector_logical_alias<
+ bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
+ let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
- FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
+ VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
PS, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
- FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
+ VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
PD, VEX_4V;
+ }
let Constraints = "$src1 = $dst" in {
- defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
- f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
+ v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
PS;
- defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
- f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
+ v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
PD;
}
}
-// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isCodeGenOnly = 1 in {
- defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
+ defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
SSE_BIT_ITINS_P>;
- defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
+ defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
SSE_BIT_ITINS_P>;
- defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
+ defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
SSE_BIT_ITINS_P>;
let isCommutable = 0 in
- defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", X86fandn,
+ defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
SSE_BIT_ITINS_P>;
}
@@ -2908,6 +2944,7 @@ let isCodeGenOnly = 1 in {
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
+ let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f256mem,
[(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
@@ -2938,6 +2975,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(loadv2i64 addr:$src2)))], 0>,
PD, VEX_4V;
+ }
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
@@ -2993,6 +3031,7 @@ let Predicates = [HasAVX1Only] in {
/// classes below
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
SDNode OpNode, SizeItins itins> {
+ let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
@@ -3006,6 +3045,7 @@ multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
+ }
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
@@ -3081,10 +3121,9 @@ let isCodeGenOnly = 1 in {
}
// Patterns used to select SSE scalar fp arithmetic instructions from
-// a scalar fp operation followed by a blend.
+// either:
//
-// These patterns know, for example, how to select an ADDSS from a
-// float add plus vector insert.
+// (1) a scalar fp operation followed by a blend
//
// The effect is that the backend no longer emits unnecessary vector
// insert instructions immediately after SSE scalar fp instructions
@@ -3096,218 +3135,14 @@ let isCodeGenOnly = 1 in {
// return A;
// }
//
-// previously we generated:
+// Previously we generated:
// addss %xmm0, %xmm1
// movss %xmm1, %xmm0
-//
-// we now generate:
+//
+// We now generate:
// addss %xmm1, %xmm0
-
-let Predicates = [UseSSE1] in {
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))))),
- (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))))),
- (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))))),
- (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))))),
- (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
-}
-
-let Predicates = [UseSSE2] in {
- // SSE2 patterns to select scalar double-precision fp arithmetic instructions
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
-}
-
-let Predicates = [UseSSE41] in {
- // If the subtarget has SSE4.1 but not AVX, the vector insert instruction is
- // lowered into a X86insertps or a X86Blendi rather than a X86Movss. When
- // selecting SSE scalar single-precision fp arithmetic instructions, make
- // sure that we correctly match them.
-
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
-}
-
-let Predicates = [HasAVX] in {
- // The following patterns select AVX Scalar single/double precision fp
- // arithmetic instructions.
-
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))))),
- (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (VADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (VSUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (VMULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (iPTR 0))),
- (VDIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (VADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (VSUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (VMULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
- (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (VDIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fadd
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fsub
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fmul
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fdiv
- (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
- (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
-}
-
-// Patterns used to select SSE scalar fp arithmetic instructions from
-// a vector packed single/double fp operation followed by a vector insert.
+//
+// (2) a vector packed single/double fp operation followed by a vector insert
//
// The effect is that the backend converts the packed fp instruction
// followed by a vector insert into a single SSE scalar fp instruction.
@@ -3318,160 +3153,151 @@ let Predicates = [HasAVX] in {
// return (__m128) {c[0], a[1], a[2], a[3]};
// }
//
-// previously we generated:
+// Previously we generated:
// addps %xmm0, %xmm1
// movss %xmm1, %xmm0
-//
-// we now generate:
+//
+// We now generate:
// addss %xmm1, %xmm0
-let Predicates = [UseSSE1] in {
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (ADDSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (SUBSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (MULSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (DIVSSrr_Int v4f32:$dst, v4f32:$src)>;
-}
+// TODO: Some canonicalization in lowering would reduce the number of
+// patterns we have to try to match.
+multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
+ let Predicates = [UseSSE1] in {
+ // extracted scalar math op with insert via movss
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))))),
+ (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
+ (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+ // vector math op with insert via movss
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
+ }
+
+  // With SSE 4.1, insertps/blendi are preferred to movss, so match those too.
+ let Predicates = [UseSSE41] in {
+ // extracted scalar math op with insert via insertps
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
+ (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+ // extracted scalar math op with insert via blend
+ def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (i8 1))),
+ (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
+ (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+ // vector math op with insert via blend
+ def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
+ (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
+              (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
-let Predicates = [UseSSE2] in {
- // SSE2 patterns to select scalar double-precision fp arithmetic instructions
- // from a packed double-precision fp instruction plus movsd.
-
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
-}
+ }
-let Predicates = [UseSSE41] in {
- // With SSE4.1 we may see these operations using X86Blendi rather than
- // X86Movs{s,d}.
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (ADDSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (SUBSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (MULSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (DIVSSrr_Int v4f32:$dst, v4f32:$src)>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
-
- def : Pat<(v2f64 (X86Blendi (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
+  // Repeat everything for AVX, except for the movss + extracted-scalar combo,
+  // which is not expected to occur with AVX codegen.
+ let Predicates = [HasAVX] in {
+ // extracted scalar math op with insert via insertps
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
+ (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+ // extracted scalar math op with insert via blend
+ def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (i8 1))),
+ (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
+ (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+ // vector math op with insert via movss
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
+
+ // vector math op with insert via blend
+ def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
+ (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
+ (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
+ }
}
-let Predicates = [HasAVX] in {
- // The following patterns select AVX Scalar single/double precision fp
- // arithmetic instructions from a packed single precision fp instruction
- // plus movss/movsd.
-
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (VADDSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (VSUBSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (VMULSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
- (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
- (VDIVSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
- (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
- (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;
-
- // Also handle X86Blendi-based patterns.
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (VADDSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (VSUBSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (VMULSSrr_Int v4f32:$dst, v4f32:$src)>;
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (VDIVSSrr_Int v4f32:$dst, v4f32:$src)>;
-
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;
-
- def : Pat<(v2f64 (X86Blendi (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
- def : Pat<(v2f64 (X86Blendi (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)),
- (v2f64 VR128:$dst), (i8 2))),
- (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;
+defm : scalar_math_f32_patterns<fadd, "ADD">;
+defm : scalar_math_f32_patterns<fsub, "SUB">;
+defm : scalar_math_f32_patterns<fmul, "MUL">;
+defm : scalar_math_f32_patterns<fdiv, "DIV">;
+
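A C sketch of the shape these f32 patterns target (illustrative; uses clang/GCC vector subscripting). Without the patterns, the lane insert would survive as a separate movss, insertps, or blendps after the scalar op:

    #include <xmmintrin.h>

    __m128 add_low_lane(__m128 a, __m128 b) {
        a[0] += b[0];   // desired codegen: a single addss %xmm1, %xmm0
        return a;
    }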
+multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
+ let Predicates = [UseSSE2] in {
+ // extracted scalar math op with insert via movsd
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
+ (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
+ (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+ // vector math op with insert via movsd
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
+ }
+
+ // With SSE 4.1, blendi is preferred to movsd, so match those too.
+ let Predicates = [UseSSE41] in {
+ // extracted scalar math op with insert via blend
+ def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
+ (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))), (i8 1))),
+ (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
+ (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+ // vector math op with insert via blend
+ def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
+ (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
+ (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
+ }
+
+ // Repeat everything for AVX.
+ let Predicates = [HasAVX] in {
+ // extracted scalar math op with insert via movsd
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
+ (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
+ (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+ // extracted scalar math op with insert via blend
+ def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
+ (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))), (i8 1))),
+ (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
+ (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+ // vector math op with insert via movsd
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
+
+ // vector math op with insert via blend
+ def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
+ (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
+ (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
+ }
}
+defm : scalar_math_f64_patterns<fadd, "ADD">;
+defm : scalar_math_f64_patterns<fsub, "SUB">;
+defm : scalar_math_f64_patterns<fmul, "MUL">;
+defm : scalar_math_f64_patterns<fdiv, "DIV">;
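The f64 variant covers the same shapes with movsd and, for SSE4.1/AVX, the blendi form (blendpd $1). Illustrative C, with the same caveats as the f32 sketch above:

    #include <emmintrin.h>

    __m128d mul_low_lane(__m128d a, __m128d b) {
        a[0] *= b[0];   // desired codegen: a single mulsd %xmm1, %xmm0
        return a;
    }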
+
+
/// Unop Arithmetic
/// In addition, we have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
@@ -3518,103 +3344,106 @@ def SSE_RCPS : OpndItins<
>;
}
-/// sse1_fp_unop_s - SSE1 unops in scalar form.
-multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
-let Predicates = [HasAVX], hasSideEffects = 0 in {
- def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
- let mayLoad = 1 in {
- def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1,f32mem:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
- let isCodeGenOnly = 1 in
- def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
+/// sse_fp_unop_s - SSE1/SSE2 unops in scalar form.
+/// For the non-AVX defs, we need $src1 to be tied to $dst because
+/// the HW instructions are two-operand / destructive.
+multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ ValueType vt, ValueType ScalarVT,
+ X86MemOperand x86memop, Operand vec_memop,
+ ComplexPattern mem_cpat, Intrinsic Intr,
+ SDNode OpNode, OpndItins itins, Predicate target,
+ string Suffix> {
+ let hasSideEffects = 0 in {
+ def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
+ !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
+ [(set RC:$dst, (OpNode RC:$src1))], itins.rr>, Sched<[itins.Sched]>,
+ Requires<[target]>;
+ let mayLoad = 1 in
+ def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
+ !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
+ [(set RC:$dst, (OpNode (load addr:$src1)))], itins.rm>,
+ Sched<[itins.Sched.Folded, ReadAfterLd]>,
+ Requires<[target, OptForSize]>;
+
+ let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
+ def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let mayLoad = 1 in
+ def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, vec_memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ }
}
-}
- def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
- // For scalar unary operations, fold a load into the operation
- // only in OptForSize mode. It eliminates an instruction, but it also
- // eliminates a whole-register clobber (the load), so it introduces a
- // partial register update condition.
- def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
- Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
-let isCodeGenOnly = 1 in {
- def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>,
- Sched<[itins.Sched]>;
- def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>,
- Sched<[itins.Sched.Folded]>;
-}
-}
-
-/// sse1_fp_unop_s_rw - SSE1 unops where vector form has a read-write operand.
-multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
- OpndItins itins> {
-let Predicates = [HasAVX], hasSideEffects = 0 in {
- def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
- let mayLoad = 1 in {
- def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1,f32mem:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
- let isCodeGenOnly = 1 in
- def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat("v", OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let Predicates = [target] in {
+ def : Pat<(vt (OpNode mem_cpat:$src)),
+ (vt (COPY_TO_REGCLASS (vt (!cast<Instruction>(NAME#Suffix##m_Int)
+ (vt (IMPLICIT_DEF)), mem_cpat:$src)), RC))>;
+ // These are unary operations, but they are modeled as having 2 source operands
+ // because the high elements of the destination are unchanged in SSE.
+ def : Pat<(Intr VR128:$src),
+ (!cast<Instruction>(NAME#Suffix##r_Int) VR128:$src, VR128:$src)>;
+ def : Pat<(Intr (load addr:$src)),
+ (vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
+ addr:$src), VR128))>;
+ def : Pat<(Intr mem_cpat:$src),
+ (!cast<Instruction>(NAME#Suffix##m_Int)
+ (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
}
}
- def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
- // For scalar unary operations, fold a load into the operation
- // only in OptForSize mode. It eliminates an instruction, but it also
- // eliminates a whole-register clobber (the load), so it introduces a
- // partial register update condition.
- def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
- Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
- let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
- def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [], itins.rr>, Sched<[itins.Sched]>;
- let mayLoad = 1, hasSideEffects = 0 in
- def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ ValueType vt, ValueType ScalarVT,
+ X86MemOperand x86memop, Operand vec_memop,
+ ComplexPattern mem_cpat,
+ Intrinsic Intr, SDNode OpNode, OpndItins itins,
+ Predicate target, string Suffix> {
+ let hasSideEffects = 0 in {
+ def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [], itins.rr>, Sched<[itins.Sched]>;
+ let mayLoad = 1 in
+ def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let isCodeGenOnly = 1 in {
+      // TODO: uncomment when all r_Int forms are added to X86InstrInfo.cpp.
+ //def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
+ // (ins VR128:$src1, VR128:$src2),
+ // !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ // []>, Sched<[itins.Sched.Folded]>;
+ let mayLoad = 1 in
+ def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, vec_memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ }
}
+
+ let Predicates = [target] in {
+ def : Pat<(OpNode RC:$src), (!cast<Instruction>("V"#NAME#Suffix##r)
+ (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
+
+ def : Pat<(vt (OpNode mem_cpat:$src)),
+ (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
+ mem_cpat:$src)>;
+
+    // TODO: use the r_Int form when it is ready.
+ //def : Pat<(Intr VR128:$src), (!cast<Instruction>("V"#NAME#Suffix##r_Int)
+ // (VT (IMPLICIT_DEF)), VR128:$src)>;
+ def : Pat<(Intr VR128:$src),
+ (vt (COPY_TO_REGCLASS(
+ !cast<Instruction>("V"#NAME#Suffix##r) (ScalarVT (IMPLICIT_DEF)),
+ (ScalarVT (COPY_TO_REGCLASS VR128:$src, RC))), VR128))>;
+ def : Pat<(Intr mem_cpat:$src),
+ (!cast<Instruction>("V"#NAME#Suffix##m_Int)
+ (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
+ }
+ let Predicates = [target, OptForSize] in
+ def : Pat<(ScalarVT (OpNode (load addr:$src))),
+ (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
+ addr:$src)>;
}
/// sse1_fp_unop_p - SSE1 unops in packed form.
@@ -3693,53 +3522,6 @@ let Predicates = [HasAVX] in {
} // isCodeGenOnly = 1
}
-/// sse2_fp_unop_s - SSE2 unops in scalar form.
-multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
-let Predicates = [HasAVX], hasSideEffects = 0 in {
- def V#NAME#SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- !strconcat("v", OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
- let mayLoad = 1 in {
- def V#NAME#SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1,f64mem:$src2),
- !strconcat("v", OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
- let isCodeGenOnly = 1 in
- def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2),
- !strconcat("v", OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX_4V, VEX_LIG,
- Sched<[itins.Sched.Folded, ReadAfterLd]>;
- }
-}
-
- def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>,
- Sched<[itins.Sched]>;
- // See the comments in sse1_fp_unop_s for why this is OptForSize.
- def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
- Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
-let isCodeGenOnly = 1 in {
- def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>,
- Sched<[itins.Sched]>;
- def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>,
- Sched<[itins.Sched.Folded]>;
-}
-}
-
/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
SDNode OpNode, OpndItins itins> {
@@ -3776,90 +3558,47 @@ let Predicates = [HasAVX] in {
Sched<[itins.Sched.Folded]>;
}
+multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ OpndItins itins> {
+ defm SS : sse_fp_unop_s<opc, OpcodeStr##ss, FR32, v4f32, f32, f32mem,
+ ssmem, sse_load_f32,
+ !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
+ itins, UseSSE1, "SS">, XS;
+ defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr##ss, FR32, v4f32, f32,
+ f32mem, ssmem, sse_load_f32,
+ !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
+ itins, HasAVX, "SS">, XS, VEX_4V, VEX_LIG;
+}
+
+multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ OpndItins itins> {
+ defm SD : sse_fp_unop_s<opc, OpcodeStr##sd, FR64, v2f64, f64, f64mem,
+ sdmem, sse_load_f64,
+ !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
+ OpNode, itins, UseSSE2, "SD">, XD;
+ defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr##sd, FR64, v2f64, f64,
+ f64mem, sdmem, sse_load_f64,
+ !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
+ OpNode, itins, HasAVX, "SD">, XD, VEX_4V, VEX_LIG;
+}
+
// Square root.
-defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
- SSE_SQRTSS>,
+defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
- sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
- SSE_SQRTSD>,
+ sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
+defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS>,
sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
int_x86_avx_rsqrt_ps_256, SSE_RSQRTPS>;
-defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, SSE_RCPS>,
+defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>,
sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps,
int_x86_avx_rcp_ps_256, SSE_RCPP>;
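As the comment above notes, rsqrtss/rcpss deliver only about 12 bits of precision. A common refinement is one Newton-Raphson step, sketched here in C (illustrative, not part of the patch):

    #include <xmmintrin.h>

    // One NR step for 1/sqrt(x): y' = y * (1.5 - 0.5 * x * y * y).
    // This roughly doubles the number of correct bits (~12 -> ~23).
    float fast_rsqrt(float x) {
        float y = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));
        return y * (1.5f - 0.5f * x * y * y);
    }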
-let Predicates = [UseAVX] in {
- def : Pat<(f32 (fsqrt FR32:$src)),
- (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
- def : Pat<(f32 (fsqrt (load addr:$src))),
- (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
- def : Pat<(f64 (fsqrt FR64:$src)),
- (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
- def : Pat<(f64 (fsqrt (load addr:$src))),
- (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-
- def : Pat<(f32 (X86frsqrt FR32:$src)),
- (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
- def : Pat<(f32 (X86frsqrt (load addr:$src))),
- (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-
- def : Pat<(f32 (X86frcp FR32:$src)),
- (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
- def : Pat<(f32 (X86frcp (load addr:$src))),
- (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
-}
-let Predicates = [UseAVX] in {
- def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
- (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128:$src, FR32)),
- VR128)>;
- def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
- (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-
- def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
- (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128:$src, FR64)),
- VR128)>;
- def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
- (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
-}
-
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
- (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128:$src, FR32)),
- VR128)>;
- def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
- (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-
- def : Pat<(int_x86_sse_rcp_ss VR128:$src),
- (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128:$src, FR32)),
- VR128)>;
- def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
- (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-}
-
-// Reciprocal approximations. Note that these typically require refinement
-// in order to obtain suitable precision.
-let Predicates = [UseSSE1] in {
- def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
- (RSQRTSSr_Int VR128:$src, VR128:$src)>;
- def : Pat<(int_x86_sse_rcp_ss VR128:$src),
- (RCPSSr_Int VR128:$src, VR128:$src)>;
-}
-
// There are no f64 versions of the reciprocal approximation instructions.
//===----------------------------------------------------------------------===//
@@ -3974,14 +3713,14 @@ let SchedRW = [WriteLoad] in {
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
"clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
- IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
+ IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
}
let SchedRW = [WriteNop] in {
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
-def PAUSE : I<0x90, RawFrm, (outs), (ins),
- "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
+def PAUSE : I<0x90, RawFrm, (outs), (ins),
+ "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
OBXS, Requires<[HasSSE2]>;
}
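PAUSE is the canonical spin-wait hint; because it decodes as REP;NOP on pre-SSE2 parts it is safe to emit unconditionally. A small usage sketch in C++ (illustrative, not from this patch):

#include <immintrin.h>
#include <atomic>

// _mm_pause() lowers to the PAUSE instruction defined above; on CPUs that
// predate it, the same bytes execute as a plain NOP.
void spin_until_set(std::atomic<bool> &flag) {
  while (!flag.load(std::memory_order_acquire))
    _mm_pause();  // tell the core this is a spin-wait loop
}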
@@ -3989,7 +3728,7 @@ let SchedRW = [WriteFence] in {
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
"sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
- TB, Requires<[HasSSE1]>;
+ PS, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
"lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
TB, Requires<[HasSSE2]>;
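SFENCE (SSE1) orders earlier stores, which matters mainly after non-temporal stores; LFENCE and MFENCE arrived with SSE2. A hedged C++ sketch (illustrative; assumes dst is 16-byte aligned):

#include <xmmintrin.h>

// Publish a non-temporal store: SFENCE guarantees the MOVNTPS data is
// globally visible before any later store.
void publish(float *dst, __m128 v) {
  _mm_stream_ps(dst, v);  // non-temporal store, bypasses the cache
  _mm_sfence();           // lowers to the SFENCE defined above
}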
@@ -4013,12 +3752,14 @@ def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
"stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;
-def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
- "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
- IIC_SSE_LDMXCSR>, Sched<[WriteLoad]>;
-def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
- "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
- IIC_SSE_STMXCSR>, Sched<[WriteStore]>;
+let Predicates = [UseSSE1] in {
+def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
+ IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
+def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
+ IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
+}
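LDMXCSR/STMXCSR are reachable from C++ via _mm_setcsr/_mm_getcsr. For example, setting MXCSR bit 15 enables flush-to-zero (sketch, not from this patch):

#include <xmmintrin.h>

// Read-modify-write MXCSR: STMXCSR then LDMXCSR underneath.
void enable_flush_to_zero() {
  _mm_setcsr(_mm_getcsr() | 0x8000);  // bit 15 = FTZ
}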
//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
@@ -4026,7 +3767,7 @@ def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
let ExeDomain = SSEPackedInt in { // SSE integer instructions
-let neverHasSideEffects = 1, SchedRW = [WriteMove] in {
+let hasSideEffects = 0, SchedRW = [WriteMove] in {
def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
VEX;
@@ -4061,7 +3802,7 @@ def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
}
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
- neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
+ hasSideEffects = 0, SchedRW = [WriteLoad] in {
def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
VEX;
@@ -4078,7 +3819,7 @@ let Predicates = [HasAVX] in {
}
}
-let mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
+let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
@@ -4098,7 +3839,7 @@ def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
}
let SchedRW = [WriteMove] in {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
@@ -4119,7 +3860,7 @@ def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
} // SchedRW
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
- neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
+ hasSideEffects = 0, SchedRW = [WriteLoad] in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
@@ -4131,7 +3872,7 @@ def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
XS, Requires<[UseSSE2]>;
}
-let mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
+let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
@@ -4211,7 +3952,7 @@ multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, SDNode OpNode,
SDNode OpNode2, RegisterClass RC,
ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
- ShiftOpndItins itins,
+ PatFrag ld_frag, ShiftOpndItins itins,
bit Is2Addr = 1> {
// src2 is always 128-bit
def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
@@ -4227,10 +3968,10 @@ multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (DstVT (OpNode RC:$src1,
- (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>,
+ (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
Sched<[WriteVecShiftLd, ReadAfterLd]>;
def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
- (ins RC:$src1, i8imm:$src2),
+ (ins RC:$src1, u8imm:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
@@ -4338,45 +4079,45 @@ defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64,
+ VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64,
+ VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
// 128-bit logical shifts.
def VPSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
+ (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
def VPSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
+ (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
// PSRADQri doesn't exist in SSE[1-3].
}
@@ -4384,45 +4125,45 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
let Predicates = [HasAVX2] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR256, v16i16, v8i16, bc_v8i16,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR256, v8i32, v4i32, bc_v4i32,
+ VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR256, v4i64, v2i64, bc_v2i64,
+ VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR256, v16i16, v8i16, bc_v8i16,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR256, v8i32, v4i32, bc_v4i32,
+ VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR256, v4i64, v2i64, bc_v2i64,
+ VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR256, v16i16, v8i16, bc_v8i16,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR256, v8i32, v4i32, bc_v4i32,
+ VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
+let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
// 256-bit logical shifts.
def VPSLLDQYri : PDIi8<0x73, MRM7r,
- (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
+ (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
+ (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
def VPSRLDQYri : PDIi8<0x73, MRM3r,
- (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
+ (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
+ (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
// PSRADQYri doesn't exist in SSE[1-3].
}
@@ -4430,85 +4171,58 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64,
+ VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64,
+ VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16,
+ VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32,
+ VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
-let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
+let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
// 128-bit logical shifts.
def PSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"pslldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))],
- IIC_SSE_INTSHDQ_P_RI>;
+ (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
+ IIC_SSE_INTSHDQ_P_RI>;
def PSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"psrldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))],
- IIC_SSE_INTSHDQ_P_RI>;
+ (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
+ IIC_SSE_INTSHDQ_P_RI>;
// PSRADQri doesn't exist in SSE[1-3].
}
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
- (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
- def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
- (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
(VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
-
- // Shift up / down and insert zero's.
- def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
- (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
- def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
- (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
-}
-
-let Predicates = [HasAVX2] in {
- def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
- (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
- def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
- (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
}
let Predicates = [UseSSE2] in {
- def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
- (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
- def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
- (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
(PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
-
- // Shift up / down and insert zero's.
- def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
- (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
- def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
- (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}
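PSLLDQ/PSRLDQ shift the full 128-bit register by a byte count, shifting in zeros, which is exactly what the new X86vshldq/X86vshrdq nodes model. The same operation viewed from C++ (illustrative; the count must be a compile-time constant, matching the u8imm operand above):

#include <emmintrin.h>

// _mm_slli_si128 lowers to PSLLDQ: whole-register left shift by bytes.
__m128i shift_left_4_bytes(__m128i v) {
  return _mm_slli_si128(v, 4);  // zeros shifted into the low bytes
}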
//===---------------------------------------------------------------------===//
@@ -4537,14 +4251,14 @@ multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
SDNode OpNode> {
let Predicates = [HasAVX] in {
def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src1, i8imm:$src2),
+ (ins i128mem:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
@@ -4555,14 +4269,14 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, i8imm:$src2),
+ (ins VR256:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
- (ins i256mem:$src1, i8imm:$src2),
+ (ins i256mem:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
@@ -4573,14 +4287,14 @@ let Predicates = [HasAVX2] in {
let Predicates = [UseSSE2] in {
def ri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
def mi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
+ (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
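PSHUFD's control byte selects one source lane per two bits, which is why the operand is now printed as an unsigned u8imm. Sketch in C++ (illustrative, not from this patch):

#include <emmintrin.h>

// _mm_shuffle_epi32 lowers to PSHUFD; _MM_SHUFFLE packs four 2-bit lane
// selectors into the immediate byte.
__m128i reverse_i32_lanes(__m128i v) {
  return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));  // {v3,v2,v1,v0}
}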
@@ -4616,7 +4330,7 @@ let Predicates = [UseSSE2] in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- bit Is2Addr = 1> {
+ PatFrag ld_frag, bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4634,7 +4348,7 @@ multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(OutVT (OpNode VR128:$src1,
- (bc_frag (memopv2i64 addr:$src2)))))]>,
+ (bc_frag (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
@@ -4653,13 +4367,13 @@ multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OutVT (OpNode VR256:$src1,
- (bc_frag (memopv4i64 addr:$src2)))))]>,
+ (bc_frag (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- bit Is2Addr = 1> {
+ PatFrag ld_frag, bit Is2Addr = 1> {
def rr : SS48I<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4677,7 +4391,7 @@ multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(OutVT (OpNode VR128:$src1,
- (bc_frag (memopv2i64 addr:$src2)))))]>,
+ (bc_frag (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
@@ -4696,20 +4410,20 @@ multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OutVT (OpNode VR256:$src1,
- (bc_frag (memopv4i64 addr:$src2)))))]>,
+ (bc_frag (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
- bc_v8i16, 0>, VEX_4V;
+ bc_v8i16, loadv2i64, 0>, VEX_4V;
defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
- bc_v4i32, 0>, VEX_4V;
+ bc_v4i32, loadv2i64, 0>, VEX_4V;
defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
- bc_v8i16, 0>, VEX_4V;
+ bc_v8i16, loadv2i64, 0>, VEX_4V;
defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
- bc_v4i32, 0>, VEX_4V;
+ bc_v4i32, loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
@@ -4726,16 +4440,16 @@ let Predicates = [HasAVX2] in {
let Constraints = "$src1 = $dst" in {
defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
- bc_v8i16>;
+ bc_v8i16, memopv2i64>;
defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
- bc_v4i32>;
+ bc_v4i32, memopv2i64>;
defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
- bc_v8i16>;
+ bc_v8i16, memopv2i64>;
let Predicates = [HasSSE41] in
defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
- bc_v4i32>;
+ bc_v4i32, memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4745,7 +4459,8 @@ let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
- SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
+ SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4759,8 +4474,7 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (OpNode VR128:$src1,
- (bc_frag (memopv2i64
- addr:$src2))))],
+ (bc_frag (ld_frag addr:$src2))))],
IIC_SSE_UNPCK>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
@@ -4776,28 +4490,28 @@ multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
(outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst, (OpNode VR256:$src1,
- (bc_frag (memopv4i64 addr:$src2))))]>,
+ (bc_frag (loadv4i64 addr:$src2))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
- bc_v16i8, 0>, VEX_4V;
+ bc_v16i8, loadv2i64, 0>, VEX_4V;
defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
- bc_v8i16, 0>, VEX_4V;
+ bc_v8i16, loadv2i64, 0>, VEX_4V;
defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
- bc_v4i32, 0>, VEX_4V;
+ bc_v4i32, loadv2i64, 0>, VEX_4V;
defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
- bc_v2i64, 0>, VEX_4V;
+ bc_v2i64, loadv2i64, 0>, VEX_4V;
defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
- bc_v16i8, 0>, VEX_4V;
+ bc_v16i8, loadv2i64, 0>, VEX_4V;
defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
- bc_v8i16, 0>, VEX_4V;
+ bc_v8i16, loadv2i64, 0>, VEX_4V;
defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
- bc_v4i32, 0>, VEX_4V;
+ bc_v4i32, loadv2i64, 0>, VEX_4V;
defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
- bc_v2i64, 0>, VEX_4V;
+ bc_v2i64, loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
@@ -4822,22 +4536,22 @@ let Predicates = [HasAVX2] in {
let Constraints = "$src1 = $dst" in {
defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
- bc_v16i8>;
+ bc_v16i8, memopv2i64>;
defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
- bc_v8i16>;
+ bc_v8i16, memopv2i64>;
defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
- bc_v4i32>;
+ bc_v4i32, memopv2i64>;
defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
- bc_v2i64>;
+ bc_v2i64, memopv2i64>;
defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
- bc_v16i8>;
+ bc_v16i8, memopv2i64>;
defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
- bc_v8i16>;
+ bc_v8i16, memopv2i64>;
defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
- bc_v4i32>;
+ bc_v4i32, memopv2i64>;
defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
- bc_v2i64>;
+ bc_v2i64, memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4849,7 +4563,7 @@ let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
def rri : Ii8<0xC4, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1,
- GR32orGR64:$src2, i32i8imm:$src3),
+ GR32orGR64:$src2, u8imm:$src3),
!if(Is2Addr,
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
@@ -4858,7 +4572,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
def rmi : Ii8<0xC4, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1,
- i16mem:$src2, i32i8imm:$src3),
+ i16mem:$src2, u8imm:$src3),
!if(Is2Addr,
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
@@ -4871,13 +4585,13 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
- (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))]>, PD, VEX,
Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
- (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))], IIC_SSE_PEXTRW>,
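PINSRW/PEXTRW move a single 16-bit lane between a GPR and an XMM register, with the lane index in the immediate byte. C++ equivalent (illustrative; the lane index must be constant):

#include <emmintrin.h>

// PEXTRW zero-extends the selected word into a 32-bit GPR;
// PINSRW replaces one word and leaves the rest untouched.
int get_lane3(__m128i v) { return _mm_extract_epi16(v, 3); }
__m128i set_lane3(__m128i v, int s) { return _mm_insert_epi16(v, s, 3); }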
@@ -4974,6 +4688,10 @@ def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
[(set VR128:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
+def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"movq\t{$src, $dst|$dst, $src}",
@@ -4995,6 +4713,10 @@ def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
[(set VR128:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
+def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
@@ -5081,6 +4803,15 @@ def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
IIC_SSE_MOVD_ToGP>;
} //SchedRW
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
+def VMOVPQIto64rm : VRS2I<0x7E, MRMDestMem, (outs i64mem:$dst),
+ (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
+def MOVPQIto64rm : RS2I<0x7E, MRMDestMem, (outs i64mem:$dst), (ins VR128:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+
//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
//
@@ -5213,7 +4944,7 @@ def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
// Move Quadword Int to Packed Quadword Int
//
-let SchedRW = [WriteLoad] in {
+let ExeDomain = SSEPackedInt, SchedRW = [WriteLoad] in {
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -5225,12 +4956,12 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(v2i64 (scalar_to_vector (loadi64 addr:$src))))],
IIC_SSE_MOVDQ>, XS,
Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
-} // SchedRW
+} // ExeDomain, SchedRW
//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
-let SchedRW = [WriteStore] in {
+let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (vector_extract (v2i64 VR128:$src),
@@ -5241,7 +4972,7 @@ def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
[(store (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOVDQ>;
-} // SchedRW
+} // ExeDomain, SchedRW
// For disassembler only
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
@@ -5262,7 +4993,7 @@ let Predicates = [UseSSE2] in
def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
(MOVPQI2QImr addr:$dst, VR128:$src)>;
-let isCodeGenOnly = 1, AddedComplexity = 20 in {
+let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -5278,7 +5009,7 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(loadi64 addr:$src))))))],
IIC_SSE_MOVDQ>,
XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
-}
+} // ExeDomain, isCodeGenOnly, AddedComplexity
let Predicates = [UseAVX], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
@@ -5304,7 +5035,7 @@ def : Pat<(v4i64 (X86vzload addr:$src)),
// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
// IA32 document. movq xmm1, xmm2 does clear the high bits.
//
-let SchedRW = [WriteVecLogic] in {
+let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -5317,9 +5048,9 @@ def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
IIC_SSE_MOVQ_RR>,
XS, Requires<[UseSSE2]>;
-} // SchedRW
+} // ExeDomain, SchedRW
-let isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
+let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -5335,7 +5066,7 @@ def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
IIC_SSE_MOVDQ>,
XS, Requires<[UseSSE2]>;
}
-} // isCodeGenOnly, SchedRW
+} // ExeDomain, isCodeGenOnly, SchedRW
let AddedComplexity = 20 in {
let Predicates = [UseAVX] in {
@@ -5414,10 +5145,10 @@ let Predicates = [UseSSE3] in {
//===---------------------------------------------------------------------===//
multiclass sse3_replicate_dfp<string OpcodeStr> {
-let neverHasSideEffects = 1 in
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [], IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
+ [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
+ IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
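MOVDDUP broadcasts the low double into both lanes; the register form above now carries that pattern directly instead of being a pattern-less, side-effect-free def. C++ sketch (illustrative):

#include <pmmintrin.h>

// _mm_movedup_pd lowers to MOVDDUP: result = {v[0], v[0]}.
__m128d dup_low_double(__m128d v) { return _mm_movedup_pd(v); }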
@@ -5514,7 +5245,7 @@ def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
X86MemOperand x86memop, OpndItins itins,
- bit Is2Addr = 1> {
+ PatFrag ld_frag, bit Is2Addr = 1> {
def rr : I<0xD0, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
@@ -5527,62 +5258,62 @@ multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rr>,
+ [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rr>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
- f128mem, SSE_ALU_F32P, 0>, XD, VEX_4V;
+ f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
- f256mem, SSE_ALU_F32P, 0>, XD, VEX_4V, VEX_L;
+ f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
- f128mem, SSE_ALU_F64P, 0>, PD, VEX_4V;
+ f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
- f256mem, SSE_ALU_F64P, 0>, PD, VEX_4V, VEX_L;
+ f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
}
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
let ExeDomain = SSEPackedSingle in
defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
- f128mem, SSE_ALU_F32P>, XD;
+ f128mem, SSE_ALU_F32P, memopv4f32>, XD;
let ExeDomain = SSEPackedDouble in
defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
- f128mem, SSE_ALU_F64P>, PD;
+ f128mem, SSE_ALU_F64P, memopv2f64>, PD;
}
// Patterns used to select 'addsub' instructions.
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
(VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
- def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 (memop addr:$rhs)))),
+ def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
(VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
(VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
- def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 (memop addr:$rhs)))),
+ def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
(VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
(VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
- def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 (memop addr:$rhs)))),
+ def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
(VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
(VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
- def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 (memop addr:$rhs)))),
+ def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
(VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
}
let Predicates = [UseSSE3] in {
def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
(ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
- def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 (memop addr:$rhs)))),
+ def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
(ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
(ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
- def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 (memop addr:$rhs)))),
+ def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
(ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
}
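ADDSUBPS/PD alternate subtract and add per lane, e.g. for v4f32: {a0-b0, a1+b1, a2-b2, a3+b3}, which is what the X86Addsub node above selects. C++ sketch (illustrative):

#include <pmmintrin.h>

// _mm_addsub_ps lowers to ADDSUBPS: even lanes subtract, odd lanes add.
__m128 addsub(__m128 a, __m128 b) { return _mm_addsub_ps(a, b); }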
@@ -5592,7 +5323,8 @@ let Predicates = [UseSSE3] in {
// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
- X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
+ X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
@@ -5604,11 +5336,12 @@ multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
+ [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
- X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
+ X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
@@ -5620,41 +5353,45 @@ multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
+ [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
- X86fhadd, 0>, VEX_4V;
+ X86fhadd, loadv4f32, 0>, VEX_4V;
defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
- X86fhsub, 0>, VEX_4V;
+ X86fhsub, loadv4f32, 0>, VEX_4V;
defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
- X86fhadd, 0>, VEX_4V, VEX_L;
+ X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
- X86fhsub, 0>, VEX_4V, VEX_L;
+ X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
- X86fhadd, 0>, VEX_4V;
+ X86fhadd, loadv2f64, 0>, VEX_4V;
defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
- X86fhsub, 0>, VEX_4V;
+ X86fhsub, loadv2f64, 0>, VEX_4V;
defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
- X86fhadd, 0>, VEX_4V, VEX_L;
+ X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
- X86fhsub, 0>, VEX_4V, VEX_L;
+ X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
}
}
let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedSingle in {
- defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
- defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
+ defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
+ memopv4f32>;
+ defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
+ memopv4f32>;
}
let ExeDomain = SSEPackedDouble in {
- defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
- defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
+ defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
+ memopv2f64>;
+ defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
+ memopv2f64>;
}
}
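HADDPS adds adjacent pairs within each source: {a0+a1, a2+a3, b0+b1, b2+b3}. Two applications reduce a vector to a full horizontal sum, a common use of these definitions (illustrative C++ sketch):

#include <pmmintrin.h>

// Horizontal sum of four floats via two HADDPS steps.
float sum4(__m128 v) {
  v = _mm_hadd_ps(v, v);  // {v0+v1, v2+v3, v0+v1, v2+v3}
  v = _mm_hadd_ps(v, v);  // all lanes hold the total
  return _mm_cvtss_f32(v);
}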
@@ -5664,8 +5401,8 @@ let Constraints = "$src1 = $dst" in {
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128> {
+multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
+ PatFrag ld_frag> {
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
@@ -5677,7 +5414,7 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId128
- (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
+ (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
Sched<[WriteVecALULd]>;
}
@@ -5695,7 +5432,7 @@ multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
(IntId256
- (bitconvert (memopv4i64 addr:$src))))]>,
+ (bitconvert (loadv4i64 addr:$src))))]>,
Sched<[WriteVecALULd]>;
}
@@ -5710,12 +5447,12 @@ def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
let Predicates = [HasAVX] in {
- defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
- int_x86_ssse3_pabs_b_128>, VEX;
- defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw",
- int_x86_ssse3_pabs_w_128>, VEX;
- defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd",
- int_x86_ssse3_pabs_d_128>, VEX;
+ defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
+ loadv2i64>, VEX;
+ defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
+ loadv2i64>, VEX;
+ defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
+ loadv2i64>, VEX;
def : Pat<(xor
(bc_v2i64 (v16i1sextv16i8)),
@@ -5753,12 +5490,12 @@ let Predicates = [HasAVX2] in {
(VPABSDrr256 VR256:$src)>;
}
-defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
- int_x86_ssse3_pabs_b_128>;
-defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
- int_x86_ssse3_pabs_w_128>;
-defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
- int_x86_ssse3_pabs_d_128>;
+defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
+ memopv2i64>;
+defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
+ memopv2i64>;
+defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
+ memopv2i64>;
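PABSB/W/D compute a per-lane absolute value; the xor/subtract patterns below let sign-mask idioms in the IR select these instructions. C++ view (illustrative):

#include <tmmintrin.h>

// _mm_abs_epi32 lowers to PABSD: lane-wise |x|; note INT_MIN maps to
// itself, since the result is not saturated.
__m128i abs_i32(__m128i v) { return _mm_abs_epi32(v); }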
let Predicates = [HasSSSE3] in {
def : Pat<(xor
@@ -5830,7 +5567,7 @@ multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
Intrinsic IntId128, OpndItins itins,
- bit Is2Addr = 1> {
+ PatFrag ld_frag, bit Is2Addr = 1> {
let isCommutable = 1 in
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
@@ -5846,7 +5583,7 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(IntId128 VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>,
+ (bitconvert (ld_frag addr:$src2))))]>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
@@ -5895,17 +5632,17 @@ let isCommutable = 0 in {
SSE_PSHUFB, 0>, VEX_4V;
defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
int_x86_ssse3_phadd_sw_128,
- SSE_PHADDSUBSW, 0>, VEX_4V;
+ SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
int_x86_ssse3_phsub_sw_128,
- SSE_PHADDSUBSW, 0>, VEX_4V;
+ SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
int_x86_ssse3_pmadd_ub_sw_128,
- SSE_PMADD, 0>, VEX_4V;
+ SSE_PMADD, loadv2i64, 0>, VEX_4V;
}
defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
int_x86_ssse3_pmul_hr_sw_128,
- SSE_PMULHRSW, 0>, VEX_4V;
+ SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
}
let ImmT = NoImm, Predicates = [HasAVX2] in {
@@ -5970,16 +5707,17 @@ let isCommutable = 0 in {
memopv2i64, i128mem, SSE_PSHUFB>;
defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
int_x86_ssse3_phadd_sw_128,
- SSE_PHADDSUBSW>;
+ SSE_PHADDSUBSW, memopv2i64>;
defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
int_x86_ssse3_phsub_sw_128,
- SSE_PHADDSUBSW>;
+ SSE_PHADDSUBSW, memopv2i64>;
defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
- int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
+ int_x86_ssse3_pmadd_ub_sw_128,
+ SSE_PMADD, memopv2i64>;
}
defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
int_x86_ssse3_pmul_hr_sw_128,
- SSE_PMULHRSW>;
+ SSE_PMULHRSW, memopv2i64>;
}
//===---------------------------------------------------------------------===//
@@ -5987,9 +5725,9 @@ defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
//===---------------------------------------------------------------------===//
multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -5997,7 +5735,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
[], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6007,15 +5745,15 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
}
multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ (ins VR256:$src1, VR256:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
+ (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
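PALIGNR concatenates the two sources and extracts a byte-aligned 128-bit window (per 128-bit lane for the Y form); the shift amount is the u8imm operand above. C++ sketch (illustrative; the count must be constant):

#include <tmmintrin.h>

// _mm_alignr_epi8 lowers to PALIGNR: take bytes [5..20] of (hi:lo).
__m128i align_by_5(__m128i hi, __m128i lo) {
  return _mm_alignr_epi8(hi, lo, 5);
}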
@@ -6094,552 +5832,271 @@ def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
-multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- OpndItins itins = DEFAULT_ITINS> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))], itins.rr>,
- Sched<[itins.Sched]>;
-
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))],
- itins.rm>, Sched<[itins.Sched.Folded]>;
-}
-
-multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId, X86FoldableSchedWrite Sched> {
- def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
-
- def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId (load addr:$src)))]>,
- Sched<[Sched.Folded]>;
-}
-
-let Predicates = [HasAVX] in {
-defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw",
- int_x86_sse41_pmovsxbw,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd",
- int_x86_sse41_pmovsxwd,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq",
- int_x86_sse41_pmovsxdq,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw",
- int_x86_sse41_pmovzxbw,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd",
- int_x86_sse41_pmovzxwd,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq",
- int_x86_sse41_pmovzxdq,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-}
-
-let Predicates = [HasAVX2] in {
-defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
- int_x86_avx2_pmovsxbw,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
- int_x86_avx2_pmovsxwd,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
- int_x86_avx2_pmovsxdq,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
- int_x86_avx2_pmovzxbw,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
- int_x86_avx2_pmovzxwd,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
- int_x86_avx2_pmovzxdq,
- WriteShuffle>, VEX, VEX_L;
-}
-
-defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq,
- SSE_INTALU_ITINS_SHUFF_P>;
-
-let Predicates = [HasAVX] in {
- // Common patterns involving scalar load.
- def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
- (VPMOVSXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
- (VPMOVSXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
- (VPMOVSXBWrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
- (VPMOVSXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
- (VPMOVSXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
- (VPMOVSXWDrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
- (VPMOVSXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
- (VPMOVSXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
- (VPMOVSXDQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
- (VPMOVZXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
- (VPMOVZXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
- (VPMOVZXBWrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
- (VPMOVZXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
- (VPMOVZXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
- (VPMOVZXWDrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
- (VPMOVZXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
- (VPMOVZXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
- (VPMOVZXDQrm addr:$src)>;
-}
-
-let Predicates = [UseSSE41] in {
- // Common patterns involving scalar load.
- def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
- (PMOVSXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
- (PMOVSXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
- (PMOVSXBWrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
- (PMOVSXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
- (PMOVSXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
- (PMOVSXWDrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
- (PMOVSXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
- (PMOVSXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
- (PMOVSXDQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
- (PMOVZXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
- (PMOVZXBWrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
- (PMOVZXBWrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
- (PMOVZXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
- (PMOVZXWDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
- (PMOVZXWDrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
- (PMOVZXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
- (PMOVZXDQrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
- (PMOVZXDQrm addr:$src)>;
-}
-
-multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- OpndItins itins = DEFAULT_ITINS> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
+ RegisterClass OutRC, RegisterClass InRC,
+ OpndItins itins> {
+ def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))], itins.rr>,
+ [], itins.rr>,
Sched<[itins.Sched]>;
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))],
- itins.rm>, Sched<[itins.Sched.Folded]>;
-}
-
-multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId, X86FoldableSchedWrite Sched> {
- def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
-
- def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst,
- (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
- Sched<[Sched.Folded]>;
-}
-
-let Predicates = [HasAVX] in {
-defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq,
- DEFAULT_ITINS_SHUFFLESCHED>, VEX;
-}
-
-let Predicates = [HasAVX2] in {
-defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
- int_x86_avx2_pmovsxbd, WriteShuffle>,
- VEX, VEX_L;
-defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
- int_x86_avx2_pmovsxwq, WriteShuffle>,
- VEX, VEX_L;
-defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
- int_x86_avx2_pmovzxbd, WriteShuffle>,
- VEX, VEX_L;
-defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
- int_x86_avx2_pmovzxwq, WriteShuffle>,
- VEX, VEX_L;
-}
-
-defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd,
- SSE_INTALU_ITINS_SHUFF_P>;
-defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq,
- SSE_INTALU_ITINS_SHUFF_P>;
-
-let Predicates = [HasAVX] in {
- // Common patterns involving scalar load
- def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
- (VPMOVSXBDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
- (VPMOVSXWQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
- (VPMOVZXBDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
- (VPMOVZXWQrm addr:$src)>;
-}
-
-let Predicates = [UseSSE41] in {
- // Common patterns involving scalar load
- def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
- (PMOVSXBDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
- (PMOVSXWQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
- (PMOVZXBDrm addr:$src)>;
- def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
- (PMOVZXWQrm addr:$src)>;
-}
-
-multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- X86FoldableSchedWrite Sched> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
-
- // Expecting a i16 load any extended to i32 value.
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
+ def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId (bitconvert
- (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
- Sched<[Sched.Folded]>;
+ [],
+ itins.rm>, Sched<[itins.Sched.Folded]>;
}
-multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId, X86FoldableSchedWrite Sched> {
- def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
-
- // Expecting a i16 load any extended to i32 value.
- def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i16mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId (bitconvert
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
- Sched<[Sched.Folded]>;
-}
-
-let Predicates = [HasAVX] in {
-defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq,
- WriteShuffle>, VEX;
-defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq,
- WriteShuffle>, VEX;
-}
-let Predicates = [HasAVX2] in {
-defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq", int_x86_avx2_pmovsxbq,
- WriteShuffle>, VEX, VEX_L;
-defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq", int_x86_avx2_pmovzxbq,
- WriteShuffle>, VEX, VEX_L;
+multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
+ X86MemOperand MemOp, X86MemOperand MemYOp,
+ OpndItins SSEItins, OpndItins AVXItins,
+ OpndItins AVX2Itins> {
+ defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
+ let Predicates = [HasAVX] in
+ defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
+ VR128, VR128, AVXItins>, VEX;
+ let Predicates = [HasAVX2] in
+ defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
+ VR256, VR128, AVX2Itins>, VEX, VEX_L;
+}
+
+multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
+ X86MemOperand MemOp, X86MemOperand MemYOp> {
+ defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
+ MemOp, MemYOp,
+ SSE_INTALU_ITINS_SHUFF_P,
+ DEFAULT_ITINS_SHUFFLESCHED,
+ DEFAULT_ITINS_SHUFFLESCHED>;
+ defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
+ !strconcat("pmovzx", OpcodeStr),
+ MemOp, MemYOp,
+ SSE_INTALU_ITINS_SHUFF_P,
+ DEFAULT_ITINS_SHUFFLESCHED,
+ DEFAULT_ITINS_SHUFFLESCHED>;
+}
+
+defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
+defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
+defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
+
+defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
+defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
+
+defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
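// To make the NAME concatenation concrete: each defm above fans out through
// SS41I_pmovx_rm and SS41I_pmovx_rm_all, so "defm BW" produces roughly
//   PMOVSXBWrr   / PMOVSXBWrm     (SSE4.1)
//   VPMOVSXBWrr  / VPMOVSXBWrm    (AVX, VEX)
//   VPMOVSXBWYrr / VPMOVSXBWYrm   (AVX2, 256-bit, VEX.L)
// plus the matching PMOVZX records at opcode !add(opc, 0x10).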
+
+// AVX2 Patterns
+multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
+ // Register-Register patterns
+ def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
+ def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
+ def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
+
+ def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
+ (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
+ def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
+ (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
+
+ def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
+ (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
+
+ // On AVX2, we also support 256-bit inputs.
+ // FIXME: remove these patterns when the old shuffle lowering goes away.
+ def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
+ (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
+ (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
+ (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+
+ def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
+ (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
+ (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+
+ def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
+ (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+
+ // Simple Register-Memory patterns
+ def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+ def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+
+ def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+ (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+ def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+ (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+
+ def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
+ (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+
+ // AVX2 Register-Memory patterns
+ def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+
+ def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+
+ def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+
+ def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+ def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+
+ def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+
+ def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
}
-defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq,
- WriteShuffle>;
-defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq,
- WriteShuffle>;
let Predicates = [HasAVX2] in {
- def : Pat<(v16i16 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBWYrr VR128:$src)>;
- def : Pat<(v8i32 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBDYrr VR128:$src)>;
- def : Pat<(v4i64 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBQYrr VR128:$src)>;
-
- def : Pat<(v8i32 (X86vsext (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
- def : Pat<(v4i64 (X86vsext (v8i16 VR128:$src))), (VPMOVSXWQYrr VR128:$src)>;
-
- def : Pat<(v4i64 (X86vsext (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
-
- def : Pat<(v16i16 (X86vsext (v32i8 VR256:$src))),
- (VPMOVSXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v8i32 (X86vsext (v32i8 VR256:$src))),
- (VPMOVSXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (X86vsext (v32i8 VR256:$src))),
- (VPMOVSXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v8i32 (X86vsext (v16i16 VR256:$src))),
- (VPMOVSXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (X86vsext (v16i16 VR256:$src))),
- (VPMOVSXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v4i64 (X86vsext (v8i32 VR256:$src))),
- (VPMOVSXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v8i32 (X86vsext (v8i16 (bitconvert (v2i64 (load addr:$src)))))),
- (VPMOVSXWDYrm addr:$src)>;
- def : Pat<(v4i64 (X86vsext (v4i32 (bitconvert (v2i64 (load addr:$src)))))),
- (VPMOVSXDQYrm addr:$src)>;
-
- def : Pat<(v8i32 (X86vsext (v16i8 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVSXBDYrm addr:$src)>;
- def : Pat<(v8i32 (X86vsext (v16i8 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVSXBDYrm addr:$src)>;
-
- def : Pat<(v4i64 (X86vsext (v8i16 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVSXWQYrm addr:$src)>;
- def : Pat<(v4i64 (X86vsext (v8i16 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVSXWQYrm addr:$src)>;
-
- def : Pat<(v4i64 (X86vsext (v16i8 (bitconvert (v4i32
- (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVSXBQYrm addr:$src)>;
+ defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
+ defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
+}
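// To illustrate the string-based lookups: with OpcPrefix = "VPMOVSX" and
// ExtTy = "s", !cast<I>(OpcPrefix#BWYrr) resolves to VPMOVSXBWYrr and
// !cast<PatFrag>(ExtTy#"extloadvi8") to sextloadvi8, so the first pattern
// above instantiates as roughly:
//   def : Pat<(v16i16 (X86vsext (v16i8 VR128:$src))),
//             (VPMOVSXBWYrr VR128:$src)>;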
+
+// SSE4.1/AVX patterns.
+multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
+ SDNode ExtOp, PatFrag ExtLoad16> {
+ def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
+ def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
+ def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
+ (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
+
+ def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
+ (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
+ def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
+ (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
+
+ def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
+ (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
+
+ def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+ def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+ (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+
+ def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+ def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+ (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+
+ def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+
+ def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+
+ def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+
+ def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
+ (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+
+ def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+ def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+
+ def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
+ (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+
+ def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
+ (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
}
let Predicates = [HasAVX] in {
- // Common patterns involving scalar load
- def : Pat<(int_x86_sse41_pmovsxbq
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVSXBQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbq
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVZXBQrm addr:$src)>;
+ defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
+ defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
}
let Predicates = [UseSSE41] in {
- def : Pat<(v8i16 (X86vsext (v16i8 VR128:$src))), (PMOVSXBWrr VR128:$src)>;
- def : Pat<(v4i32 (X86vsext (v16i8 VR128:$src))), (PMOVSXBDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vsext (v16i8 VR128:$src))), (PMOVSXBQrr VR128:$src)>;
-
- def : Pat<(v4i32 (X86vsext (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vsext (v8i16 VR128:$src))), (PMOVSXWQrr VR128:$src)>;
-
- def : Pat<(v2i64 (X86vsext (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
-
- // Common patterns involving scalar load
- def : Pat<(int_x86_sse41_pmovsxbq
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVSXBQrm addr:$src)>;
-
- def : Pat<(int_x86_sse41_pmovzxbq
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVZXBQrm addr:$src)>;
-
- def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVSXWDrm addr:$src)>;
- def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVSXWDrm addr:$src)>;
- def : Pat<(v4i32 (X86vsext (v16i8 (bitconvert (v4i32
- (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVSXBDrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v8i16 (bitconvert (v4i32
- (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVSXWQrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v16i8 (bitconvert (v4i32
- (scalar_to_vector (extloadi32i16 addr:$src))))))),
- (PMOVSXBQrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVSXDQrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVSXDQrm addr:$src)>;
- def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVSXBWrm addr:$src)>;
- def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVSXBWrm addr:$src)>;
-}
-
-let Predicates = [HasAVX2] in {
- def : Pat<(v16i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWYrr VR128:$src)>;
- def : Pat<(v8i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDYrr VR128:$src)>;
- def : Pat<(v4i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQYrr VR128:$src)>;
-
- def : Pat<(v8i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>;
- def : Pat<(v4i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQYrr VR128:$src)>;
-
- def : Pat<(v4i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQYrr VR128:$src)>;
-
- def : Pat<(v16i16 (X86vzext (v32i8 VR256:$src))),
- (VPMOVZXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v8i32 (X86vzext (v32i8 VR256:$src))),
- (VPMOVZXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (X86vzext (v32i8 VR256:$src))),
- (VPMOVZXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v8i32 (X86vzext (v16i16 VR256:$src))),
- (VPMOVZXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (X86vzext (v16i16 VR256:$src))),
- (VPMOVZXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v4i64 (X86vzext (v8i32 VR256:$src))),
- (VPMOVZXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-}
-
-let Predicates = [HasAVX] in {
- def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWrr VR128:$src)>;
- def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQrr VR128:$src)>;
-
- def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQrr VR128:$src)>;
-
- def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQrr VR128:$src)>;
-
- def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVZXBWrm addr:$src)>;
- def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVZXBWrm addr:$src)>;
- def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVZXBDrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
- (VPMOVZXBQrm addr:$src)>;
-
- def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVZXWDrm addr:$src)>;
- def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVZXWDrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVZXWQrm addr:$src)>;
-
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVZXDQrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVZXDQrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
- (VPMOVZXDQrm addr:$src)>;
-
- def : Pat<(v8i16 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBWrr VR128:$src)>;
- def : Pat<(v4i32 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBQrr VR128:$src)>;
-
- def : Pat<(v4i32 (X86vsext (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vsext (v8i16 VR128:$src))), (VPMOVSXWQrr VR128:$src)>;
-
- def : Pat<(v2i64 (X86vsext (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
-
- def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVSXWDrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVSXDQrm addr:$src)>;
- def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVSXWDrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVSXDQrm addr:$src)>;
- def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2i64
- (scalar_to_vector (loadi64 addr:$src))))))),
- (VPMOVSXBWrm addr:$src)>;
- def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2f64
- (scalar_to_vector (loadf64 addr:$src))))))),
- (VPMOVSXBWrm addr:$src)>;
-
- def : Pat<(v4i32 (X86vsext (v16i8 (bitconvert (v4i32
- (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVSXBDrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v8i16 (bitconvert (v4i32
- (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVSXWQrm addr:$src)>;
- def : Pat<(v2i64 (X86vsext (v16i8 (bitconvert (v4i32
- (scalar_to_vector (extloadi32i16 addr:$src))))))),
- (VPMOVSXBQrm addr:$src)>;
-}
-
-let Predicates = [UseSSE41] in {
- def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (PMOVZXBWrr VR128:$src)>;
- def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (PMOVZXBDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (PMOVZXBQrr VR128:$src)>;
-
- def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (PMOVZXWDrr VR128:$src)>;
- def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (PMOVZXWQrr VR128:$src)>;
-
- def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (PMOVZXDQrr VR128:$src)>;
-
- def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVZXBWrm addr:$src)>;
- def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVZXBWrm addr:$src)>;
- def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVZXBDrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
- (PMOVZXBQrm addr:$src)>;
-
- def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVZXWDrm addr:$src)>;
- def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVZXWDrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVZXWQrm addr:$src)>;
-
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
- (PMOVZXDQrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
- (PMOVZXDQrm addr:$src)>;
- def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
- (PMOVZXDQrm addr:$src)>;
+ defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
+ defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
}
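// The only asymmetry between the two instantiations above is the 16-bit load
// fragment threaded through as ExtLoad16 (extloadi32i16 for sign extension,
// loadi16_anyext for zero extension); both describe a two-byte load, which is
// all the BQ forms read from memory. For PMOVZXBQ the BQrm pattern therefore
// instantiates as roughly:
//   def : Pat<(v2i64 (X86vzext (bc_v16i8 (v4i32 (scalar_to_vector
//                     (loadi16_anyext addr:$src)))))),
//             (PMOVZXBQrm addr:$src)>;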
//===----------------------------------------------------------------------===//
@@ -6649,20 +6106,20 @@ let Predicates = [UseSSE41] in {
/// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
imm:$src2))]>,
Sched<[WriteShuffle]>;
- let neverHasSideEffects = 1, mayStore = 1,
+ let hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
+ (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
- imm:$src2)))), addr:$dst)]>;
+ imm:$src2)))), addr:$dst)]>;
}
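// The i32i8imm -> u8imm operand changes here and throughout the rest of this
// file let the assembler parse and print these selector immediates as
// unsigned 8-bit values instead of sign-extended ones; the ISel patterns
// themselves are unchanged.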
let Predicates = [HasAVX] in
@@ -6675,19 +6132,19 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, Sched<[WriteShuffle]>;
- let neverHasSideEffects = 1, mayStore = 1,
+ let hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
+ (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
- imm:$src2)))), addr:$dst)]>;
+ imm:$src2)))), addr:$dst)]>;
}
let Predicates = [HasAVX] in
@@ -6699,7 +6156,7 @@ defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst,
@@ -6707,7 +6164,7 @@ multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
Sched<[WriteShuffle]>;
let SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
+ (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (v4i32 VR128:$src1), imm:$src2),
@@ -6722,7 +6179,7 @@ defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR64:$dst,
@@ -6730,7 +6187,7 @@ multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
Sched<[WriteShuffle]>, REX_W;
let SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
+ (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (v2i64 VR128:$src1), imm:$src2),
@@ -6747,7 +6204,7 @@ defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
OpndItins itins = DEFAULT_ITINS> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32orGR64:$dst,
@@ -6755,7 +6212,7 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
itins.rr>, Sched<[WriteFBlend]>;
let SchedRW = [WriteFBlendLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
+ (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
@@ -6786,7 +6243,7 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32orGR64:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6795,7 +6252,7 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
(X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6812,7 +6269,7 @@ let Constraints = "$src1 = $dst" in
multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, GR32:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6821,7 +6278,7 @@ multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
(v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6838,7 +6295,7 @@ let Constraints = "$src1 = $dst" in
multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, GR64:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6847,7 +6304,7 @@ multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
(v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
+ (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6869,7 +6326,7 @@ let Constraints = "$src1 = $dst" in
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
OpndItins itins = DEFAULT_ITINS> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6878,7 +6335,7 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
(X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
Sched<[WriteFShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
@@ -6932,7 +6389,7 @@ let ExeDomain = SSEPackedSingle in {
// Intrinsic operation, reg.
// Vector intrinsic operation, reg
def PSr : SS4AIi8<opcps, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
@@ -6940,7 +6397,7 @@ let ExeDomain = SSEPackedSingle in {
// Vector intrinsic operation, mem
def PSm : SS4AIi8<opcps, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
@@ -6951,7 +6408,7 @@ let ExeDomain = SSEPackedSingle in {
let ExeDomain = SSEPackedDouble in {
// Vector intrinsic operation, reg
def PDr : SS4AIi8<opcpd, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
@@ -6959,7 +6416,7 @@ let ExeDomain = SSEPackedDouble in {
// Vector intrinsic operation, mem
def PDm : SS4AIi8<opcpd, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
@@ -6976,7 +6433,7 @@ let ExeDomain = GenericDomain in {
// Operation, reg.
let hasSideEffects = 0 in
def SSr : SS4AIi8<opcss, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
+ (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -6987,7 +6444,7 @@ let ExeDomain = GenericDomain in {
// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -6998,7 +6455,7 @@ let ExeDomain = GenericDomain in {
// Intrinsic operation, mem.
def SSm : SS4AIi8<opcss, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7011,7 +6468,7 @@ let ExeDomain = GenericDomain in {
// Operation, reg.
let hasSideEffects = 0 in
def SDr : SS4AIi8<opcsd, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
+ (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7022,7 +6479,7 @@ let ExeDomain = GenericDomain in {
// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7033,7 +6490,7 @@ let ExeDomain = GenericDomain in {
// Intrinsic operation, mem.
def SDm : SS4AIi8<opcsd, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7059,7 +6516,9 @@ let Predicates = [HasAVX] in {
defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
int_x86_sse41_round_ss,
int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
+}
+let Predicates = [UseAVX] in {
def : Pat<(ffloor FR32:$src),
(VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
def : Pat<(f64 (ffloor FR64:$src)),
@@ -7080,7 +6539,9 @@ let Predicates = [HasAVX] in {
(VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
def : Pat<(f64 (ftrunc FR64:$src)),
(VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
+}
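// The scalar rounding patterns above (ffloor through ftrunc) now require
// UseAVX (AVX enabled, AVX-512 not) rather than HasAVX, presumably so that on
// AVX-512 targets the EVEX-encoded equivalents can match instead; the packed
// patterns that follow stay under plain HasAVX.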
+let Predicates = [HasAVX] in {
def : Pat<(v4f32 (ffloor VR128:$src)),
(VROUNDPSr VR128:$src, (i32 0x1))>;
def : Pat<(v4f32 (fnearbyint VR128:$src)),
@@ -7284,7 +6745,7 @@ let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128,
+ Intrinsic IntId128, PatFrag ld_frag,
X86FoldableSchedWrite Sched> {
def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
@@ -7295,7 +6756,7 @@ multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
(ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId128 (bitconvert (memopv2i64 addr:$src))))]>,
+ (IntId128 (bitconvert (ld_frag addr:$src))))]>,
Sched<[Sched.Folded]>;
}
@@ -7303,53 +6764,12 @@ multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
// model, although the naming is misleading.
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
- int_x86_sse41_phminposuw,
+ int_x86_sse41_phminposuw, loadv2i64,
WriteVecIMul>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
- int_x86_sse41_phminposuw,
+ int_x86_sse41_phminposuw, memopv2i64,
WriteVecIMul>;
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
-multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Is2Addr = 1,
- OpndItins itins = DEFAULT_ITINS> {
- let isCommutable = 1 in
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !if(Is2Addr,
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))],
- itins.rr>, Sched<[itins.Sched]>;
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !if(Is2Addr,
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst,
- (IntId128 VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))],
- itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
-}
-
-/// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
-multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId256,
- X86FoldableSchedWrite Sched> {
- let isCommutable = 1 in
- def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
- Sched<[Sched]>;
- def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, i256mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
- Sched<[Sched.Folded, ReadAfterLd]>;
-}
-
-
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
@@ -7398,7 +6818,7 @@ multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX] in {
let isCommutable = 0 in
defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
@@ -7429,7 +6849,7 @@ let Predicates = [HasAVX] in {
SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}
-let Predicates = [HasAVX2] in {
+let Predicates = [HasAVX2, NoVLX] in {
let isCommutable = 0 in
defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
@@ -7483,7 +6903,7 @@ let Constraints = "$src1 = $dst" in {
SSE_INTMUL_ITINS_P, 1>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX] in {
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
VEX_4V;
@@ -7493,10 +6913,10 @@ let Predicates = [HasAVX] in {
}
let Predicates = [HasAVX2] in {
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
- memopv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
+ loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_L;
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
- memopv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
}
@@ -7514,7 +6934,7 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
OpndItins itins = DEFAULT_ITINS> {
let isCommutable = 1 in
def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$src3),
+ (ins RC:$src1, RC:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7523,7 +6943,7 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
[(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
Sched<[itins.Sched]>;
def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$src3),
+ (ins RC:$src1, x86memop:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7580,13 +7000,13 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
let isCommutable = 0 in {
- defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
- VR256, loadv4i64, i256mem, 0,
- DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
VR256, loadv4i64, i256mem, 0,
DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
}
+ defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
+ VR256, loadv4i64, i256mem, 0,
+ DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
}
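// VPBLENDWY moves out of the isCommutable = 0 block above: word blends can be
// commuted as long as the blend mask immediate is inverted when the operands
// are swapped, which the target's instruction-commuting hook takes care of.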
let Constraints = "$src1 = $dst" in {
@@ -7734,7 +7154,7 @@ let Predicates = [UseAVX] in {
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (VBLENDPSrri (v4i32 (V_SET0)), VR128:$src, (i8 1))>;
+ (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
(VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
@@ -7769,7 +7189,7 @@ let Predicates = [UseSSE41] in {
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
+ (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
}
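// The two v4i32 X86vzmovl patterns above switch from BLENDPS to PBLENDW with
// immediate 3: words 0-1 (the low 32 bits) come from $src and the remaining
// six words from the zeroed register, so the zero-extending move now stays in
// the integer domain instead of crossing into the FP blend domain.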
@@ -7909,141 +7329,149 @@ let Constraints = "$src1 = $dst" in
//===----------------------------------------------------------------------===//
// Packed Compare Implicit Length Strings, Return Mask
-multiclass pseudo_pcmpistrm<string asm> {
+multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
def REG : PseudoI<(outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
imm:$src3))]>;
def MEM : PseudoI<(outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
+ (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
- defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
- defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>;
+ defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
+ Requires<[HasAVX]>;
+ defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
+ Requires<[UseSSE42]>;
}
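// loadv2i64 vs. memopv2i64 follows the usual folding rule: the VEX-encoded
// AVX forms may fold unaligned 128-bit loads, while the legacy SSE forms go
// through memop, which requires a sufficiently aligned memory operand.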
multiclass pcmpistrm_SS42AI<string asm> {
def rr : SS42AI<0x62, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[]>, Sched<[WritePCmpIStrM]>;
let mayLoad = 1 in
def rm :SS42AI<0x62, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[]>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
}
-let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
+let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm"> ;
}
// Packed Compare Explicit Length Strings, Return Mask
-multiclass pseudo_pcmpestrm<string asm> {
+multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
def REG : PseudoI<(outs VR128:$dst),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ (ins VR128:$src1, VR128:$src3, u8imm:$src5),
[(set VR128:$dst, (int_x86_sse42_pcmpestrm128
VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
def MEM : PseudoI<(outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
[(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
- (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>;
+ (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
- defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
- defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>;
+ defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
+ Requires<[HasAVX]>;
+ defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
+ Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestrm<string asm> {
def rr : SS42AI<0x60, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ (ins VR128:$src1, VR128:$src3, u8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
[]>, Sched<[WritePCmpEStrM]>;
let mayLoad = 1 in
def rm : SS42AI<0x60, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
[]>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
}
-let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
+let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
-multiclass pseudo_pcmpistri<string asm> {
+multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
def REG : PseudoI<(outs GR32:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
[(set GR32:$dst, EFLAGS,
(X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
def MEM : PseudoI<(outs GR32:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
[(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
+ (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
- defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>;
- defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>;
+ defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
+ Requires<[HasAVX]>;
+ defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
+ Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpistri<string asm> {
def rr : SS42AI<0x63, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[]>, Sched<[WritePCmpIStrI]>;
let mayLoad = 1 in
def rm : SS42AI<0x63, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[]>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
}
-let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
+let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
// Packed Compare Explicit Length Strings, Return Index
-multiclass pseudo_pcmpestri<string asm> {
+multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
def REG : PseudoI<(outs GR32:$dst),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ (ins VR128:$src1, VR128:$src3, u8imm:$src5),
[(set GR32:$dst, EFLAGS,
(X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
def MEM : PseudoI<(outs GR32:$dst),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
[(set GR32:$dst, EFLAGS,
- (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX,
+ (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
imm:$src5))]>;
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
- defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>;
- defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>;
+ defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
+ Requires<[HasAVX]>;
+ defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
+ Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestri<string asm> {
def rr : SS42AI<0x61, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ (ins VR128:$src1, VR128:$src3, u8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
[]>, Sched<[WritePCmpEStrI]>;
let mayLoad = 1 in
def rm : SS42AI<0x61, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
[]>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
}
-let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
+let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
@@ -8123,13 +7551,13 @@ multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
(i8 imm:$src3)))]>, TA;
def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1,
@@ -8157,8 +7585,8 @@ def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
// AES-NI Instructions
//===----------------------------------------------------------------------===//
-multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Is2Addr = 1> {
+multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
+ PatFrag ld_frag, bit Is2Addr = 1> {
def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -8172,31 +7600,31 @@ multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>,
+ (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
Sched<[WriteAESDecEncLd, ReadAfterLd]>;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
- int_x86_aesni_aesenc, 0>, VEX_4V;
+ int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
- int_x86_aesni_aesenclast, 0>, VEX_4V;
+ int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
- int_x86_aesni_aesdec, 0>, VEX_4V;
+ int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
- int_x86_aesni_aesdeclast, 0>, VEX_4V;
+ int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
- int_x86_aesni_aesenc>;
+ int_x86_aesni_aesenc, memopv2i64>;
defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
- int_x86_aesni_aesenclast>;
+ int_x86_aesni_aesenclast, memopv2i64>;
defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
- int_x86_aesni_aesdec>;
+ int_x86_aesni_aesdec, memopv2i64>;
defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
- int_x86_aesni_aesdeclast>;
+ int_x86_aesni_aesdeclast, memopv2i64>;
}
// Perform the AES InvMixColumn Transformation
@@ -8227,26 +7655,26 @@ def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
Sched<[WriteAESKeyGen]>, VEX;
def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src1, i8imm:$src2),
+ (ins i128mem:$src1, u8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
Sched<[WriteAESKeyGenLd]>, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, i8imm:$src2),
+ (ins VR128:$src1, u8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src1, i8imm:$src2),
+ (ins i128mem:$src1, u8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
@@ -8257,15 +7685,16 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
//===----------------------------------------------------------------------===//
// AVX carry-less Multiplication instructions
+let isCommutable = 1 in
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128:$dst,
(int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
Sched<[WriteCLMul]>;
def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
(loadv2i64 addr:$src2), imm:$src3))]>,
@@ -8273,15 +7702,16 @@ def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
+let isCommutable = 1 in
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR128:$src1, VR128:$src2, u8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;
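// Commuting pclmulqdq is safe because swapping the two sources only requires
// exchanging immediate bits 0 and 4 (the per-source qword selectors), which
// the commuting hook must rewrite accordingly.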
def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
(memopv2i64 addr:$src2), imm:$src3))],
@@ -8320,7 +7750,7 @@ let Predicates = [HasSSE4A] in {
let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
- (ins VR128:$src, i8imm:$len, i8imm:$idx),
+ (ins VR128:$src, u8imm:$len, u8imm:$idx),
"extrq\t{$idx, $len, $src|$src, $len, $idx}",
[(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
imm:$idx))]>, PD;
@@ -8331,7 +7761,7 @@ def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
VR128:$mask))]>, PD;
def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
+ (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
"insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
[(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
VR128:$src2, imm:$len, imm:$idx))]>, XD;
@@ -8422,14 +7852,14 @@ def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
-let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR256:$src1, VR128:$src2, u8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
+ (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
}
@@ -8496,14 +7926,14 @@ def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
-let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
- (ins VR256:$src1, i8imm:$src2),
+ (ins VR256:$src1, u8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, Sched<[WriteFShuffle]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
- (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
+ (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, Sched<[WriteStore]>, VEX, VEX_L;
}
@@ -8624,15 +8054,15 @@ multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
Sched<[WriteFShuffleLd, ReadAfterLd]>;
def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, i8imm:$src2),
+ (ins RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffle]>;
def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
- (ins x86memop_f:$src1, i8imm:$src2),
+ (ins x86memop_f:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (vt (X86VPermilpi (memop addr:$src1), (i8 imm:$src2))))]>, VEX,
+ (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffleLd]>;
}
@@ -8689,13 +8119,13 @@ def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
//
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ (ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
(i8 imm:$src3))))]>, VEX_4V, VEX_L,
Sched<[WriteFShuffle]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
+ (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
(i8 imm:$src3)))]>, VEX_4V, VEX_L,
@@ -8756,7 +8186,7 @@ multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
"vcvtph2ps\t{$src, $dst|$dst, $src}",
[(set RC:$dst, (Int VR128:$src))]>,
T8PD, VEX, Sched<[WriteCvtF2F]>;
- let neverHasSideEffects = 1, mayLoad = 1 in
+ let hasSideEffects = 0, mayLoad = 1 in
def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
Sched<[WriteCvtF2FLd]>;
@@ -8764,14 +8194,14 @@ multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
- (ins RC:$src1, i32i8imm:$src2),
+ (ins RC:$src1, i32u8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
TAPD, VEX, Sched<[WriteCvtF2F]>;
- let neverHasSideEffects = 1, mayStore = 1,
+ let hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteCvtF2FLd, WriteRMW] in
def mr : Ii8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
+ (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
TAPD, VEX;
}
@@ -8814,13 +8244,13 @@ multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
X86MemOperand x86memop> {
let isCommutable = 1 in
def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$src3),
+ (ins RC:$src1, RC:$src2, u8imm:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
Sched<[WriteBlend]>, VEX_4V;
def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$src3),
+ (ins RC:$src1, x86memop:$src2, u8imm:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst,
@@ -9061,14 +8491,14 @@ defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT, X86FoldableSchedWrite Sched> {
def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, i8imm:$src2),
+ (ins VR256:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
Sched<[Sched]>, VEX, VEX_L;
def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins i256mem:$src1, i8imm:$src2),
+ (ins i256mem:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
@@ -9087,13 +8517,13 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
// VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
//
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ (ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
(i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
+ (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
(i8 imm:$src3)))]>,
@@ -9122,14 +8552,14 @@ def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR128:$src2, i8imm:$src3),
+ (ins VR256:$src1, VR128:$src2, u8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
+ (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
@@ -9177,14 +8607,14 @@ def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
// VEXTRACTI128 - Extract packed integer values
//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
- (ins VR256:$src1, i8imm:$src2),
+ (ins VR256:$src1, u8imm:$src2),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
Sched<[WriteShuffle256]>, VEX, VEX_L;
-let neverHasSideEffects = 1, mayStore = 1 in
+let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
- (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
+ (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
Sched<[WriteStore]>, VEX, VEX_L;
@@ -9260,6 +8690,115 @@ defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
int_x86_avx2_maskstore_q,
int_x86_avx2_maskstore_q_256>, VEX_W;
+def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
+ (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;
+
+def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
+ (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
+
+def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
+ (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;
+
+def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
+ (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;
+
+def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
+ (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
+ (bc_v8f32 (v8i32 immAllZerosV)))),
+ (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
+ (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
+ VR256:$mask)>;
+
+def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
+ (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
+ (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
+ (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
+ VR256:$mask)>;
+
+def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
+ (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
+ (bc_v4f32 (v4i32 immAllZerosV)))),
+ (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
+ (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
+ VR128:$mask)>;
+
+def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
+ (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
+ (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
+ (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
+ VR128:$mask)>;
+
+def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
+ (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
+
+def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
+ (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;
+
+def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
+ (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
+ (v4f64 immAllZerosV))),
+ (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
+ (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
+ VR256:$mask)>;
+
+def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
+ (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
+ (bc_v4i64 (v8i32 immAllZerosV)))),
+ (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
+
+def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
+ (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
+ VR256:$mask)>;
+
+def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
+ (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;
+
+def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
+ (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;
+
+def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
+ (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
+ (v2f64 immAllZerosV))),
+ (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
+ (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
+ VR128:$mask)>;
+
+def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
+ (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
+ (bc_v2i64 (v4i32 immAllZerosV)))),
+ (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
+
+def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
+ (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
+ VR128:$mask)>;
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
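
The masked_load/masked_store pattern block above lets the generic masked memory nodes select the AVX VMASKMOVP*/AVX2 VPMASKMOV* forms, with a VBLENDV folded in when the pass-through value is neither undef nor zero. A user-level sketch of the semantics those patterns implement, using the standard immintrin.h intrinsics (AVX for the float/double forms, AVX2 for the integer forms):

    // Sketch: per-lane semantics of the masked load/store patterns above.
    // Lane i participates only if the sign bit of mask lane i is set.
    #include <immintrin.h>

    void scale_selected(float *p, __m256i mask, float factor) {
      // Masked load: unselected lanes read as 0.0f and cannot fault.
      __m256 v = _mm256_maskload_ps(p, mask);
      v = _mm256_mul_ps(v, _mm256_set1_ps(factor));
      // Masked store: unselected lanes of p[] are left untouched.
      _mm256_maskstore_ps(p, mask, v);
    }
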
diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td
index d0bb523..c706d43 100644
--- a/lib/Target/X86/X86InstrShiftRotate.td
+++ b/lib/Target/X86/X86InstrShiftRotate.td
@@ -49,6 +49,7 @@ def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
"shl{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))],
IIC_SR>;
+} // isConvertibleToThreeAddress = 1
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
@@ -62,7 +63,6 @@ def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t$dst", [], IIC_SR>;
} // hasSideEffects = 0
-} // isConvertibleToThreeAddress = 1
} // Constraints = "$src = $dst", SchedRW
@@ -289,11 +289,11 @@ def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
"sar{w}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi16 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize16;
-def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
+def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi32 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize32;
-def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
+def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
"sar{q}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi64 addr:$dst), CL), addr:$dst)],
IIC_SR>;
@@ -347,7 +347,7 @@ def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
let Uses = [CL] in
def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
-
+
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t$dst", [], IIC_SR>, OpSize16;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
@@ -381,7 +381,7 @@ def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
let Uses = [CL] in
def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
-
+
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t$dst", [], IIC_SR>, OpSize16;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
@@ -397,7 +397,7 @@ def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
let Uses = [CL] in
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
-
+
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
"rcr{q}\t$dst", [], IIC_SR>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
@@ -493,7 +493,7 @@ def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"rol{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))],
IIC_SR>, OpSize32;
-def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
+def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))],
@@ -600,7 +600,7 @@ def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"ror{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))],
IIC_SR>, OpSize32;
-def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
+def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))],
@@ -635,11 +635,11 @@ def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi16 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize16;
-def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
+def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi32 addr:$dst), CL), addr:$dst)],
IIC_SR>, OpSize32;
-def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
+def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
"ror{q}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi64 addr:$dst), CL), addr:$dst)],
IIC_SR>;
@@ -688,19 +688,19 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL] in {
-def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
+def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
TB, OpSize16;
-def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
+def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
TB, OpSize16;
-def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
+def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))],
@@ -710,58 +710,58 @@ def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))],
IIC_SHD32_REG_CL>, TB, OpSize32;
-def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
+def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))],
- IIC_SHD64_REG_CL>,
+ IIC_SHD64_REG_CL>,
TB;
-def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
+def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))],
- IIC_SHD64_REG_CL>,
+ IIC_SHD64_REG_CL>,
TB;
}
let isCommutable = 1 in { // These instructions commute to each other.
def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
- (outs GR16:$dst),
+ (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2, i8imm:$src3),
"shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
(i8 imm:$src3)))], IIC_SHD16_REG_IM>,
TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
- (outs GR16:$dst),
+ (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
(i8 imm:$src3)))], IIC_SHD16_REG_IM>,
TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
- (outs GR32:$dst),
+ (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
(i8 imm:$src3)))], IIC_SHD32_REG_IM>,
TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
- (outs GR32:$dst),
+ (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
(i8 imm:$src3)))], IIC_SHD32_REG_IM>,
TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
- (outs GR64:$dst),
+ (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2, i8imm:$src3),
"shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
(i8 imm:$src3)))], IIC_SHD64_REG_IM>,
TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
- (outs GR64:$dst),
+ (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2, i8imm:$src3),
"shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
@@ -789,7 +789,7 @@ def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
addr:$dst)], IIC_SHD32_MEM_CL>, TB, OpSize32;
-
+
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
@@ -807,7 +807,7 @@ def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD16_MEM_IM>,
TB, OpSize16;
-def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
+def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
@@ -822,7 +822,7 @@ def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD32_MEM_IM>,
TB, OpSize32;
-def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
+def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
@@ -837,7 +837,7 @@ def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD64_MEM_IM>,
TB;
-def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
+def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
"shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
@@ -859,7 +859,7 @@ def ROT64L2R_imm8 : SDNodeXForm<imm, [{
}]>;
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, i8imm:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, TAXD, VEX, Sched<[WriteShift]>;
@@ -872,7 +872,7 @@ let neverHasSideEffects = 1 in {
}
multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def rr : I<0xF7, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
VEX_4VOp3, Sched<[WriteShift]>;
diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td
index 8cabdd0..0350566 100644
--- a/lib/Target/X86/X86InstrSystem.td
+++ b/lib/Target/X86/X86InstrSystem.td
@@ -38,9 +38,6 @@ def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
[(int_x86_int (i8 3))], IIC_INT3>;
} // SchedRW
-def : Pat<(debugtrap),
- (INT3)>;
-
// The long form of "int $3" turns into int3 as a size optimization.
// FIXME: This doesn't work because InstAlias can't match immediate constants.
//def : InstAlias<"int\t$3", (INT3)>;
@@ -71,6 +68,10 @@ def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
Requires<[In64BitMode]>;
} // SchedRW
+def : Pat<(debugtrap),
+ (INT3)>, Requires<[NotPS4]>;
+def : Pat<(debugtrap),
+ (INT (i8 0x41))>, Requires<[IsPS4]>;
//===----------------------------------------------------------------------===//
// Input/Output Instructions.
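
Moving the debugtrap pattern below the IRET definitions makes room for a per-target split: int3 everywhere except the PS4 target, which expects int $0x41. A sketch of the source construct that reaches these patterns, assuming clang's __builtin_debugtrap builtin (which lowers to llvm.debugtrap):

    // Sketch: source-level trigger for the llvm.debugtrap patterns above.
    void checked_div(int a, int b, int *out) {
      if (b == 0)
        __builtin_debugtrap(); // int3 normally; int $0x41 when targeting PS4
      else
        *out = a / b;
    }
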
@@ -207,7 +208,7 @@ def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
let SchedRW = [WriteSystem] in {
def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", [], IIC_SWAPGS>, TB;
-def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB,
OpSize16;
def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
@@ -215,14 +216,14 @@ def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
OpSize16;
// i16mem operand in LAR32rm and GR32 operand in LAR32rr is not a typo.
-def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
+def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB,
OpSize32;
def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB,
OpSize32;
// i16mem operand in LAR64rm and GR32 operand in LAR64rr is not a typo.
-def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
"lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
"lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB;
@@ -240,7 +241,7 @@ def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB,
OpSize32;
def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
+ "lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB;
@@ -260,7 +261,7 @@ def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
"ltr{w}\t$src", [], IIC_LTR>, TB;
def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
"ltr{w}\t$src", [], IIC_LTR>, TB;
-
+
def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
"push{w}\t{%cs|cs}", [], IIC_PUSH_SR>,
OpSize16, Requires<[Not64BitMode]>;
@@ -347,31 +348,31 @@ def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
"lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
-
+
def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
"lss{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lss{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
-
+
def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
"les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
-
+
def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
"lfs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lfs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
-
+
def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lgs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
"lgs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
-
+
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lgs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
@@ -408,7 +409,7 @@ def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
"sldt{w}\t$dst", [], IIC_SLDT>, TB;
def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
"sldt{l}\t$dst", [], IIC_SLDT>, OpSize32, TB;
-
+
// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
@@ -437,19 +438,21 @@ def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
//===----------------------------------------------------------------------===//
// Specialized register support
let SchedRW = [WriteSystem] in {
+let Uses = [EAX, ECX, EDX] in
def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", [], IIC_WRMSR>, TB;
+let Defs = [EAX, EDX], Uses = [ECX] in
def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", [], IIC_RDMSR>, TB;
let Defs = [RAX, RDX], Uses = [ECX] in
def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", [(X86rdpmc)], IIC_RDPMC>,
TB;
-def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
+def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
"smsw{w}\t$dst", [], IIC_SMSW>, OpSize16, TB;
-def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
+def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
"smsw{l}\t$dst", [], IIC_SMSW>, OpSize32, TB;
// no m form encodable; use SMSW16m
-def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
+def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
"smsw{q}\t$dst", [], IIC_SMSW>, TB;
// For memory operands, there is only a 16-bit form
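
The new Uses/Defs lists on WRMSR and RDMSR record the implicit register contract: ECX selects the MSR and the 64-bit value travels in EDX:EAX. That matches the classic ring-0 idiom, sketched here with GNU inline asm (privileged; it faults outside kernel context):

    // Sketch: the implicit registers the Uses/Defs lists above describe.
    // ECX selects the MSR; RDMSR returns the value in EDX:EAX.
    static inline unsigned long long read_msr(unsigned int msr) {
      unsigned int lo, hi;
      __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
      return ((unsigned long long)hi << 32) | lo;
    }
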
@@ -485,15 +488,28 @@ let Uses = [RDX, RAX] in {
def XSAVE : I<0xAE, MRM4m, (outs opaque512mem:$dst), (ins),
"xsave\t$dst", []>, TB;
def XSAVE64 : RI<0xAE, MRM4m, (outs opaque512mem:$dst), (ins),
- "xsave{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
+ "xsave64\t$dst", []>, TB, Requires<[In64BitMode]>;
def XRSTOR : I<0xAE, MRM5m, (outs), (ins opaque512mem:$dst),
"xrstor\t$dst", []>, TB;
def XRSTOR64 : RI<0xAE, MRM5m, (outs), (ins opaque512mem:$dst),
- "xrstor{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
+ "xrstor64\t$dst", []>, TB, Requires<[In64BitMode]>;
def XSAVEOPT : I<0xAE, MRM6m, (outs opaque512mem:$dst), (ins),
- "xsaveopt\t$dst", []>, TB;
+ "xsaveopt\t$dst", []>, PS;
def XSAVEOPT64 : RI<0xAE, MRM6m, (outs opaque512mem:$dst), (ins),
- "xsaveopt{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
+ "xsaveopt64\t$dst", []>, PS, Requires<[In64BitMode]>;
+
+ def XRSTORS : I<0xC7, MRM3m, (outs), (ins opaque512mem:$dst),
+ "xrstors\t$dst", []>, TB;
+ def XRSTORS64 : RI<0xC7, MRM3m, (outs), (ins opaque512mem:$dst),
+ "xrstors64\t$dst", []>, TB, Requires<[In64BitMode]>;
+ def XSAVEC : I<0xC7, MRM4m, (outs opaque512mem:$dst), (ins),
+ "xsavec\t$dst", []>, TB;
+ def XSAVEC64 : RI<0xC7, MRM4m, (outs opaque512mem:$dst), (ins),
+ "xsavec64\t$dst", []>, TB, Requires<[In64BitMode]>;
+ def XSAVES : I<0xC7, MRM5m, (outs opaque512mem:$dst), (ins),
+ "xsaves\t$dst", []>, TB;
+ def XSAVES64 : RI<0xC7, MRM5m, (outs opaque512mem:$dst), (ins),
+ "xsaves64\t$dst", []>, TB, Requires<[In64BitMode]>;
}
} // SchedRW
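
Besides switching the {q|64} alias strings to plain xsave64-style mnemonics, this hunk adds assembler support for the XSAVEC/XSAVES/XRSTORS family. A sketch of reaching the new xsavec encoding from C, assuming a compiler that ships the _xsavec intrinsic (later clang/GCC headers, -mxsavec); the save area must be 64-byte aligned and sized per CPUID leaf 0Dh:

    // Sketch: exercising the new XSAVEC encoding via its C intrinsic.
    // Unlike XSAVES, XSAVEC is usable outside ring 0.
    #include <immintrin.h>

    alignas(64) static unsigned char xsave_area[4096];

    void save_x87_sse_state(void) {
      _xsavec(xsave_area, 0x3ull); // component bitmap 0x3 = x87 | SSE
    }
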
@@ -559,7 +575,13 @@ def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
//===----------------------------------------------------------------------===//
// SMAP Instruction
-let Predicates = [HasSMAP], Defs = [EFLAGS] in {
+let Defs = [EFLAGS] in {
def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, TB;
def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, TB;
}
+
+//===----------------------------------------------------------------------===//
+// SMX Instruction
+let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX] in {
+ def GETSEC : I<0x37, RawFrm, (outs), (ins), "getsec", []>, TB;
+}
diff --git a/lib/Target/X86/X86InstrTSX.td b/lib/Target/X86/X86InstrTSX.td
index 4940efc..7267d75 100644
--- a/lib/Target/X86/X86InstrTSX.td
+++ b/lib/Target/X86/X86InstrTSX.td
@@ -23,9 +23,12 @@ def XBEGIN : I<0, Pseudo, (outs GR32:$dst), (ins),
"# XBEGIN", [(set GR32:$dst, (int_x86_xbegin))]>,
Requires<[HasRTM]>;
-let isBranch = 1, isTerminator = 1, Defs = [EAX] in
-def XBEGIN_4 : Ii32PCRel<0xc7, MRM_F8, (outs), (ins brtarget:$dst),
- "xbegin\t$dst", []>, Requires<[HasRTM]>;
+let isBranch = 1, isTerminator = 1, Defs = [EAX] in {
+def XBEGIN_2 : Ii16PCRel<0xc7, MRM_F8, (outs), (ins brtarget16:$dst),
+ "xbegin\t$dst", []>, OpSize16, Requires<[HasRTM]>;
+def XBEGIN_4 : Ii32PCRel<0xc7, MRM_F8, (outs), (ins brtarget32:$dst),
+ "xbegin\t$dst", []>, OpSize32, Requires<[HasRTM]>;
+}
def XEND : I<0x01, MRM_D5, (outs), (ins),
"xend", [(int_x86_xend)]>, TB, Requires<[HasRTM]>;
diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td
index 45e2ff0..8455b8d 100644
--- a/lib/Target/X86/X86InstrXOP.td
+++ b/lib/Target/X86/X86InstrXOP.td
@@ -20,21 +20,23 @@ multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP;
}
-defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>;
-defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, memopv2i64>;
-defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, memopv2i64>;
-defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, memopv2i64>;
-defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, memopv2i64>;
-defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, memopv2i64>;
-defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, memopv2i64>;
-defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, memopv2i64>;
-defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, memopv2i64>;
-defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, memopv2i64>;
-defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, memopv2i64>;
-defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, memopv2i64>;
-defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, memopv2i64>;
-defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, memopv2i64>;
-defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, memopv2i64>;
+let ExeDomain = SSEPackedInt in {
+ defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, loadv2i64>;
+ defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, loadv2i64>;
+ defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, loadv2i64>;
+ defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, loadv2i64>;
+ defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, loadv2i64>;
+ defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, loadv2i64>;
+ defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, loadv2i64>;
+ defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, loadv2i64>;
+ defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, loadv2i64>;
+ defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, loadv2i64>;
+ defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, loadv2i64>;
+ defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, loadv2i64>;
+ defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, loadv2i64>;
+ defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, loadv2i64>;
+ defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, loadv2i64>;
+}
// Scalar load 2 addr operand instructions
multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
@@ -47,11 +49,6 @@ multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, XOP;
}
-defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
- ssmem, sse_load_f32>;
-defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
- sdmem, sse_load_f64>;
-
multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -62,9 +59,6 @@ multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP;
}
-defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
-defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
-
multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rrY : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
@@ -75,8 +69,19 @@ multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
[(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP, VEX_L;
}
-defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, memopv8f32>;
-defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256, memopv4f64>;
+let ExeDomain = SSEPackedSingle in {
+ defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
+ ssmem, sse_load_f32>;
+ defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, loadv4f32>;
+ defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, loadv8f32>;
+}
+
+let ExeDomain = SSEPackedDouble in {
+ defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
+ sdmem, sse_load_f64>;
+ defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, loadv2f64>;
+ defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256, loadv4f64>;
+}
multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
@@ -87,28 +92,30 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>,
+ (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2))))]>,
XOP_4V, VEX_W;
def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (Int (bitconvert (memopv2i64 addr:$src1)), VR128:$src2))]>,
+ (Int (bitconvert (loadv2i64 addr:$src1)), VR128:$src2))]>,
XOP_4VOp3;
}
-defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
-defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
-defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
-defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
-defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
-defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
-defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
-defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
-defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
-defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
-defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
-defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
+let ExeDomain = SSEPackedInt in {
+ defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
+ defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
+ defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
+ defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
+ defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
+ defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
+ defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
+ defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
+ defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
+ defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
+ defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
+ defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
+}
multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
@@ -119,16 +126,19 @@ multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
(ins i128mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, XOP;
+ (Int (bitconvert (loadv2i64 addr:$src1)), imm:$src2))]>, XOP;
}
-defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
-defm VPROTQ : xop3opimm<0xC3, "vprotq", int_x86_xop_vprotqi>;
-defm VPROTD : xop3opimm<0xC2, "vprotd", int_x86_xop_vprotdi>;
-defm VPROTB : xop3opimm<0xC0, "vprotb", int_x86_xop_vprotbi>;
+let ExeDomain = SSEPackedInt in {
+ defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
+ defm VPROTQ : xop3opimm<0xC3, "vprotq", int_x86_xop_vprotqi>;
+ defm VPROTD : xop3opimm<0xC2, "vprotd", int_x86_xop_vprotdi>;
+ defm VPROTB : xop3opimm<0xC0, "vprotb", int_x86_xop_vprotbi>;
+}
// Instruction where second source can be memory, but third must be register
multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ let isCommutable = 1 in
def rr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
@@ -140,48 +150,66 @@ multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
VR128:$src3))]>, XOP_4V, VEX_I8IMM;
}
-defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
-defm VPMADCSSWD : xop4opm2<0xA6, "vpmadcsswd", int_x86_xop_vpmadcsswd>;
-defm VPMACSWW : xop4opm2<0x95, "vpmacsww", int_x86_xop_vpmacsww>;
-defm VPMACSWD : xop4opm2<0x96, "vpmacswd", int_x86_xop_vpmacswd>;
-defm VPMACSSWW : xop4opm2<0x85, "vpmacssww", int_x86_xop_vpmacssww>;
-defm VPMACSSWD : xop4opm2<0x86, "vpmacsswd", int_x86_xop_vpmacsswd>;
-defm VPMACSSDQL : xop4opm2<0x87, "vpmacssdql", int_x86_xop_vpmacssdql>;
-defm VPMACSSDQH : xop4opm2<0x8F, "vpmacssdqh", int_x86_xop_vpmacssdqh>;
-defm VPMACSSDD : xop4opm2<0x8E, "vpmacssdd", int_x86_xop_vpmacssdd>;
-defm VPMACSDQL : xop4opm2<0x97, "vpmacsdql", int_x86_xop_vpmacsdql>;
-defm VPMACSDQH : xop4opm2<0x9F, "vpmacsdqh", int_x86_xop_vpmacsdqh>;
-defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
+let ExeDomain = SSEPackedInt in {
+ defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
+ defm VPMADCSSWD : xop4opm2<0xA6, "vpmadcsswd", int_x86_xop_vpmadcsswd>;
+ defm VPMACSWW : xop4opm2<0x95, "vpmacsww", int_x86_xop_vpmacsww>;
+ defm VPMACSWD : xop4opm2<0x96, "vpmacswd", int_x86_xop_vpmacswd>;
+ defm VPMACSSWW : xop4opm2<0x85, "vpmacssww", int_x86_xop_vpmacssww>;
+ defm VPMACSSWD : xop4opm2<0x86, "vpmacsswd", int_x86_xop_vpmacsswd>;
+ defm VPMACSSDQL : xop4opm2<0x87, "vpmacssdql", int_x86_xop_vpmacssdql>;
+ defm VPMACSSDQH : xop4opm2<0x8F, "vpmacssdqh", int_x86_xop_vpmacssdqh>;
+ defm VPMACSSDD : xop4opm2<0x8E, "vpmacssdd", int_x86_xop_vpmacssdd>;
+ defm VPMACSDQL : xop4opm2<0x97, "vpmacsdql", int_x86_xop_vpmacsdql>;
+ defm VPMACSDQH : xop4opm2<0x9F, "vpmacsdqh", int_x86_xop_vpmacsdqh>;
+ defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
+}
// Instruction where second source can be memory, third must be imm8
-multiclass xop4opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+multiclass xopvpcom<bits<8> opc, string Suffix, Intrinsic Int> {
+ let isCommutable = 1 in
def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ (ins VR128:$src1, VR128:$src2, XOPCC:$cc),
+ !strconcat("vpcom${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, i8immZExt3:$cc))]>,
XOP_4V;
def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ (ins VR128:$src1, i128mem:$src2, XOPCC:$cc),
+ !strconcat("vpcom${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
- imm:$src3))]>, XOP_4V;
+ (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
+ i8immZExt3:$cc))]>, XOP_4V;
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
+ def ri_alt : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !strconcat("vpcom", Suffix,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V;
+ let mayLoad = 1 in
+ def mi_alt : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !strconcat("vpcom", Suffix,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V;
+ }
}
-defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>;
-defm VPCOMW : xop4opimm<0xCD, "vpcomw", int_x86_xop_vpcomw>;
-defm VPCOMD : xop4opimm<0xCE, "vpcomd", int_x86_xop_vpcomd>;
-defm VPCOMQ : xop4opimm<0xCF, "vpcomq", int_x86_xop_vpcomq>;
-defm VPCOMUB : xop4opimm<0xEC, "vpcomub", int_x86_xop_vpcomub>;
-defm VPCOMUW : xop4opimm<0xED, "vpcomuw", int_x86_xop_vpcomuw>;
-defm VPCOMUD : xop4opimm<0xEE, "vpcomud", int_x86_xop_vpcomud>;
-defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", int_x86_xop_vpcomuq>;
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
+ defm VPCOMB : xopvpcom<0xCC, "b", int_x86_xop_vpcomb>;
+ defm VPCOMW : xopvpcom<0xCD, "w", int_x86_xop_vpcomw>;
+ defm VPCOMD : xopvpcom<0xCE, "d", int_x86_xop_vpcomd>;
+ defm VPCOMQ : xopvpcom<0xCF, "q", int_x86_xop_vpcomq>;
+ defm VPCOMUB : xopvpcom<0xEC, "ub", int_x86_xop_vpcomub>;
+ defm VPCOMUW : xopvpcom<0xED, "uw", int_x86_xop_vpcomuw>;
+ defm VPCOMUD : xopvpcom<0xEE, "ud", int_x86_xop_vpcomud>;
+ defm VPCOMUQ : xopvpcom<0xEF, "uq", int_x86_xop_vpcomuq>;
+}
// Instruction where either second or third source can be memory
multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
@@ -197,20 +225,22 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, VR128:$src2,
- (bitconvert (memopv2i64 addr:$src3))))]>,
+ (bitconvert (loadv2i64 addr:$src3))))]>,
XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
VR128:$src3))]>,
XOP_4V, VEX_I8IMM;
}
-defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
-defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
+let ExeDomain = SSEPackedInt in {
+ defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
+ defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
+}
multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def rrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
@@ -225,19 +255,20 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst,
(Int VR256:$src1, VR256:$src2,
- (bitconvert (memopv4i64 addr:$src3))))]>,
+ (bitconvert (loadv4i64 addr:$src3))))]>,
XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst,
- (Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)),
+ (Int VR256:$src1, (bitconvert (loadv4i64 addr:$src2)),
VR256:$src3))]>,
XOP_4V, VEX_I8IMM, VEX_L;
}
-defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
+let ExeDomain = SSEPackedInt in
+ defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
Intrinsic Int256, PatFrag ld_128, PatFrag ld_256> {
@@ -282,8 +313,11 @@ multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
VEX_L;
}
-defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
- int_x86_xop_vpermil2pd_256, memopv2f64, memopv4f64>;
-defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", int_x86_xop_vpermil2ps,
- int_x86_xop_vpermil2ps_256, memopv4f32, memopv8f32>;
+let ExeDomain = SSEPackedDouble in
+ defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
+ int_x86_xop_vpermil2pd_256, loadv2f64, loadv4f64>;
+
+let ExeDomain = SSEPackedSingle in
+ defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", int_x86_xop_vpermil2ps,
+ int_x86_xop_vpermil2ps_256, loadv4f32, loadv8f32>;
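
The xopvpcom rewrite above switches VPCOM from raw-immediate syntax to condition-code mnemonics (vpcomltb, vpcomgeub, and so on), keeping the old immediate form only as an asm-parser alias. A hedged inline-asm sketch of the new mnemonic, assuming an XOP-capable toolchain and CPU:

    // Sketch: the vpcom${cc} mnemonic form the rewritten asm strings accept.
    // "lt" encodes condition code 0, matching i8immZExt3:$cc above.
    #include <immintrin.h>

    __m128i less_than_bytes(__m128i a, __m128i b) {
      __m128i r;
      __asm__("vpcomltb %2, %1, %0" : "=x"(r) : "x"(a), "x"(b));
      return r; // lane i = (a[i] < b[i]) ? 0xFF : 0x00, signed bytes
    }
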
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index d252f72..e436811 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -20,8 +20,9 @@ enum IntrinsicType {
INTR_NO_TYPE,
GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, ADX,
INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP,
- CMP_MASK, CMP_MASK_CC, VSHIFT, VSHIFT_MASK, COMI,
- INTR_TYPE_1OP_MASK_RM
+ CMP_MASK, CMP_MASK_CC, VSHIFT, VSHIFT_MASK, COMI,
+ INTR_TYPE_1OP_MASK_RM, INTR_TYPE_2OP_MASK, FMA_OP_MASK, INTR_TYPE_SCALAR_MASK_RM,
+ COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM, EXPAND_FROM_MEM, BLEND
};
struct IntrinsicData {
@@ -51,7 +52,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(addcarry_u64, ADX, X86ISD::ADC, 0),
X86_INTRINSIC_DATA(addcarryx_u32, ADX, X86ISD::ADC, 0),
X86_INTRINSIC_DATA(addcarryx_u64, ADX, X86ISD::ADC, 0),
-
+
X86_INTRINSIC_DATA(avx512_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
X86_INTRINSIC_DATA(avx512_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
X86_INTRINSIC_DATA(avx512_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
@@ -60,7 +61,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
X86_INTRINSIC_DATA(avx512_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
X86_INTRINSIC_DATA(avx512_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
-
+
X86_INTRINSIC_DATA(avx512_gatherpf_dpd_512, PREFETCH,
X86::VGATHERPF0DPDm, X86::VGATHERPF1DPDm),
X86_INTRINSIC_DATA(avx512_gatherpf_dps_512, PREFETCH,
@@ -69,7 +70,55 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86::VGATHERPF0QPDm, X86::VGATHERPF1QPDm),
X86_INTRINSIC_DATA(avx512_gatherpf_qps_512, PREFETCH,
X86::VGATHERPF0QPSm, X86::VGATHERPF1QPSm),
-
+
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_d_128,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_d_256,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_d_512,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_pd_128,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_pd_256,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_pd_512,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_ps_128,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_ps_256,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_ps_512,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_q_128,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_q_256,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_store_q_512,
+ COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_d_128,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_d_256,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_d_512,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_pd_128,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_pd_256,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_pd_512,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_ps_128,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_ps_256,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_ps_512,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_q_128,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_q_256,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_load_q_512,
+ EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_scatter_dpd_512, SCATTER, X86::VSCATTERDPDZmr, 0),
X86_INTRINSIC_DATA(avx512_scatter_dpi_512, SCATTER, X86::VPSCATTERDDZmr, 0),
X86_INTRINSIC_DATA(avx512_scatter_dpq_512, SCATTER, X86::VPSCATTERDQZmr, 0),
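
The avx512_mask_compress_store_* and avx512_mask_expand_load_* rows route the AVX-512 compressed-store and expanded-load intrinsics through the new COMPRESS_TO_MEM/EXPAND_FROM_MEM lowering kinds. A user-level sketch of the store side, assuming AVX-512F and the standard intrinsic:

    // Sketch: compressed store, the user-facing side of COMPRESS_TO_MEM.
    // Selected lanes of v are packed contiguously into out[]; the rest skip.
    #include <immintrin.h>

    int compress_selected(float *out, __m512 v, __mmask16 keep) {
      _mm512_mask_compressstoreu_ps(out, keep, v);
      return __builtin_popcount(keep); // number of floats written
    }
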
@@ -78,7 +127,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_scatter_qpi_512, SCATTER, X86::VPSCATTERQDZmr, 0),
X86_INTRINSIC_DATA(avx512_scatter_qpq_512, SCATTER, X86::VPSCATTERQQZmr, 0),
X86_INTRINSIC_DATA(avx512_scatter_qps_512, SCATTER, X86::VSCATTERQPSZmr, 0),
-
+
X86_INTRINSIC_DATA(avx512_scatterpf_dpd_512, PREFETCH,
X86::VSCATTERPF0DPDm, X86::VSCATTERPF1DPDm),
X86_INTRINSIC_DATA(avx512_scatterpf_dps_512, PREFETCH,
@@ -87,7 +136,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86::VSCATTERPF0QPDm, X86::VSCATTERPF1QPDm),
X86_INTRINSIC_DATA(avx512_scatterpf_qps_512, PREFETCH,
X86::VSCATTERPF0QPSm, X86::VSCATTERPF1QPSm),
-
+
X86_INTRINSIC_DATA(rdpmc, RDPMC, X86ISD::RDPMC_DAG, 0),
X86_INTRINSIC_DATA(rdrand_16, RDRAND, X86ISD::RDRAND, 0),
X86_INTRINSIC_DATA(rdrand_32, RDRAND, X86ISD::RDRAND, 0),
@@ -97,7 +146,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(rdseed_64, RDSEED, X86ISD::RDSEED, 0),
X86_INTRINSIC_DATA(rdtsc, RDTSC, X86ISD::RDTSC_DAG, 0),
X86_INTRINSIC_DATA(rdtscp, RDTSC, X86ISD::RDTSCP_DAG, 0),
-
+
X86_INTRINSIC_DATA(subborrow_u32, ADX, X86ISD::SBB, 0),
X86_INTRINSIC_DATA(subborrow_u64, ADX, X86ISD::SBB, 0),
X86_INTRINSIC_DATA(xtest, XTEST, X86ISD::XTEST, 0),
@@ -122,6 +171,12 @@ static const IntrinsicData* getIntrinsicWithChain(unsigned IntNo) {
* the alphabetical order.
*/
static const IntrinsicData IntrinsicsWithoutChain[] = {
+ X86_INTRINSIC_DATA(avx2_packssdw, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
+ X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
+ X86_INTRINSIC_DATA(avx2_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(avx2_packuswb, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(avx2_permd, INTR_TYPE_2OP, X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx2_permps, INTR_TYPE_2OP, X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx2_phadd_d, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phadd_w, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phsub_d, INTR_TYPE_2OP, X86ISD::HSUB, 0),
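
These tables stay sorted by intrinsic number (which, per the comment above, tracks the alphabetical order of the generated intrinsic enum), so lookup can binary-search instead of growing a switch in the lowering code. A simplified sketch of that scheme; the field layout is illustrative and findIntrinsic is a hypothetical stand-in for the header's real lookup helpers:

    // Sketch: binary search over a sorted intrinsic table, as this header does.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct IntrinsicData {
      uint16_t Id;         // intrinsic number; the tables are sorted on this
      uint16_t Type;       // IntrinsicType tag, e.g. INTR_TYPE_2OP or BLEND
      uint16_t Opc0, Opc1; // opcodes / ISD nodes used by the lowering
    };

    const IntrinsicData *findIntrinsic(const IntrinsicData *Table, size_t N,
                                       uint16_t IntNo) {
      const IntrinsicData *End = Table + N;
      const IntrinsicData *I = std::lower_bound(
          Table, End, IntNo,
          [](const IntrinsicData &D, uint16_t Id) { return D.Id < Id; });
      return (I != End && I->Id == IntNo) ? I : nullptr;
    }
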
@@ -138,27 +193,79 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_pminu_b, INTR_TYPE_2OP, X86ISD::UMIN, 0),
X86_INTRINSIC_DATA(avx2_pminu_d, INTR_TYPE_2OP, X86ISD::UMIN, 0),
X86_INTRINSIC_DATA(avx2_pminu_w, INTR_TYPE_2OP, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxbd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxbq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxbw, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxdq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxwd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovsxwq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmul_dq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
+ X86_INTRINSIC_DATA(avx2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
+ X86_INTRINSIC_DATA(avx2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
+ X86_INTRINSIC_DATA(avx2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
+ X86_INTRINSIC_DATA(avx2_pshuf_b, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
+ X86_INTRINSIC_DATA(avx2_psign_b, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
+ X86_INTRINSIC_DATA(avx2_psign_d, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
+ X86_INTRINSIC_DATA(avx2_psign_w, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_pslli_d, VSHIFT, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx2_pslli_q, VSHIFT, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx2_pslli_w, VSHIFT, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx2_psllv_d, INTR_TYPE_2OP, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx2_psllv_d_256, INTR_TYPE_2OP, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx2_psllv_q, INTR_TYPE_2OP, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx2_psllv_q_256, INTR_TYPE_2OP, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx2_psra_d, INTR_TYPE_2OP, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx2_psra_w, INTR_TYPE_2OP, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx2_psrai_d, VSHIFT, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx2_psrai_w, VSHIFT, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d, INTR_TYPE_2OP, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d_256, INTR_TYPE_2OP, ISD::SRA, 0),
X86_INTRINSIC_DATA(avx2_psrl_d, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_q, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_w, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrli_d, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx2_psrli_q, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx2_psrli_w, VSHIFT, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx2_psrlv_d, INTR_TYPE_2OP, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx2_psrlv_d_256, INTR_TYPE_2OP, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx2_psrlv_q, INTR_TYPE_2OP, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx2_psrlv_q_256, INTR_TYPE_2OP, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_vperm2i128, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
X86_INTRINSIC_DATA(avx512_exp2_pd, INTR_TYPE_1OP_MASK_RM,X86ISD::EXP2, 0),
X86_INTRINSIC_DATA(avx512_exp2_ps, INTR_TYPE_1OP_MASK_RM,X86ISD::EXP2, 0),
+ X86_INTRINSIC_DATA(avx512_mask_add_pd_512, INTR_TYPE_2OP_MASK, ISD::FADD,
+ X86ISD::FADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask_add_ps_512, INTR_TYPE_2OP_MASK, ISD::FADD,
+ X86ISD::FADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask_blend_b_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_b_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_b_512, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_d_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_d_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_d_512, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_pd_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_pd_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_pd_512, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_ps_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_ps_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_ps_512, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_q_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_q_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_q_512, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_w_128, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_w_256, BLEND, X86ISD::SELECT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_blend_w_512, BLEND, X86ISD::SELECT, 0),
X86_INTRINSIC_DATA(avx512_mask_cmp_b_128, CMP_MASK_CC, X86ISD::CMPM, 0),
X86_INTRINSIC_DATA(avx512_mask_cmp_b_256, CMP_MASK_CC, X86ISD::CMPM, 0),
X86_INTRINSIC_DATA(avx512_mask_cmp_b_512, CMP_MASK_CC, X86ISD::CMPM, 0),
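
The BLEND rows map the avx512_mask_blend_* intrinsics onto plain X86ISD::SELECT nodes, one per element width. At the source level that is a mask-driven two-input select; a sketch assuming AVX-512F:

    // Sketch: the select semantics behind the BLEND table entries.
    #include <immintrin.h>

    __m512 pick(__mmask16 k, __m512 if_clear, __m512 if_set) {
      // Lane i of the result is if_set[i] where bit i of k is set,
      // if_clear[i] otherwise.
      return _mm512_mask_blend_ps(k, if_clear, if_set);
    }
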
@@ -171,6 +278,64 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_cmp_w_128, CMP_MASK_CC, X86ISD::CMPM, 0),
X86_INTRINSIC_DATA(avx512_mask_cmp_w_256, CMP_MASK_CC, X86ISD::CMPM, 0),
X86_INTRINSIC_DATA(avx512_mask_cmp_w_512, CMP_MASK_CC, X86ISD::CMPM, 0),
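+  // VPCOMPRESS/VPEXPAND register forms: compress packs the mask-selected
+  // elements down toward element 0, and expand performs the inverse scatter.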
+ X86_INTRINSIC_DATA(avx512_mask_compress_d_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_d_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_d_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_pd_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_pd_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_pd_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_ps_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_ps_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_ps_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_q_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_q_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_compress_q_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::COMPRESS, 0),
+
+ X86_INTRINSIC_DATA(avx512_mask_div_pd_512, INTR_TYPE_2OP_MASK, ISD::FDIV,
+ X86ISD::FDIV_RND),
+ X86_INTRINSIC_DATA(avx512_mask_div_ps_512, INTR_TYPE_2OP_MASK, ISD::FDIV,
+ X86ISD::FDIV_RND),
+ X86_INTRINSIC_DATA(avx512_mask_expand_d_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_d_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_d_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_pd_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_pd_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_pd_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_ps_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_ps_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_ps_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_q_128, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_q_256, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_expand_q_512, COMPRESS_EXPAND_IN_REG,
+ X86ISD::EXPAND, 0),
+
+ X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
+ X86ISD::FMUL_RND),
+ X86_INTRINSIC_DATA(avx512_mask_mul_ps_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
+ X86ISD::FMUL_RND),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_128, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_256, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_512, CMP_MASK, X86ISD::PCMPEQM, 0),
@@ -195,12 +360,32 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512, CMP_MASK, X86ISD::PCMPGTM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_pslli_d, VSHIFT_MASK, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx512_mask_pslli_q, VSHIFT_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_d, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_q, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psrai_d, VSHIFT_MASK, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrai_q, VSHIFT_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_d, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_q, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrli_d, VSHIFT_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrli_q, VSHIFT_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_rndscale_sd, INTR_TYPE_SCALAR_MASK_RM,
+ X86ISD::RNDSCALE, 0),
+ X86_INTRINSIC_DATA(avx512_mask_rndscale_ss, INTR_TYPE_SCALAR_MASK_RM,
+ X86ISD::RNDSCALE, 0),
+ X86_INTRINSIC_DATA(avx512_mask_sub_pd_512, INTR_TYPE_2OP_MASK, ISD::FSUB,
+ X86ISD::FSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask_sub_ps_512, INTR_TYPE_2OP_MASK, ISD::FSUB,
+ X86ISD::FSUB_RND),
X86_INTRINSIC_DATA(avx512_mask_ucmp_b_128, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_b_256, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_b_512, CMP_MASK_CC, X86ISD::CMPMU, 0),
@@ -215,27 +400,118 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_512, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_rcp28_pd, INTR_TYPE_1OP_MASK_RM,X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rcp28_ps, INTR_TYPE_1OP_MASK_RM,X86ISD::RCP28, 0),
+ X86_INTRINSIC_DATA(avx512_rcp28_sd, INTR_TYPE_SCALAR_MASK_RM, X86ISD::RCP28, 0),
+ X86_INTRINSIC_DATA(avx512_rcp28_ss, INTR_TYPE_SCALAR_MASK_RM, X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_pd, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_ps, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt28_sd, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt28_ss, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx_hadd_pd_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
X86_INTRINSIC_DATA(avx_hadd_ps_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
X86_INTRINSIC_DATA(avx_hsub_pd_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
X86_INTRINSIC_DATA(avx_hsub_ps_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
+ X86_INTRINSIC_DATA(avx_max_pd_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
X86_INTRINSIC_DATA(avx_sqrt_pd_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
X86_INTRINSIC_DATA(avx_sqrt_ps_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
X86_INTRINSIC_DATA(avx_vperm2f128_pd_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
X86_INTRINSIC_DATA(avx_vperm2f128_ps_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
X86_INTRINSIC_DATA(avx_vperm2f128_si_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_pd_128, FMA_OP_MASK, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_pd_256, FMA_OP_MASK, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_pd_512, FMA_OP_MASK, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_ps_128, FMA_OP_MASK, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_ps_256, FMA_OP_MASK, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmadd_ps_512, FMA_OP_MASK, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_pd_128, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_pd_256, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_pd_512, FMA_OP_MASK, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_ps_128, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_ps_256, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmaddsub_ps_512, FMA_OP_MASK, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_pd_128, FMA_OP_MASK, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_pd_256, FMA_OP_MASK, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_pd_512, FMA_OP_MASK, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_ps_128, FMA_OP_MASK, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_ps_256, FMA_OP_MASK, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsub_ps_512, FMA_OP_MASK, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_pd_128, FMA_OP_MASK, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_pd_256, FMA_OP_MASK, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_pd_512, FMA_OP_MASK, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_ps_128, FMA_OP_MASK, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_ps_256, FMA_OP_MASK, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfmsubadd_ps_512, FMA_OP_MASK, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_pd_128, FMA_OP_MASK, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_pd_256, FMA_OP_MASK, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_pd_512, FMA_OP_MASK, X86ISD::FNMADD,
+ X86ISD::FNMADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_ps_128, FMA_OP_MASK, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_ps_256, FMA_OP_MASK, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmadd_ps_512, FMA_OP_MASK, X86ISD::FNMADD,
+ X86ISD::FNMADD_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_pd_128, FMA_OP_MASK, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_pd_256, FMA_OP_MASK, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_pd_512, FMA_OP_MASK, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_ps_128, FMA_OP_MASK, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_ps_256, FMA_OP_MASK, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_mask_vfnmsub_ps_512, FMA_OP_MASK, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(fma_vfmadd_pd, INTR_TYPE_3OP, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmadd_pd_256, INTR_TYPE_3OP, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmadd_ps, INTR_TYPE_3OP, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmadd_ps_256, INTR_TYPE_3OP, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmaddsub_pd, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmaddsub_pd_256, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmaddsub_ps, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmaddsub_ps_256, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmsub_pd, INTR_TYPE_3OP, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmsub_pd_256, INTR_TYPE_3OP, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmsub_ps, INTR_TYPE_3OP, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmsub_ps_256, INTR_TYPE_3OP, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfmsubadd_pd, INTR_TYPE_3OP, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmsubadd_pd_256, INTR_TYPE_3OP, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmsubadd_ps, INTR_TYPE_3OP, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_vfmsubadd_ps_256, INTR_TYPE_3OP, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(fma_vfnmadd_pd, INTR_TYPE_3OP, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfnmadd_pd_256, INTR_TYPE_3OP, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfnmadd_ps, INTR_TYPE_3OP, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfnmadd_ps_256, INTR_TYPE_3OP, X86ISD::FNMADD, 0),
+ X86_INTRINSIC_DATA(fma_vfnmsub_pd, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfnmsub_pd_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfnmsub_ps, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(fma_vfnmsub_ps_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
X86_INTRINSIC_DATA(sse2_comieq_sd, COMI, X86ISD::COMI, ISD::SETEQ),
X86_INTRINSIC_DATA(sse2_comige_sd, COMI, X86ISD::COMI, ISD::SETGE),
X86_INTRINSIC_DATA(sse2_comigt_sd, COMI, X86ISD::COMI, ISD::SETGT),
X86_INTRINSIC_DATA(sse2_comile_sd, COMI, X86ISD::COMI, ISD::SETLE),
X86_INTRINSIC_DATA(sse2_comilt_sd, COMI, X86ISD::COMI, ISD::SETLT),
X86_INTRINSIC_DATA(sse2_comineq_sd, COMI, X86ISD::COMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(sse2_max_pd, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(sse2_min_pd, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
+ X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
+ X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(sse2_pmaxs_w, INTR_TYPE_2OP, X86ISD::SMAX, 0),
X86_INTRINSIC_DATA(sse2_pmaxu_b, INTR_TYPE_2OP, X86ISD::UMAX, 0),
X86_INTRINSIC_DATA(sse2_pmins_w, INTR_TYPE_2OP, X86ISD::SMIN, 0),
X86_INTRINSIC_DATA(sse2_pminu_b, INTR_TYPE_2OP, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(sse2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
+ X86_INTRINSIC_DATA(sse2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
+ X86_INTRINSIC_DATA(sse2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
+ X86_INTRINSIC_DATA(sse2_pshuf_d, INTR_TYPE_2OP, X86ISD::PSHUFD, 0),
+ X86_INTRINSIC_DATA(sse2_pshufh_w, INTR_TYPE_2OP, X86ISD::PSHUFHW, 0),
+ X86_INTRINSIC_DATA(sse2_pshufl_w, INTR_TYPE_2OP, X86ISD::PSHUFLW, 0),
X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -266,6 +542,7 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse3_hsub_pd, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
X86_INTRINSIC_DATA(sse3_hsub_ps, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
X86_INTRINSIC_DATA(sse41_insertps, INTR_TYPE_3OP, X86ISD::INSERTPS, 0),
+ X86_INTRINSIC_DATA(sse41_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(sse41_pmaxsb, INTR_TYPE_2OP, X86ISD::SMAX, 0),
X86_INTRINSIC_DATA(sse41_pmaxsd, INTR_TYPE_2OP, X86ISD::SMAX, 0),
X86_INTRINSIC_DATA(sse41_pmaxud, INTR_TYPE_2OP, X86ISD::UMAX, 0),
@@ -274,12 +551,27 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse41_pminsd, INTR_TYPE_2OP, X86ISD::SMIN, 0),
X86_INTRINSIC_DATA(sse41_pminud, INTR_TYPE_2OP, X86ISD::UMIN, 0),
X86_INTRINSIC_DATA(sse41_pminuw, INTR_TYPE_2OP, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxbd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxbq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxbw, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxdq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxwd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovsxwq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(sse41_pmuldq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(sse_comieq_ss, COMI, X86ISD::COMI, ISD::SETEQ),
X86_INTRINSIC_DATA(sse_comige_ss, COMI, X86ISD::COMI, ISD::SETGE),
X86_INTRINSIC_DATA(sse_comigt_ss, COMI, X86ISD::COMI, ISD::SETGT),
X86_INTRINSIC_DATA(sse_comile_ss, COMI, X86ISD::COMI, ISD::SETLE),
X86_INTRINSIC_DATA(sse_comilt_ss, COMI, X86ISD::COMI, ISD::SETLT),
X86_INTRINSIC_DATA(sse_comineq_ss, COMI, X86ISD::COMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(sse_max_ps, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
X86_INTRINSIC_DATA(sse_ucomieq_ss, COMI, X86ISD::UCOMI, ISD::SETEQ),
X86_INTRINSIC_DATA(sse_ucomige_ss, COMI, X86ISD::UCOMI, ISD::SETGE),
@@ -290,7 +582,11 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(ssse3_phadd_d_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phadd_w_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phsub_d_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
- X86_INTRINSIC_DATA(ssse3_phsub_w_128, INTR_TYPE_2OP, X86ISD::HSUB, 0)
+ X86_INTRINSIC_DATA(ssse3_phsub_w_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
+ X86_INTRINSIC_DATA(ssse3_pshuf_b_128, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
+ X86_INTRINSIC_DATA(ssse3_psign_b_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
+ X86_INTRINSIC_DATA(ssse3_psign_d_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
+ X86_INTRINSIC_DATA(ssse3_psign_w_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0)
};
/*
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 4e0d594..6af59d4 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -74,11 +74,11 @@ namespace llvm {
X86AsmPrinter::StackMapShadowTracker::~StackMapShadowTracker() {}
void
- X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &MF) {
+ X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &F) {
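+    // Remember the function: the emitter created below and later shadow-size
+    // checks are driven by its per-function subtarget.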
+ MF = &F;
CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
- *TM.getSubtargetImpl()->getInstrInfo(),
- *TM.getSubtargetImpl()->getRegisterInfo(), *TM.getSubtargetImpl(),
- MF.getContext()));
+ *MF->getSubtarget().getInstrInfo(), *MF->getSubtarget().getRegisterInfo(),
+ MF->getSubtarget(), MF->getContext()));
}
void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
@@ -100,7 +100,7 @@ namespace llvm {
if (InShadow && CurrentShadowSize < RequiredShadowSize) {
InShadow = false;
EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
- TM.getSubtarget<X86Subtarget>().is64Bit(), STI);
+ MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
}
}
@@ -112,8 +112,8 @@ namespace llvm {
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
X86AsmPrinter &asmprinter)
-: Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()),
- MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}
+ : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
+ AsmPrinter(asmprinter) {}
MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
@@ -124,7 +124,7 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference");
SmallString<128> Name;
@@ -390,9 +390,8 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
Inst.addOperand(Seg);
}
-static unsigned getRetOpcode(const X86Subtarget &Subtarget)
-{
- return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
+static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
+ return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
@@ -510,6 +509,7 @@ ReSimplify:
// inputs modeled as normal uses instead of implicit uses. As such, truncate
// off all but the first operand (the callee). FIXME: Change isel.
case X86::TAILJMPr64:
+ case X86::TAILJMPr64_REX:
case X86::CALL64r:
case X86::CALL64pcrel32: {
unsigned Opcode = OutMI.getOpcode();
@@ -546,6 +546,24 @@ ReSimplify:
break;
}
+ case X86::DEC16r:
+ case X86::DEC32r:
+ case X86::INC16r:
+ case X86::INC32r:
+ // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
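+    // (In 64-bit mode the 0x40-0x4F encodings are REX prefixes, so the
+    // one-byte forms are unavailable there.)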
+ if (!AsmPrinter.getSubtarget().is64Bit()) {
+ unsigned Opcode;
+ switch (OutMI.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
+ case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
+ case X86::INC16r: Opcode = X86::INC16r_alt; break;
+ case X86::INC32r: Opcode = X86::INC32r_alt; break;
+ }
+ OutMI.setOpcode(Opcode);
+ }
+ break;
+
// These are pseudo-ops for OR to help with the OR->ADD transformation. We do
// this with an ugly goto in case the resultant OR uses EAX and needs the
// short form.
@@ -559,28 +577,6 @@ ReSimplify:
case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;
- // The assembler backend wants to see branches in their small form and relax
- // them to their large form. The JIT can only handle the large form because
- // it does not do relaxation. For now, translate the large form to the
- // small one here.
- case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
- case X86::JO_4: OutMI.setOpcode(X86::JO_1); break;
- case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
- case X86::JB_4: OutMI.setOpcode(X86::JB_1); break;
- case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
- case X86::JE_4: OutMI.setOpcode(X86::JE_1); break;
- case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
- case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
- case X86::JA_4: OutMI.setOpcode(X86::JA_1); break;
- case X86::JS_4: OutMI.setOpcode(X86::JS_1); break;
- case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
- case X86::JP_4: OutMI.setOpcode(X86::JP_1); break;
- case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
- case X86::JL_4: OutMI.setOpcode(X86::JL_1); break;
- case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
- case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
- case X86::JG_4: OutMI.setOpcode(X86::JG_1); break;
-
// Atomic load and store require a separate pseudo-inst because Acquire
// implies mayStore and Release implies mayLoad; fix these to regular MOV
// instructions here
@@ -625,13 +621,13 @@ ReSimplify:
// MOV64ao8, MOV64o8a
// XCHG16ar, XCHG32ar, XCHG64ar
case X86::MOV8mr_NOREX:
- case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
+ case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
case X86::MOV8rm_NOREX:
- case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
- case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
- case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
- case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
- case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
+ case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
+ case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
+ case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
+ case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
+ case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
@@ -808,6 +804,58 @@ static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSu
} // while (NumBytes)
}
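+// A statepoint wraps a call so the runtime can locate live GC values at the
+// call site; lower the wrapped call here, then record it in the stack map.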
+static void LowerSTATEPOINT(MCStreamer &OS, StackMaps &SM,
+ const MachineInstr &MI, bool Is64Bit,
+ const TargetMachine& TM,
+ const MCSubtargetInfo& STI,
+ X86MCInstLower &MCInstLowering) {
+ assert(Is64Bit && "Statepoint currently only supports X86-64");
+
+ // Lower call target and choose correct opcode
+ const MachineOperand &call_target = StatepointOpers(&MI).getCallTarget();
+ MCOperand call_target_mcop;
+ unsigned call_opcode;
+ switch (call_target.getType()) {
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ call_target_mcop = MCInstLowering.LowerSymbolOperand(
+ call_target,
+ MCInstLowering.GetSymbolFromOperand(call_target));
+ call_opcode = X86::CALL64pcrel32;
+ // Currently, we only support relative addressing with statepoints.
+ // Otherwise, we'll need a scratch register to hold the target
+ // address. You'll fail asserts during load & relocation if this
+      // symbol is too far away. (TODO: support non-relative addressing)
+ break;
+ case MachineOperand::MO_Immediate:
+ call_target_mcop = MCOperand::CreateImm(call_target.getImm());
+ call_opcode = X86::CALL64pcrel32;
+ // Currently, we only support relative addressing with statepoints.
+ // Otherwise, we'll need a scratch register to hold the target
+ // immediate. You'll fail asserts during load & relocation if this
+      // address is too far away. (TODO: support non-relative addressing)
+ break;
+ case MachineOperand::MO_Register:
+ call_target_mcop = MCOperand::CreateReg(call_target.getReg());
+ call_opcode = X86::CALL64r;
+ break;
+ default:
+ llvm_unreachable("Unsupported operand type in statepoint call target");
+ break;
+ }
+
+ // Emit call
+ MCInst call_inst;
+ call_inst.setOpcode(call_opcode);
+ call_inst.addOperand(call_target_mcop);
+ OS.EmitInstruction(call_inst, STI);
+
+ // Record our statepoint node in the same section used by STACKMAP
+ // and PATCHPOINT
+ SM.recordStatepoint(MI);
+}
+
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
@@ -941,8 +989,7 @@ static std::string getShuffleComment(const MachineOperand &DstOp,
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
- const X86RegisterInfo *RI = static_cast<const X86RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
@@ -963,8 +1010,14 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
break;
}
case X86::TAILJMPr:
+ case X86::TAILJMPm:
case X86::TAILJMPd:
+ case X86::TAILJMPr64:
+ case X86::TAILJMPm64:
case X86::TAILJMPd64:
+ case X86::TAILJMPr64_REX:
+ case X86::TAILJMPm64_REX:
+ case X86::TAILJMPd64_REX:
// Lower these as normal, but add some comments.
OutStreamer.AddComment("TAILCALL");
break;
@@ -1030,6 +1083,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addExpr(DotExpr));
return;
}
+ case TargetOpcode::STATEPOINT:
+ return LowerSTATEPOINT(OutStreamer, SM, *MI, Subtarget->is64Bit(), TM,
+ getSubtargetInfo(), MCInstLowering);
case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);
diff --git a/lib/Target/X86/X86MachineFunctionInfo.cpp b/lib/Target/X86/X86MachineFunctionInfo.cpp
index 568dc22..ac2cdc8 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.cpp
+++ b/lib/Target/X86/X86MachineFunctionInfo.cpp
@@ -8,7 +8,26 @@
//===----------------------------------------------------------------------===//
#include "X86MachineFunctionInfo.h"
+#include "X86RegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
void X86MachineFunctionInfo::anchor() { }
+
+void X86MachineFunctionInfo::setRestoreBasePointer(const MachineFunction *MF) {
+ if (!RestoreBasePointerOffset) {
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+ MF->getSubtarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
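+    // The base pointer is stashed below the callee-saved GPRs: walk the
+    // null-terminated CSR list and step down one slot per saved GR32/GR64.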
+ for (const MCPhysReg *CSR =
+ RegInfo->X86RegisterInfo::getCalleeSavedRegs(MF);
+ unsigned Reg = *CSR;
+       ++CSR) {
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ RestoreBasePointerOffset -= SlotSize;
+ }
+ }
+}
+
diff --git a/lib/Target/X86/X86MachineFunctionInfo.h b/lib/Target/X86/X86MachineFunctionInfo.h
index 79a51b3..d598b55 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/lib/Target/X86/X86MachineFunctionInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_X86_X86MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_X86_X86MACHINEFUNCTIONINFO_H
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineValueType.h"
#include <vector>
@@ -31,6 +32,12 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// contains stack pointer re-alignment code which requires FP.
bool ForceFramePointer;
+  /// RestoreBasePointerOffset - Non-zero if the function has a base pointer
+  /// and makes a call to llvm.eh.sjlj.setjmp. When non-zero, the value is a
+ /// displacement from the frame pointer to a slot where the base pointer
+ /// is stashed.
+ signed char RestoreBasePointerOffset;
+
/// CalleeSavedFrameSize - Size of the callee-saved register portion of the
/// stack frame in bytes.
unsigned CalleeSavedFrameSize;
@@ -43,6 +50,9 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// ReturnAddrIndex - FrameIndex for return slot.
int ReturnAddrIndex;
+  /// \brief FrameIndex for the frame address slot.
+ int FrameAddrIndex;
+
/// TailCallReturnAddrDelta - The number of bytes by which return address
/// stack slot is moved as the result of tail call optimization.
int TailCallReturnAddrDelta;
@@ -70,28 +80,22 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
unsigned ArgumentStackSize;
/// NumLocalDynamics - Number of local-dynamic TLS accesses.
unsigned NumLocalDynamics;
-
-public:
- /// Describes a register that needs to be forwarded from the prologue to a
- /// musttail call.
- struct Forward {
- Forward(unsigned VReg, MCPhysReg PReg, MVT VT)
- : VReg(VReg), PReg(PReg), VT(VT) {}
- unsigned VReg;
- MCPhysReg PReg;
- MVT VT;
- };
+ /// HasPushSequences - Keeps track of whether this function uses sequences
+ /// of pushes to pass function parameters.
+ bool HasPushSequences;
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
/// that must be forwarded to every musttail call.
- std::vector<Forward> ForwardedMustTailRegParms;
+ SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
public:
X86MachineFunctionInfo() : ForceFramePointer(false),
+ RestoreBasePointerOffset(0),
CalleeSavedFrameSize(0),
BytesToPopOnReturn(0),
ReturnAddrIndex(0),
+ FrameAddrIndex(0),
TailCallReturnAddrDelta(0),
SRetReturnReg(0),
GlobalBaseReg(0),
@@ -100,13 +104,16 @@ public:
VarArgsGPOffset(0),
VarArgsFPOffset(0),
ArgumentStackSize(0),
- NumLocalDynamics(0) {}
+ NumLocalDynamics(0),
+ HasPushSequences(false) {}
explicit X86MachineFunctionInfo(MachineFunction &MF)
: ForceFramePointer(false),
+ RestoreBasePointerOffset(0),
CalleeSavedFrameSize(0),
BytesToPopOnReturn(0),
ReturnAddrIndex(0),
+ FrameAddrIndex(0),
TailCallReturnAddrDelta(0),
SRetReturnReg(0),
GlobalBaseReg(0),
@@ -115,11 +122,19 @@ public:
VarArgsGPOffset(0),
VarArgsFPOffset(0),
ArgumentStackSize(0),
- NumLocalDynamics(0) {}
+ NumLocalDynamics(0),
+ HasPushSequences(false) {}
bool getForceFramePointer() const { return ForceFramePointer;}
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; }
+ bool getHasPushSequences() const { return HasPushSequences; }
+ void setHasPushSequences(bool HasPush) { HasPushSequences = HasPush; }
+
+  bool getRestoreBasePointer() const { return RestoreBasePointerOffset != 0; }
+  void setRestoreBasePointer(const MachineFunction *MF);
+  int getRestoreBasePointerOffset() const { return RestoreBasePointerOffset; }
+
unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; }
@@ -129,6 +144,9 @@ public:
int getRAIndex() const { return ReturnAddrIndex; }
void setRAIndex(int Index) { ReturnAddrIndex = Index; }
+ int getFAIndex() const { return FrameAddrIndex; }
+ void setFAIndex(int Index) { FrameAddrIndex = Index; }
+
int getTCReturnAddrDelta() const { return TailCallReturnAddrDelta; }
void setTCReturnAddrDelta(int delta) {TailCallReturnAddrDelta = delta;}
@@ -156,7 +174,7 @@ public:
unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
- std::vector<Forward> &getForwardedMustTailRegParms() {
+ SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
return ForwardedMustTailRegParms;
}
};
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index adc05b2..143e70b 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -51,7 +51,7 @@ namespace {
struct PadShortFunc : public MachineFunctionPass {
static char ID;
PadShortFunc() : MachineFunctionPass(ID)
- , Threshold(4), TM(nullptr), TII(nullptr) {}
+ , Threshold(4), STI(nullptr), TII(nullptr) {}
bool runOnMachineFunction(MachineFunction &MF) override;
@@ -79,7 +79,7 @@ namespace {
// VisitedBBs - Cache of previously visited BBs.
DenseMap<MachineBasicBlock*, VisitedBBInfo> VisitedBBs;
- const TargetMachine *TM;
+ const X86Subtarget *STI;
const TargetInstrInfo *TII;
};
@@ -93,19 +93,16 @@ FunctionPass *llvm::createX86PadShortFunctions() {
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- const AttributeSet &FnAttrs = MF.getFunction()->getAttributes();
- if (FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize) ||
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::MinSize)) {
+ if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
return false;
}
- TM = &MF.getTarget();
- if (!TM->getSubtarget<X86Subtarget>().padShortFunctions())
+ STI = &MF.getSubtarget<X86Subtarget>();
+ if (!STI->padShortFunctions())
return false;
- TII = TM->getSubtargetImpl()->getInstrInfo();
+ TII = STI->getInstrInfo();
// Search through basic blocks and mark the ones that have early returns
ReturnBBs.clear();
@@ -195,8 +192,7 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
return true;
}
- CyclesToEnd += TII->getInstrLatency(
- TM->getSubtargetImpl()->getInstrItineraryData(), MI);
+ CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), MI);
}
VisitedBBs[MBB] = VisitedBBInfo(false, CyclesToEnd);
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index a4a366d..cab7ce8 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -66,21 +66,22 @@ X86RegisterInfo::X86RegisterInfo(const X86Subtarget &STI)
Is64Bit = Subtarget.is64Bit();
IsWin64 = Subtarget.isTargetWin64();
+ // Use a callee-saved register as the base pointer. These registers must
+ // not conflict with any ABI requirements. For example, in 32-bit mode PIC
+  // requires the GOT pointer to be in EBX before function calls via the PLT.
if (Is64Bit) {
SlotSize = 8;
- StackPtr = (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) ?
- X86::RSP : X86::ESP;
- FramePtr = (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) ?
- X86::RBP : X86::EBP;
+ bool Use64BitReg =
+ Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
+ StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
+ FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
+ BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
} else {
SlotSize = 4;
StackPtr = X86::ESP;
FramePtr = X86::EBP;
+ BasePtr = X86::ESI;
}
- // Use a callee-saved register as the base pointer. These registers must
- // not conflict with any ABI requirements. For example, in 32-bit mode PIC
- // requires GOT in the EBX register before function calls via PLT GOT pointer.
- BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}
bool
@@ -354,7 +355,9 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
"Stack realignment in presence of dynamic allocas is not supported with"
"this calling convention.");
- for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
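+    // Reserve the 64-bit super-register so that every narrower alias of the
+    // base pointer is marked reserved along with it.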
+ unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), MVT::i64,
+ false);
+ for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
I.isValid(); ++I)
Reserved.set(*I);
}
@@ -445,10 +448,8 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const Function *F = MF.getFunction();
unsigned StackAlign =
MF.getSubtarget().getFrameLowering()->getStackAlignment();
- bool requiresRealignment =
- ((MFI->getMaxAlignment() > StackAlign) ||
- F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::StackAlignment));
+ bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+ F->hasFnAttribute(Attribute::StackAlignment));
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)
@@ -468,8 +469,6 @@ void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
- assert(SPAdj == 0 && "Unexpected");
-
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
@@ -506,6 +505,9 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
} else
FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
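+  // When frame indices are being resolved against the stack pointer itself,
+  // fold in any stack adjustment pending around the current call sequence.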
+ if (BasePtr == StackPtr)
+ FIOffset += SPAdj;
+
// The frame index format for stackmaps and patchpoints is different from the
// X86 format. It only has a FI and an offset.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
@@ -535,6 +537,14 @@ unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
+unsigned X86RegisterInfo::getPtrSizedFrameRegister(
+ const MachineFunction &MF) const {
+ unsigned FrameReg = getFrameRegister(MF);
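+  // The x32 ABI runs in 64-bit mode with 32-bit pointers, so report the
+  // 32-bit alias of the frame register (e.g. EBP rather than RBP).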
+ if (Subtarget.isTarget64BitILP32())
+ FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
+ return FrameReg;
+}
+
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
bool High) {
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index cc0a7b2..406b1fc 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -122,6 +122,7 @@ public:
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
+ unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
// FIXME: Move to FrameInfo
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 311a717..2e735fa 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -263,14 +263,22 @@ def FS : X86Reg<"fs", 4>;
def GS : X86Reg<"gs", 5>;
// Debug registers
-def DR0 : X86Reg<"dr0", 0>;
-def DR1 : X86Reg<"dr1", 1>;
-def DR2 : X86Reg<"dr2", 2>;
-def DR3 : X86Reg<"dr3", 3>;
-def DR4 : X86Reg<"dr4", 4>;
-def DR5 : X86Reg<"dr5", 5>;
-def DR6 : X86Reg<"dr6", 6>;
-def DR7 : X86Reg<"dr7", 7>;
+def DR0 : X86Reg<"dr0", 0>;
+def DR1 : X86Reg<"dr1", 1>;
+def DR2 : X86Reg<"dr2", 2>;
+def DR3 : X86Reg<"dr3", 3>;
+def DR4 : X86Reg<"dr4", 4>;
+def DR5 : X86Reg<"dr5", 5>;
+def DR6 : X86Reg<"dr6", 6>;
+def DR7 : X86Reg<"dr7", 7>;
+def DR8 : X86Reg<"dr8", 8>;
+def DR9 : X86Reg<"dr9", 9>;
+def DR10 : X86Reg<"dr10", 10>;
+def DR11 : X86Reg<"dr11", 11>;
+def DR12 : X86Reg<"dr12", 12>;
+def DR13 : X86Reg<"dr13", 13>;
+def DR14 : X86Reg<"dr14", 14>;
+def DR15 : X86Reg<"dr15", 15>;
// Control registers
def CR0 : X86Reg<"cr0", 0>;
@@ -317,7 +325,7 @@ def GR8 : RegisterClass<"X86", [i8], 8,
R8B, R9B, R10B, R11B, R14B, R15B, R12B, R13B)> {
let AltOrders = [(sub GR8, AH, BH, CH, DH)];
let AltOrderSelect = [{
- return MF.getTarget().getSubtarget<X86Subtarget>().is64Bit();
+ return MF.getSubtarget<X86Subtarget>().is64Bit();
}];
}
@@ -369,7 +377,7 @@ def GR8_NOREX : RegisterClass<"X86", [i8], 8,
(add AL, CL, DL, AH, CH, DH, BL, BH)> {
let AltOrders = [(sub GR8_NOREX, AH, BH, CH, DH)];
let AltOrderSelect = [{
- return MF.getTarget().getSubtarget<X86Subtarget>().is64Bit();
+ return MF.getSubtarget<X86Subtarget>().is64Bit();
}];
}
// GR16_NOREX - GR16 registers which do not require a REX prefix.
@@ -461,18 +469,18 @@ def VR256X : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
256, (sequence "YMM%u", 0, 31)>;
// Mask registers
-def VK1 : RegisterClass<"X86", [i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
-def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
-def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
-def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
+def VK1 : RegisterClass<"X86", [i1], 8, (sequence "K%u", 0, 7)> {let Size = 8;}
+def VK2 : RegisterClass<"X86", [v2i1], 8, (add VK1)> {let Size = 8;}
+def VK4 : RegisterClass<"X86", [v4i1], 8, (add VK2)> {let Size = 8;}
+def VK8 : RegisterClass<"X86", [v8i1], 8, (add VK4)> {let Size = 8;}
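+// Masks of eight or fewer elements spill and reload as a single byte; the
+// wider mask classes below keep correspondingly larger spill slots.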
def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
-def VK1WM : RegisterClass<"X86", [i1], 16, (sub VK1, K0)> {let Size = 16;}
-def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
-def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
-def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
+def VK1WM : RegisterClass<"X86", [i1], 8, (sub VK1, K0)> {let Size = 8;}
+def VK2WM : RegisterClass<"X86", [v2i1], 8, (sub VK2, K0)> {let Size = 8;}
+def VK4WM : RegisterClass<"X86", [v4i1], 8, (sub VK4, K0)> {let Size = 8;}
+def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)> {let Size = 8;}
def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)> {let Size = 16;}
def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;}
def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
diff --git a/lib/Target/X86/X86SchedHaswell.td b/lib/Target/X86/X86SchedHaswell.td
index 73a3230..61c0600 100644
--- a/lib/Target/X86/X86SchedHaswell.td
+++ b/lib/Target/X86/X86SchedHaswell.td
@@ -1895,7 +1895,7 @@ def : InstRW<[WriteMULr], (instregex "(V?)MUL(P|S)(S|D)rr")>;
// x,m / v,v,m.
def WriteMULm : SchedWriteRes<[HWPort01, HWPort23]> {
- let Latency = 4;
+ let Latency = 9;
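+    // The folded-load form presumably pays the load latency on top of the
+    // multiply itself, hence the higher latency than the register form.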
let NumMicroOps = 2;
let ResourceCycles = [1, 1];
}
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index 821044f..7feabf6 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -57,7 +57,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
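+  // Use the per-function subtarget here; TargetMachine-wide subtarget queries
+  // are being phased out in favor of function-attribute-driven ones.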
+ const X86Subtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<X86Subtarget>();
#ifndef NDEBUG
// If the base register might conflict with our physical registers, bail out.
@@ -199,17 +200,15 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
return Chain;
}
-SDValue
-X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain, SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
+SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
// This requires the copy size to be a constant, preferably
// within a subtarget-specific limit.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<X86Subtarget>();
if (!ConstantSize)
return SDValue();
uint64_t SizeVal = ConstantSize->getZExtValue();
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 9d877c9..de30c75 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -257,17 +257,17 @@ void X86Subtarget::initializeEnvironment() {
HasVLX = false;
HasADX = false;
HasSHA = false;
- HasSGX = false;
HasPRFCHW = false;
HasRDSEED = false;
- HasSMAP = false;
IsBTMemSlow = false;
IsSHLDSlow = false;
IsUAMemFast = false;
- HasVectorUAMem = false;
+ IsUAMem32Slow = false;
+ HasSSEUnalignedMem = false;
HasCmpxchg16b = false;
UseLeaForSP = false;
- HasSlowDivide = false;
+ HasSlowDivide32 = false;
+ HasSlowDivide64 = false;
PadShortFunctions = false;
CallRegIndirect = false;
LEAUsesAG = false;
@@ -280,46 +280,6 @@ void X86Subtarget::initializeEnvironment() {
MaxInlineSizeThreshold = 128;
}
-static std::string computeDataLayout(const Triple &TT) {
- // X86 is little endian
- std::string Ret = "e";
-
- Ret += DataLayout::getManglingComponent(TT);
- // X86 and x32 have 32 bit pointers.
- if ((TT.isArch64Bit() &&
- (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
- !TT.isArch64Bit())
- Ret += "-p:32:32";
-
- // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
- if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
- Ret += "-i64:64";
- else
- Ret += "-f64:32:64";
-
- // Some ABIs align long double to 128 bits, others to 32.
- if (TT.isOSNaCl())
- ; // No f80
- else if (TT.isArch64Bit() || TT.isOSDarwin())
- Ret += "-f80:128";
- else
- Ret += "-f80:32";
-
- // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
- if (TT.isArch64Bit())
- Ret += "-n8:16:32:64";
- else
- Ret += "-n8:16:32";
-
- // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
- if (!TT.isArch64Bit() && TT.isOSWindows())
- Ret += "-S32";
- else
- Ret += "-S128";
-
- return Ret;
-}
-
X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
initializeEnvironment();
@@ -332,16 +292,16 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
unsigned StackAlignOverride)
: X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
PICStyle(PICStyles::None), TargetTriple(TT),
- DL(computeDataLayout(TargetTriple)),
StackAlignOverride(StackAlignOverride),
In64BitMode(TargetTriple.getArch() == Triple::x86_64),
In32BitMode(TargetTriple.getArch() == Triple::x86 &&
TargetTriple.getEnvironment() != Triple::CODE16),
In16BitMode(TargetTriple.getArch() == Triple::x86 &&
TargetTriple.getEnvironment() == Triple::CODE16),
- TSInfo(DL), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
- TLInfo(TM), FrameLowering(TargetFrameLowering::StackGrowsDown,
- getStackAlignment(), is64Bit() ? -8 : -4) {
+ TSInfo(*TM.getDataLayout()),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ FrameLowering(TargetFrameLowering::StackGrowsDown, getStackAlignment(),
+ is64Bit() ? -8 : -4) {
// Determine the PICStyle based on the target selected.
if (TM.getRelocationModel() == Reloc::Static) {
// Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 091b6c4..4c31f78 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -31,7 +31,7 @@ class GlobalValue;
class StringRef;
class TargetMachine;
-/// PICStyles - The X86 backend supports a number of different styles of PIC.
+/// The X86 backend supports a number of different styles of PIC.
///
namespace PICStyles {
enum Style {
@@ -58,138 +58,136 @@ protected:
Others, IntelAtom, IntelSLM
};
- /// X86ProcFamily - X86 processor family: Intel Atom, and others
+ /// X86 processor family: Intel Atom, and others
X86ProcFamilyEnum X86ProcFamily;
- /// PICStyle - Which PIC style to use
- ///
+ /// Which PIC style to use
PICStyles::Style PICStyle;
- /// X86SSELevel - MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or
- /// none supported.
+ /// MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
X86SSEEnum X86SSELevel;
- /// X863DNowLevel - 3DNow or 3DNow Athlon, or none supported.
- ///
+ /// 3DNow, 3DNow Athlon, or none supported.
X863DNowEnum X863DNowLevel;
- /// HasCMov - True if this processor has conditional move instructions
+ /// True if this processor has conditional move instructions
/// (generally pentium pro+).
bool HasCMov;
- /// HasX86_64 - True if the processor supports X86-64 instructions.
- ///
+ /// True if the processor supports X86-64 instructions.
bool HasX86_64;
- /// HasPOPCNT - True if the processor supports POPCNT.
+ /// True if the processor supports POPCNT.
bool HasPOPCNT;
- /// HasSSE4A - True if the processor supports SSE4A instructions.
+ /// True if the processor supports SSE4A instructions.
bool HasSSE4A;
- /// HasAES - Target has AES instructions
+ /// Target has AES instructions
bool HasAES;
- /// HasPCLMUL - Target has carry-less multiplication
+ /// Target has carry-less multiplication
bool HasPCLMUL;
- /// HasFMA - Target has 3-operand fused multiply-add
+ /// Target has 3-operand fused multiply-add
bool HasFMA;
- /// HasFMA4 - Target has 4-operand fused multiply-add
+ /// Target has 4-operand fused multiply-add
bool HasFMA4;
- /// HasXOP - Target has XOP instructions
+ /// Target has XOP instructions
bool HasXOP;
- /// HasTBM - Target has TBM instructions.
+ /// Target has TBM instructions.
bool HasTBM;
- /// HasMOVBE - True if the processor has the MOVBE instruction.
+ /// True if the processor has the MOVBE instruction.
bool HasMOVBE;
- /// HasRDRAND - True if the processor has the RDRAND instruction.
+ /// True if the processor has the RDRAND instruction.
bool HasRDRAND;
- /// HasF16C - Processor has 16-bit floating point conversion instructions.
+ /// Processor has 16-bit floating point conversion instructions.
bool HasF16C;
- /// HasFSGSBase - Processor has FS/GS base insturctions.
+  /// Processor has FS/GS base instructions.
bool HasFSGSBase;
- /// HasLZCNT - Processor has LZCNT instruction.
+ /// Processor has LZCNT instruction.
bool HasLZCNT;
- /// HasBMI - Processor has BMI1 instructions.
+ /// Processor has BMI1 instructions.
bool HasBMI;
- /// HasBMI2 - Processor has BMI2 instructions.
+ /// Processor has BMI2 instructions.
bool HasBMI2;
- /// HasRTM - Processor has RTM instructions.
+ /// Processor has RTM instructions.
bool HasRTM;
- /// HasHLE - Processor has HLE.
+ /// Processor has HLE.
bool HasHLE;
- /// HasADX - Processor has ADX instructions.
+ /// Processor has ADX instructions.
bool HasADX;
- /// HasSHA - Processor has SHA instructions.
+ /// Processor has SHA instructions.
bool HasSHA;
- /// HasSGX - Processor has SGX instructions.
- bool HasSGX;
-
- /// HasPRFCHW - Processor has PRFCHW instructions.
+ /// Processor has PRFCHW instructions.
bool HasPRFCHW;
- /// HasRDSEED - Processor has RDSEED instructions.
+ /// Processor has RDSEED instructions.
bool HasRDSEED;
- /// HasSMAP - Processor has SMAP instructions.
- bool HasSMAP;
-
- /// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
+ /// True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
- /// IsSHLDSlow - True if SHLD instructions are slow.
+ /// True if SHLD instructions are slow.
bool IsSHLDSlow;
- /// IsUAMemFast - True if unaligned memory access is fast.
+ /// True if unaligned memory access is fast.
bool IsUAMemFast;
- /// HasVectorUAMem - True if SIMD operations can have unaligned memory
- /// operands. This may require setting a feature bit in the processor.
- bool HasVectorUAMem;
+ /// True if unaligned 32-byte memory accesses are slow.
+ bool IsUAMem32Slow;
+
+ /// True if SSE operations can have unaligned memory operands.
+ /// This may require setting a configuration bit in the processor.
+ bool HasSSEUnalignedMem;
- /// HasCmpxchg16b - True if this processor has the CMPXCHG16B instruction;
+ /// True if this processor has the CMPXCHG16B instruction;
/// this is true for most x86-64 chips, but not the first AMD chips.
bool HasCmpxchg16b;
- /// UseLeaForSP - True if the LEA instruction should be used for adjusting
+ /// True if the LEA instruction should be used for adjusting
/// the stack pointer. This is an optimization for Intel Atom processors.
bool UseLeaForSP;
- /// HasSlowDivide - True if smaller divides are significantly faster than
- /// full divides and should be used when possible.
- bool HasSlowDivide;
+ /// True if 8-bit divisions are significantly faster than
+ /// 32-bit divisions and should be used when possible.
+ bool HasSlowDivide32;
+
+  /// True if 32-bit divides are significantly faster than
+  /// 64-bit divides and should be used when possible.
+ bool HasSlowDivide64;
- /// PadShortFunctions - True if the short functions should be padded to prevent
+  /// True if short functions should be padded to prevent
/// a stall when returning too early.
bool PadShortFunctions;
- /// CallRegIndirect - True if the Calls with memory reference should be converted
+  /// True if calls with a memory reference should be converted
/// to a register-based indirect call.
bool CallRegIndirect;
- /// LEAUsesAG - True if the LEA instruction inputs have to be ready at
- /// address generation (AG) time.
+
+ /// True if the LEA instruction inputs have to be ready at address generation
+ /// (AG) time.
bool LEAUsesAG;
- /// SlowLEA - True if the LEA instruction with certain arguments is slow
+ /// True if the LEA instruction with certain arguments is slow
bool SlowLEA;
- /// SlowIncDec - True if INC and DEC instructions are slow when writing to flags
+ /// True if INC and DEC instructions are slow when writing to flags
bool SlowIncDec;
/// Use the RSQRT* instructions to optimize square root calculations.
@@ -201,7 +199,7 @@ protected:
/// For this to be profitable, the cost of FDIV must be
/// substantially higher than normal FP ops like FADD and FMUL.
bool UseReciprocalEst;
-
+
/// Processor has AVX-512 PreFetch Instructions
bool HasPFI;
@@ -220,7 +218,7 @@ protected:
/// Processor has AVX-512 Vector Length eXtensions
bool HasVLX;
- /// stackAlignment - The minimum alignment known to hold of the stack frame on
+ /// The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -228,26 +226,24 @@ protected:
///
unsigned MaxInlineSizeThreshold;
- /// TargetTriple - What processor and OS we're targeting.
+ /// What processor and OS we're targeting.
Triple TargetTriple;
/// Instruction itineraries for scheduling
InstrItineraryData InstrItins;
private:
- // Calculates type size & alignment
- const DataLayout DL;
- /// StackAlignOverride - Override the stack alignment.
+ /// Override the stack alignment.
unsigned StackAlignOverride;
- /// In64BitMode - True if compiling for 64-bit, false for 16-bit or 32-bit.
+ /// True if compiling for 64-bit, false for 16-bit or 32-bit.
bool In64BitMode;
- /// In32BitMode - True if compiling for 32-bit, false for 16-bit or 64-bit.
+ /// True if compiling for 32-bit, false for 16-bit or 64-bit.
bool In32BitMode;
- /// In16BitMode - True if compiling for 16-bit, false for 32-bit or 64-bit.
+ /// True if compiling for 16-bit, false for 32-bit or 64-bit.
bool In16BitMode;
X86SelectionDAGInfo TSInfo;
@@ -269,7 +265,6 @@ public:
return &TLInfo;
}
const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
- const DataLayout *getDataLayout() const override { return &DL; }
const X86FrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
@@ -280,12 +275,12 @@ public:
return &getInstrInfo()->getRegisterInfo();
}
- /// getStackAlignment - Returns the minimum alignment known to hold of the
+ /// Returns the minimum alignment known to hold of the
/// stack frame on entry to the function and which must be maintained by every
/// function for this subtarget.
unsigned getStackAlignment() const { return stackAlignment; }
- /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
+ /// Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
@@ -294,7 +289,7 @@ public:
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
private:
- /// \brief Initialize the full set of dependencies so we can use an initializer
+ /// Initialize the full set of dependencies so we can use an initializer
/// list for X86Subtarget.
X86Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
void initializeEnvironment();
@@ -316,13 +311,13 @@ public:
/// Is this x86_64 with the ILP32 programming model (x32 ABI)?
bool isTarget64BitILP32() const {
return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
- TargetTriple.getOS() == Triple::NaCl);
+ TargetTriple.isOSNaCl());
}
/// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
bool isTarget64BitLP64() const {
return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
- TargetTriple.getOS() != Triple::NaCl);
+ !TargetTriple.isOSNaCl());
}
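As a quick illustration of these two predicates (the triple spellings below are assumptions for illustration, not taken from this patch):

    Triple T1("x86_64-unknown-linux-gnux32"); // GNUX32 environment
    Triple T2("x86_64-unknown-linux-gnu");
    // In 64-bit mode, T1 should satisfy isTarget64BitILP32() and T2
    // isTarget64BitLP64(); an x86_64 NaCl triple also counts as ILP32.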
PICStyles::Style getPICStyle() const { return PICStyle; }
@@ -363,17 +358,17 @@ public:
bool hasHLE() const { return HasHLE; }
bool hasADX() const { return HasADX; }
bool hasSHA() const { return HasSHA; }
- bool hasSGX() const { return HasSGX; }
bool hasPRFCHW() const { return HasPRFCHW; }
bool hasRDSEED() const { return HasRDSEED; }
- bool hasSMAP() const { return HasSMAP; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
- bool hasVectorUAMem() const { return HasVectorUAMem; }
+ bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
+ bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
- bool hasSlowDivide() const { return HasSlowDivide; }
+ bool hasSlowDivide32() const { return HasSlowDivide32; }
+ bool hasSlowDivide64() const { return HasSlowDivide64; }
bool padShortFunctions() const { return PadShortFunctions; }
bool callRegIndirect() const { return CallRegIndirect; }
bool LEAusesAG() const { return LEAUsesAG; }
@@ -394,16 +389,14 @@ public:
const Triple &getTargetTriple() const { return TargetTriple; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
- bool isTargetFreeBSD() const {
- return TargetTriple.getOS() == Triple::FreeBSD;
- }
- bool isTargetSolaris() const {
- return TargetTriple.getOS() == Triple::Solaris;
- }
+ bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
+ bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
+ bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
+ bool isTargetPS4() const { return TargetTriple.isPS4(); }
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
- bool isTargetMacho() const { return TargetTriple.isOSBinFormatMachO(); }
+ bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
@@ -469,13 +462,11 @@ public:
unsigned char ClassifyGlobalReference(const GlobalValue *GV,
const TargetMachine &TM)const;
- /// ClassifyBlockAddressReference - Classify a blockaddress reference for the
- /// current subtarget according to how we should reference it in a non-pcrel
- /// context.
+ /// Classify a blockaddress reference for the current subtarget according to
+ /// how we should reference it in a non-pcrel context.
unsigned char ClassifyBlockAddressReference() const;
- /// IsLegalToCallImmediateAddr - Return true if the subtarget allows calls
- /// to immediate address.
+ /// Return true if the subtarget allows calls to immediate address.
bool IsLegalToCallImmediateAddr(const TargetMachine &TM) const;
/// This function returns the name of a function which has an interface
@@ -494,8 +485,7 @@ public:
bool enableEarlyIfConversion() const override;
- /// getInstrItins = Return the instruction itineraries based on the
- /// subtarget selection.
+ /// Return the instruction itineraries based on the subtarget selection.
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 8802feb..4bde053 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -14,9 +14,10 @@
#include "X86TargetMachine.h"
#include "X86.h"
#include "X86TargetObjectFile.h"
+#include "X86TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
@@ -47,6 +48,46 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
llvm_unreachable("unknown subtarget type");
}
+static std::string computeDataLayout(const Triple &TT) {
+ // X86 is little endian
+ std::string Ret = "e";
+
+ Ret += DataLayout::getManglingComponent(TT);
+ // X86 and x32 have 32-bit pointers.
+ if ((TT.isArch64Bit() &&
+ (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
+ !TT.isArch64Bit())
+ Ret += "-p:32:32";
+
+ // Some ABIs align 64-bit integers and doubles to 64 bits, others to 32.
+ if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
+ Ret += "-i64:64";
+ else
+ Ret += "-f64:32:64";
+
+ // Some ABIs align long double to 128 bits, others to 32.
+ if (TT.isOSNaCl())
+ ; // No f80
+ else if (TT.isArch64Bit() || TT.isOSDarwin())
+ Ret += "-f80:128";
+ else
+ Ret += "-f80:32";
+
+ // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
+ if (TT.isArch64Bit())
+ Ret += "-n8:16:32:64";
+ else
+ Ret += "-n8:16:32";
+
+ // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
+ if (!TT.isArch64Bit() && TT.isOSWindows())
+ Ret += "-S32";
+ else
+ Ret += "-S128";
+
+ return Ret;
+}
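For concreteness, here is roughly what the builder above should produce for two common triples, assuming DataLayout::getManglingComponent returns "-m:e" for ELF and "-m:o" for Mach-O (a sketch, not output captured from this patch):

    computeDataLayout(Triple("x86_64-unknown-linux-gnu"));
    //  -> "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
    computeDataLayout(Triple("x86_64-apple-macosx"));
    //  -> "e-m:o-i64:64-f80:128-n8:16:32:64-S128"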
+
/// X86TargetMachine ctor - Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
@@ -55,6 +96,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(createTLOF(Triple(getTargetTriple()))),
+ DL(computeDataLayout(Triple(TT))),
Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// default to hard float ABI
if (Options.FloatABIType == FloatABI::Default)
@@ -74,11 +116,8 @@ X86TargetMachine::~X86TargetMachine() {}
const X86Subtarget *
X86TargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
@@ -92,8 +131,7 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
// function before we can generate a subtarget. We also need to use
// it as a key for the subtarget since that can be the only difference
// between two functions.
- Attribute SFAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
+ Attribute SFAttr = F.getFnAttribute("use-soft-float");
bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
? SFAttr.getValueAsString() == "true"
: Options.UseSoftFloat;
@@ -120,15 +158,12 @@ UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
cl::init(true));
//===----------------------------------------------------------------------===//
-// X86 Analysis Pass Setup
+// X86 TTI query.
//===----------------------------------------------------------------------===//
-void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our X86 pass. This
- // allows the X86 pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createX86TargetTransformInfoPass(this));
+TargetIRAnalysis X86TargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &F) { return TargetTransformInfo(X86TTIImpl(this, F)); });
}
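A client that still drives the legacy pass manager would now wrap this analysis itself; a minimal sketch, assuming the createTargetTransformInfoWrapperPass helper that accompanies the new TTI scheme:

    legacy::PassManager PM;
    PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));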
@@ -147,16 +182,12 @@ public:
return getTM<X86TargetMachine>();
}
- const X86Subtarget &getX86Subtarget() const {
- return *getX86TargetMachine().getSubtargetImpl();
- }
-
void addIRPasses() override;
bool addInstSelector() override;
bool addILPOpts() override;
- bool addPreRegAlloc() override;
- bool addPostRegAlloc() override;
- bool addPreEmitPass() override;
+ void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -175,7 +206,8 @@ bool X86PassConfig::addInstSelector() {
addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
 // For ELF, clean up any local-dynamic TLS accesses.
- if (getX86Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
+ if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
+ getOptLevel() != CodeGenOpt::None)
addPass(createCleanupLocalDynamicTLSPass());
addPass(createX86GlobalBaseRegPass());
@@ -188,32 +220,23 @@ bool X86PassConfig::addILPOpts() {
return true;
}
-bool X86PassConfig::addPreRegAlloc() {
- return false; // -print-machineinstr shouldn't print after this.
+void X86PassConfig::addPreRegAlloc() {
+ addPass(createX86CallFrameOptimization());
}
-bool X86PassConfig::addPostRegAlloc() {
+void X86PassConfig::addPostRegAlloc() {
addPass(createX86FloatingPointStackifierPass());
- return true; // -print-machineinstr should print after this.
}
-bool X86PassConfig::addPreEmitPass() {
- bool ShouldPrint = false;
- if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2()) {
+void X86PassConfig::addPreEmitPass() {
+ if (getOptLevel() != CodeGenOpt::None)
addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
- ShouldPrint = true;
- }
- if (UseVZeroUpper) {
+ if (UseVZeroUpper)
addPass(createX86IssueVZeroUpperPass());
- ShouldPrint = true;
- }
if (getOptLevel() != CodeGenOpt::None) {
addPass(createX86PadShortFunctions());
addPass(createX86FixupLEAs());
- ShouldPrint = true;
}
-
- return ShouldPrint;
}
diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h
index 916278c..283858d 100644
--- a/lib/Target/X86/X86TargetMachine.h
+++ b/lib/Target/X86/X86TargetMachine.h
@@ -24,22 +24,22 @@ class StringRef;
class X86TargetMachine final : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
- X86Subtarget Subtarget;
+ // Calculates type size & alignment
+ const DataLayout DL;
+ X86Subtarget Subtarget;
mutable StringMap<std::unique_ptr<X86Subtarget>> SubtargetMap;
public:
- X86TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
+ X86TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
~X86TargetMachine() override;
-
+ const DataLayout *getDataLayout() const override { return &DL; }
const X86Subtarget *getSubtargetImpl() const override { return &Subtarget; }
const X86Subtarget *getSubtargetImpl(const Function &F) const override;
- /// \brief Register X86 analysis passes with a pass manager.
- void addAnalysisPasses(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
// Set up the pass pipeline.
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp
index f8bcd61..1d1c32e 100644
--- a/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/lib/Target/X86/X86TargetObjectFile.cpp
@@ -21,6 +21,11 @@
using namespace llvm;
using namespace dwarf;
+X86_64MachoTargetObjectFile::X86_64MachoTargetObjectFile()
+ : TargetLoweringObjectFileMachO() {
+ SupportIndirectSymViaGOTPCRel = true;
+}
+
const MCExpr *X86_64MachoTargetObjectFile::getTTypeGlobalReference(
const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
const TargetMachine &TM, MachineModuleInfo *MMI,
@@ -46,6 +51,17 @@ MCSymbol *X86_64MachoTargetObjectFile::getCFIPersonalitySymbol(
return TM.getSymbol(GV, Mang);
}
+const MCExpr *X86_64MachoTargetObjectFile::getIndirectSymViaGOTPCRel(
+ const MCSymbol *Sym, int64_t Offset) const {
+ // On Darwin/X86-64, we need to use foo@GOTPCREL+4 to access the GOT entry
+ // from a data section. If there's an additional offset, use
+ // foo@GOTPCREL+4+<offset>.
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, getContext());
+ const MCExpr *Off = MCConstantExpr::Create(Offset+4, getContext());
+ return MCBinaryExpr::CreateAdd(Res, Off, getContext());
+}
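To make the arithmetic concrete: for a hypothetical symbol _foo and Offset = 8, the helper builds the equivalent of

    // SymRef(_foo, VK_GOTPCREL) + Const(8 + 4)
    // which the asm printer renders as: _foo@GOTPCREL+12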
+
void
X86LinuxTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h
index 6a6988a..f745538 100644
--- a/lib/Target/X86/X86TargetObjectFile.h
+++ b/lib/Target/X86/X86TargetObjectFile.h
@@ -19,6 +19,8 @@ namespace llvm {
/// x86-64.
class X86_64MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
public:
+ X86_64MachoTargetObjectFile();
+
const MCExpr *
getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
Mangler &Mang, const TargetMachine &TM,
@@ -30,6 +32,10 @@ namespace llvm {
MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const override;
+
+ const MCExpr *
+ getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+ int64_t Offset) const override;
};
 /// X86LinuxTargetObjectFile - This implementation is used for Linux x86
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 2b70fd0..5136619 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -14,9 +14,9 @@
///
//===----------------------------------------------------------------------===//
-#include "X86.h"
-#include "X86TargetMachine.h"
+#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
@@ -25,123 +25,22 @@ using namespace llvm;
#define DEBUG_TYPE "x86tti"
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeX86TTIPass(PassRegistry &);
-}
-
-namespace {
-
-class X86TTI final : public ImmutablePass, public TargetTransformInfo {
- const X86Subtarget *ST;
- const X86TargetLowering *TLI;
-
- /// Estimate the overhead of scalarizing an instruction. Insert and Extract
- /// are set if the result needs to be inserted and/or extracted from vectors.
- unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
-
-public:
- X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- X86TTI(const X86TargetMachine *TM)
- : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
- TLI(TM->getSubtargetImpl()->getTargetLowering()) {
- initializeX86TTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override {
- pushTTIStack(this);
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- /// Pass identification.
- static char ID;
-
- /// Provide necessary pointer adjustments for the two base classes.
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo*)this;
- return this;
- }
-
- /// \name Scalar TTI Implementations
- /// @{
- PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
-
- /// @}
-
- /// \name Vector TTI Implementations
- /// @{
-
- unsigned getNumberOfRegisters(bool Vector) const override;
- unsigned getRegisterBitWidth(bool Vector) const override;
- unsigned getMaxInterleaveFactor() const override;
- unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
- OperandValueKind, OperandValueProperties,
- OperandValueProperties) const override;
- unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const override;
- unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const override;
- unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const override;
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const override;
- unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const override;
-
- unsigned getAddressComputationCost(Type *PtrTy,
- bool IsComplex) const override;
-
- unsigned getReductionCost(unsigned Opcode, Type *Ty,
- bool IsPairwiseForm) const override;
-
- unsigned getIntImmCost(int64_t) const;
-
- unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
-
- unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
- unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
- Type *Ty) const override;
-
- /// @}
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
- "X86 Target Transform Info", true, true, false)
-char X86TTI::ID = 0;
-
-ImmutablePass *
-llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
- return new X86TTI(TM);
-}
-
-
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
-X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
+TargetTransformInfo::PopcntSupportKind
+X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
// TODO: Currently the __builtin_popcount() implementation using SSE3
// instructions is inefficient. Once the problem is fixed, we should
// call ST->hasSSE3() instead of ST->hasPOPCNT().
- return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
+ return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
-unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
+unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
if (Vector && !ST->hasSSE1())
return 0;
@@ -153,7 +52,7 @@ unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
return 8;
}
-unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
+unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
if (Vector) {
if (ST->hasAVX512()) return 512;
if (ST->hasAVX()) return 256;
@@ -167,7 +66,7 @@ unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
}
-unsigned X86TTI::getMaxInterleaveFactor() const {
+unsigned X86TTIImpl::getMaxInterleaveFactor() {
if (ST->isAtom())
return 1;
@@ -179,10 +78,10 @@ unsigned X86TTI::getMaxInterleaveFactor() const {
return 2;
}
-unsigned X86TTI::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
- OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
- OperandValueProperties Opd2PropInfo) const {
+unsigned X86TTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
+ TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
@@ -352,7 +251,7 @@ unsigned X86TTI::getArithmeticInstrCost(
{ ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
- { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
+ { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
{ ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
{ ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized.
@@ -437,17 +336,16 @@ unsigned X86TTI::getArithmeticInstrCost(
return LT.first * 6;
// Fallback to the default implementation.
- return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
- Op2Info);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
-unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
- Type *SubTp) const {
+unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) {
// We only estimate the cost of reverse and alternate shuffles.
- if (Kind != SK_Reverse && Kind != SK_Alternate)
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
- if (Kind == SK_Reverse) {
+ if (Kind == TTI::SK_Reverse) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
unsigned Cost = 1;
if (LT.second.getSizeInBits() > 128)
@@ -457,7 +355,7 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
return Cost * LT.first;
}
- if (Kind == SK_Alternate) {
+ if (Kind == TTI::SK_Alternate) {
// 64-bit packed float vectors (v2f32) are widened to type v4f32.
// 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
@@ -525,7 +423,7 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or
};
-
+
if (ST->hasSSSE3()) {
int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx != -1)
@@ -538,7 +436,7 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
{ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
{ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd
-
+
// This is expanded into a long sequence of four extract + four insert.
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.
@@ -546,17 +444,17 @@ unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
};
- // Fall-back (SSE3 and SSE2).
+ // Fall-back (SSE3 and SSE2).
int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx != -1)
return LT.first * SSEAltShuffleTbl[Idx].Cost;
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
-unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
+unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -638,7 +536,7 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
// The function getSimpleVT only handles simple value types.
if (!SrcTy.isSimple() || !DstTy.isSimple())
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
static const TypeConversionCostTblEntry<MVT::SimpleValueType>
AVX2ConversionTbl[] = {
@@ -757,11 +655,11 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
return AVXConversionTbl[Idx].Cost;
}
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+ return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
-unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const {
+unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
@@ -827,11 +725,11 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return LT.first * SSE42CostTbl[Idx].Cost;
}
- return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+ return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const {
+unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
if (Index != -1U) {
@@ -851,26 +749,27 @@ unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
return 0;
}
- return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
-unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
- bool Extract) const {
+unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
+ bool Extract) {
assert (Ty->isVectorTy() && "Can only scalarize vectors");
unsigned Cost = 0;
for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
if (Insert)
- Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
if (Extract)
- Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
}
return Cost;
}
-unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const {
+unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) {
// Handle non-power-of-two vectors such as <3 x float>
if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
unsigned NumElem = VTy->getVectorNumElements();
@@ -888,10 +787,8 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
// Assume that all other non-power-of-two numbers are scalarized.
if (!isPowerOf2_32(NumElem)) {
- unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
- VTy->getScalarType(),
- Alignment,
- AddressSpace);
+ unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
+ Alignment, AddressSpace);
unsigned SplitCost = getScalarizationOverhead(Src,
Opcode == Instruction::Load,
Opcode==Instruction::Store);
@@ -915,7 +812,60 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
return Cost;
}
-unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
+ unsigned Alignment,
+ unsigned AddressSpace) {
+ VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
+ if (!SrcVTy)
+ // For scalar types, take the regular cost without the mask.
+ return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
+
+ unsigned NumElem = SrcVTy->getVectorNumElements();
+ VectorType *MaskTy =
+ VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
+ if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
+ (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
+ !isPowerOf2_32(NumElem)) {
+ // Scalarization
+ unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
+ unsigned ScalarCompareCost =
+ getCmpSelInstrCost(Instruction::ICmp,
+ Type::getInt8Ty(getGlobalContext()), NULL);
+ unsigned BranchCost = getCFInstrCost(Instruction::Br);
+ unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
+
+ unsigned ValueSplitCost =
+ getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
+ Opcode == Instruction::Store);
+ unsigned MemopCost =
+ NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
+ Alignment, AddressSpace);
+ return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
+ }
+
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(SrcVTy);
+ unsigned Cost = 0;
+ if (LT.second != TLI->getValueType(SrcVTy).getSimpleVT() &&
+ LT.second.getVectorNumElements() == NumElem)
+ // Promotion requires expand/truncate for data and a shuffle for mask.
+ Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
+ getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
+
+ else if (LT.second.getVectorNumElements() > NumElem) {
+ VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
+ LT.second.getVectorNumElements());
+ // Expanding requires filling the mask with zeroes
+ Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
+ }
+ if (!ST->hasAVX512())
+ return Cost + LT.first*4; // Each maskmov costs 4
+
+ // AVX-512 masked load/store is cheaper
+ return Cost+LT.first;
+}
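Plugging illustrative numbers into the scalarization branch above (every cost here is hypothetical, purely for exposition): with NumElem = 4, BranchCost = 1, ScalarCompareCost = 1, a per-element memory cost of 1, and split overheads of 4 each, the estimate comes out to

    // MaskCmpCost    = 4 * (1 + 1)       =  8
    // MemopCost      = 4 * 1             =  4
    // ValueSplitCost = 4, MaskSplitCost  =  4
    // Total          = 4 + 4 + 4 + 8     = 20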
+
+unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@@ -925,22 +875,22 @@ unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
if (Ty->isVectorTy() && IsComplex)
return NumVectorInstToHideOverhead;
- return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
+ return BaseT::getAddressComputationCost(Ty, IsComplex);
}
-unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
- bool IsPairwise) const {
-
+unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
+ bool IsPairwise) {
+
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
-
+
MVT MTy = LT.second;
-
+
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
-
- // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
- // and make it as the cost.
-
+
+ // We use the Intel Architecture Code Analyzer (IACA) to measure the
+ // throughput and use it as the cost.
+
static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
{ ISD::FADD, MVT::v2f64, 2 },
{ ISD::FADD, MVT::v4f32, 4 },
@@ -948,7 +898,7 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
{ ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
{ ISD::ADD, MVT::v8i16, 5 },
};
-
+
static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
{ ISD::FADD, MVT::v4f32, 4 },
{ ISD::FADD, MVT::v4f64, 5 },
@@ -967,7 +917,7 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
{ ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
{ ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
};
-
+
static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
{ ISD::FADD, MVT::v4f32, 3 },
{ ISD::FADD, MVT::v4f64, 3 },
@@ -978,14 +928,14 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
{ ISD::ADD, MVT::v8i16, 4 },
{ ISD::ADD, MVT::v8i32, 5 },
};
-
+
if (IsPairwise) {
if (ST->hasAVX()) {
int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
if (Idx != -1)
return LT.first * AVX1CostTblPairWise[Idx].Cost;
}
-
+
if (ST->hasSSE42()) {
int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
if (Idx != -1)
@@ -997,7 +947,7 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
if (Idx != -1)
return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
}
-
+
if (ST->hasSSE42()) {
int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
if (Idx != -1)
@@ -1005,23 +955,23 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
}
}
- return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
+ return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
-unsigned X86TTI::getIntImmCost(int64_t Val) const {
+unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
if (Val == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
if (isInt<32>(Val))
- return TCC_Basic;
+ return TTI::TCC_Basic;
- return 2 * TCC_Basic;
+ return 2 * TTI::TCC_Basic;
}
-unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -1033,10 +983,10 @@ unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
// Fixme: Create a cost model for types larger than i128 once the codegen
// issues have been fixed.
if (BitSize > 128)
- return TCC_Free;
+ return TTI::TCC_Free;
if (Imm == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
// Sign-extend all constants to a multiple of 64-bit.
APInt ImmVal = Imm;
@@ -1055,26 +1005,27 @@ unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
return std::max(1U, Cost);
}
-unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
// There is no cost model for constants with a bit size of 0. Return TCC_Free
// here, so that constant hoisting will ignore this constant.
if (BitSize == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
unsigned ImmIdx = ~0U;
switch (Opcode) {
- default: return TCC_Free;
+ default:
+ return TTI::TCC_Free;
case Instruction::GetElementPtr:
// Always hoist the base address of a GetElementPtr. This prevents the
// creation of new constants for every base constant that gets constant
// folded with the offset.
if (Idx == 0)
- return 2 * TCC_Basic;
- return TCC_Free;
+ return 2 * TTI::TCC_Basic;
+ return TTI::TCC_Free;
case Instruction::Store:
ImmIdx = 0;
break;
@@ -1096,7 +1047,7 @@ unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
case Instruction::LShr:
case Instruction::AShr:
if (Idx == 1)
- return TCC_Free;
+ return TTI::TCC_Free;
break;
case Instruction::Trunc:
case Instruction::ZExt:
@@ -1114,27 +1065,28 @@ unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
if (Idx == ImmIdx) {
unsigned NumConstants = (BitSize + 63) / 64;
- unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
- return (Cost <= NumConstants * TCC_Basic)
- ? static_cast<unsigned>(TCC_Free)
- : Cost;
+ unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TTI::TCC_Basic)
+ ? static_cast<unsigned>(TTI::TCC_Free)
+ : Cost;
}
- return X86TTI::getIntImmCost(Imm, Ty);
+ return X86TTIImpl::getIntImmCost(Imm, Ty);
}
-unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
- const APInt &Imm, Type *Ty) const {
+unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
// There is no cost model for constants with a bit size of 0. Return TCC_Free
// here, so that constant hoisting will ignore this constant.
if (BitSize == 0)
- return TCC_Free;
+ return TTI::TCC_Free;
switch (IID) {
- default: return TCC_Free;
+ default:
+ return TTI::TCC_Free;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -1142,17 +1094,33 @@ unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
- return TCC_Free;
+ return TTI::TCC_Free;
break;
case Intrinsic::experimental_stackmap:
if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
- return TCC_Free;
+ return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
- return TCC_Free;
+ return TTI::TCC_Free;
break;
}
- return X86TTI::getIntImmCost(Imm, Ty);
+ return X86TTIImpl::getIntImmCost(Imm, Ty);
+}
+
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
+ int DataWidth = DataTy->getPrimitiveSizeInBits();
+
+ // TODO: AVX-512 allows gather/scatter, which works with strided and random as well
+ if ((DataWidth < 32) || (Consecutive == 0))
+ return false;
+ if (ST->hasAVX512() || ST->hasAVX2())
+ return true;
+ return false;
}
+
+bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
+ return isLegalMaskedLoad(DataType, Consecutive);
+}
+
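A sketch of how these legality hooks behave per the code above (names and context assumed, not from this patch):

    // <8 x i32> (256 bits) with a consecutive access pattern:
    VectorType *VTy = VectorType::get(Type::getInt32Ty(Ctx), 8);
    bool UseMasked = TTI.isLegalMaskedLoad(VTy, /*Consecutive=*/1);
    // true on AVX2/AVX-512 subtargets; Consecutive == 0 (gather-like
    // access) always returns false for now.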
diff --git a/lib/Target/X86/X86TargetTransformInfo.h b/lib/Target/X86/X86TargetTransformInfo.h
new file mode 100644
index 0000000..9f0adcf
--- /dev/null
+++ b/lib/Target/X86/X86TargetTransformInfo.h
@@ -0,0 +1,112 @@
+//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the X86 target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
+
+#include "X86.h"
+#include "X86TargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
+ typedef BasicTTIImplBase<X86TTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const X86Subtarget *ST;
+ const X86TargetLowering *TLI;
+
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
+
+ const X86Subtarget *getST() const { return ST; }
+ const X86TargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit X86TTIImpl(const X86TargetMachine *TM, Function &F)
+ : BaseT(TM), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ X86TTIImpl(const X86TTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ X86TTIImpl(X86TTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ X86TTIImpl &operator=(const X86TTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ X86TTIImpl &operator=(X86TTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ /// \name Scalar TTI Implementations
+ /// @{
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector);
+ unsigned getRegisterBitWidth(bool Vector);
+ unsigned getMaxInterleaveFactor();
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp);
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace);
+ unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace);
+
+ unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex);
+
+ unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
+
+ unsigned getIntImmCost(int64_t);
+
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty);
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+ bool isLegalMaskedLoad(Type *DataType, int Consecutive);
+ bool isLegalMaskedStore(Type *DataType, int Consecutive);
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index d93baeb..99ba4c0 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -9,7 +9,7 @@
//
// This file defines the pass which inserts x86 AVX vzeroupper instructions
// before calls to SSE encoded functions. This avoids transition latency
-// penalty when tranfering control between AVX encoded instructions and old
+// penalty when transferring control between AVX encoded instructions and old
// SSE encoding mode.
//
//===----------------------------------------------------------------------===//
@@ -171,7 +171,7 @@ void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
}
/// processBasicBlock - Loop over all of the instructions in the basic block,
-/// inserting vzero upper instructions before function calls.
+/// inserting vzeroupper instructions before function calls.
void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// Start by assuming that the block PASS_THROUGH, which implies no unguarded
@@ -202,7 +202,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// If the call won't clobber any YMM register, skip it as well. It usually
// happens on helper function calls (such as '_chkstk', '_ftol2') where
// standard calling convention is not used (RegMask is not used to mark
- // register clobbered and register usage (def/imp-def/use) is well-dfined
+ // register clobbered and register usage (def/imp-def/use) is well-defined
// and explicitly specified.
if (MI->isCall() && !callClobbersAnyYmmReg(MI))
continue;
@@ -245,25 +245,29 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
}
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
-/// vzero upper instructions before function calls.
+/// vzeroupper instructions before function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
- const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
if (!ST.hasAVX() || ST.hasAVX512())
return false;
- TII = MF.getSubtarget().getInstrInfo();
+ TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
+ bool FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
+
// Fast check: if the function doesn't use any ymm registers, we don't need
// to insert any VZEROUPPER instructions. This is constant-time, so it is
// cheap in the common case of no ymm use.
- bool YMMUsed = false;
- const TargetRegisterClass *RC = &X86::VR256RegClass;
- for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
- i != e; i++) {
- if (!MRI.reg_nodbg_empty(*i)) {
- YMMUsed = true;
- break;
+ bool YMMUsed = FnHasLiveInYmm;
+ if (!YMMUsed) {
+ const TargetRegisterClass *RC = &X86::VR256RegClass;
+ for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end(); i != e;
+ i++) {
+ if (!MRI.reg_nodbg_empty(*i)) {
+ YMMUsed = true;
+ break;
+ }
}
}
if (!YMMUsed) {
@@ -282,7 +286,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
// If any YMM regs are live in to this function, add the entry block to the
// DirtySuccessors list
- if (checkFnHasLiveInYmm(MRI))
+ if (FnHasLiveInYmm)
addDirtySuccessor(MF.front());
 // Re-visit all blocks that are successors of EXITS_DIRTY blocks. Add
diff --git a/lib/Target/XCore/CMakeLists.txt b/lib/Target/XCore/CMakeLists.txt
index 5ad0754..0a609ef 100644
--- a/lib/Target/XCore/CMakeLists.txt
+++ b/lib/Target/XCore/CMakeLists.txt
@@ -22,7 +22,6 @@ add_llvm_target(XCoreCodeGen
XCoreSubtarget.cpp
XCoreTargetMachine.cpp
XCoreTargetObjectFile.cpp
- XCoreTargetTransformInfo.cpp
XCoreSelectionDAGInfo.cpp
XCoreFrameToArgsOffsetElim.cpp
)
diff --git a/lib/Target/XCore/XCore.h b/lib/Target/XCore/XCore.h
index 140ba2a..ba6ca84 100644
--- a/lib/Target/XCore/XCore.h
+++ b/lib/Target/XCore/XCore.h
@@ -32,8 +32,6 @@ namespace llvm {
CodeGenOpt::Level OptLevel);
ModulePass *createXCoreLowerThreadLocalPass();
- ImmutablePass *createXCoreTargetTransformInfoPass(const XCoreTargetMachine *TM);
-
} // end namespace llvm;
#endif
diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp
index 82e4e36..4f7a7e9 100644
--- a/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -50,14 +50,13 @@ using namespace llvm;
namespace {
class XCoreAsmPrinter : public AsmPrinter {
- const XCoreSubtarget &Subtarget;
XCoreMCInstLower MCInstLowering;
XCoreTargetStreamer &getTargetStreamer();
public:
- explicit XCoreAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), Subtarget(TM.getSubtarget<XCoreSubtarget>()),
- MCInstLowering(*this) {}
+ explicit XCoreAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(*this) {}
const char *getPassName() const override {
return "XCore Assembly Printer";
@@ -105,7 +104,6 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
OutContext));
if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
GV->hasCommonLinkage()) {
- // TODO Use COMDAT groups for LinkOnceLinkage
OutStreamer.EmitSymbolAttribute(SymGlob, MCSA_Weak);
}
}
@@ -117,7 +115,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
EmitSpecialLLVMGlobal(GV))
return;
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
OutStreamer.SwitchSection(
getObjFileLowering().SectionForGlobal(GV, *Mang, TM));
@@ -140,7 +138,6 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
emitArrayBound(GVSym, GV);
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- // TODO Use COMDAT groups for LinkOnceLinkage
if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
GV->hasCommonLinkage())
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Weak);
@@ -210,7 +207,7 @@ printInlineJT(const MachineInstr *MI, int opNum, raw_ostream &O,
void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
- const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
case MachineOperand::MO_Register:
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 7c74340..e0ac0e5 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -226,8 +226,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo *MMI = &MF.getMMI();
const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
- const XCoreInstrInfo &TII =
- *static_cast<const XCoreInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
@@ -341,8 +340,7 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- const XCoreInstrInfo &TII =
- *static_cast<const XCoreInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
DebugLoc dl = MBBI->getDebugLoc();
unsigned RetOpcode = MBBI->getOpcode();
@@ -480,8 +478,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
void XCoreFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const XCoreInstrInfo &TII =
- *static_cast<const XCoreInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
if (!hasReservedCallFrame(MF)) {
// Turn the adjcallstackdown instruction into 'extsp <amt>' and the
// adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 86bc6f2..f79b78b 100644
--- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -37,12 +37,10 @@ using namespace llvm;
///
namespace {
class XCoreDAGToDAGISel : public SelectionDAGISel {
- const XCoreSubtarget &Subtarget;
public:
XCoreDAGToDAGISel(XCoreTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel),
- Subtarget(*TM.getSubtargetImpl()) { }
+ : SelectionDAGISel(TM, OptLevel) {}
SDNode *Select(SDNode *N) override;
SDNode *SelectBRIND(SDNode *N);
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 96c43ae..6e8a95a 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -68,15 +68,15 @@ getTargetNodeName(unsigned Opcode) const
}
}
-XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
- : TargetLowering(TM), TM(TM),
- Subtarget(TM.getSubtarget<XCoreSubtarget>()) {
+XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
+ const XCoreSubtarget &Subtarget)
+ : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
// Set up the register classes.
addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
// Compute derived properties from the register classes
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget.getRegisterInfo());
// Division is expensive
setIntDivIsCheap(false);
@@ -127,12 +127,14 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
// Loads
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
+ }
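The extra MVT parameter in the new setLoadExtAction signature is the result type of the extending load, so the loop registers the action for every integer result type. One iteration unrolled, as an illustration:

    // For VT == MVT::i32, the loop body performs:
    setLoadExtAction(ISD::EXTLOAD,  MVT::i32, MVT::i1,  Promote);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1,  Promote);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1,  Promote);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8,  Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16, Expand);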
// Custom expand misaligned loads / stores.
setOperationAction(ISD::LOAD, MVT::i32, Custom);
@@ -805,8 +807,7 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
- const TargetRegisterInfo *RegInfo =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
RegInfo->getFrameRegister(MF), MVT::i32);
}
@@ -852,8 +853,7 @@ LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
// Absolute SP = (FP + FrameToArgs) + Offset
- const TargetRegisterInfo *RegInfo =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
RegInfo->getFrameRegister(MF), MVT::i32);
SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
@@ -1371,8 +1371,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
XCore::R0, XCore::R1, XCore::R2, XCore::R3
};
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
- unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
- array_lengthof(ArgRegs));
+ unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
if (FirstVAReg < array_lengthof(ArgRegs)) {
int offset = 0;
// Save remaining registers, storing higher register numbers at a higher
@@ -1548,8 +1547,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo &TII =
- *getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
assert((MI->getOpcode() == XCore::SELECT_CC) &&
"Unexpected instr type to insert");
@@ -1922,7 +1920,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
- const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+ const DataLayout *TD = TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(Ty);
if (AM.BaseGV) {
return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
@@ -1959,10 +1957,10 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
// XCore Inline Assembly Support
//===----------------------------------------------------------------------===//
-std::pair<unsigned, const TargetRegisterClass*>
-XCoreTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+std::pair<unsigned, const TargetRegisterClass *>
+XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
@@ -1972,5 +1970,5 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
}
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 13154c6..213ae4a 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -93,8 +93,8 @@ namespace llvm {
class XCoreTargetLowering : public TargetLowering
{
public:
-
- explicit XCoreTargetLowering(const TargetMachine &TM);
+ explicit XCoreTargetLowering(const TargetMachine &TM,
+ const XCoreSubtarget &Subtarget);
using TargetLowering::isZExtFree;
bool isZExtFree(SDValue Val, EVT VT2) const override;
@@ -172,8 +172,9 @@ namespace llvm {
SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const override;
// Expand specifics
diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td
index d34ed7a..8e9bb45 100644
--- a/lib/Target/XCore/XCoreInstrInfo.td
+++ b/lib/Target/XCore/XCoreInstrInfo.td
@@ -381,7 +381,7 @@ def Int_MemBarrier : PseudoInstXCore<(outs), (ins), "#MEMBARRIER",
// Three operand short
defm ADD : F3R_2RUS<0b00010, 0b10010, "add", add>;
defm SUB : F3R_2RUS<0b00011, 0b10011, "sub", sub>;
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
defm EQ : F3R_2RUS_np<0b00110, 0b10110, "eq">;
def LSS_3r : F3R_np<0b11000, "lss">;
def LSU_3r : F3R_np<0b11001, "lsu">;
@@ -432,7 +432,7 @@ def LDAWF_l3r : _FL3R<0b000111100, (outs GRRegs:$dst),
[(set GRRegs:$dst,
(ldawf GRRegs:$addr, GRRegs:$offset))]>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LDAWF_l2rus : _FL2RUS<0b100111100, (outs GRRegs:$dst),
(ins GRRegs:$addr, i32imm:$offset),
"ldaw $dst, $addr[$offset]", []>;
@@ -443,7 +443,7 @@ def LDAWB_l3r : _FL3R<0b001001100, (outs GRRegs:$dst),
[(set GRRegs:$dst,
(ldawb GRRegs:$addr, GRRegs:$offset))]>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LDAWB_l2rus : _FL2RUS<0b101001100, (outs GRRegs:$dst),
(ins GRRegs:$addr, i32imm:$offset),
"ldaw $dst, $addr[-$offset]", []>;
@@ -538,7 +538,7 @@ def LMUL_l6r : _FL6R<
// Register - U6
//let Uses = [DP] in ...
-let neverHasSideEffects = 1, isReMaterializable = 1 in
+let hasSideEffects = 0, isReMaterializable = 1 in
def LDAWDP_ru6: _FRU6<0b011000, (outs RRegs:$a), (ins i32imm:$b),
"ldaw $a, dp[$b]", []>;
@@ -564,7 +564,7 @@ def STWDP_lru6 : _FLRU6<0b010100, (outs), (ins RRegs:$a, i32imm:$b),
[(store RRegs:$a, (dprelwrapper tglobaladdr:$b))]>;
//let Uses = [CP] in ..
-let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in {
+let mayLoad = 1, isReMaterializable = 1, hasSideEffects = 0 in {
def LDWCP_ru6 : _FRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b),
"ldw $a, cp[$b]", []>;
def LDWCP_lru6: _FLRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b),
@@ -593,7 +593,7 @@ def LDWSP_lru6 : _FLRU6<0b010111, (outs RRegs:$a), (ins i32imm:$b),
[(set RRegs:$a, (XCoreLdwsp immU16:$b))]>;
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def LDAWSP_ru6 : _FRU6<0b011001, (outs RRegs:$a), (ins i32imm:$b),
"ldaw $a, sp[$b]", []>;
@@ -628,7 +628,7 @@ defm BRBF: FRU6_LRU6_backwards_branch<0b011111, "bf">;
// U6
let Defs = [SP], Uses = [SP] in {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
defm EXTSP : FU6_LU6_np<0b0111011110, "extsp">;
let mayStore = 1 in
@@ -639,7 +639,7 @@ defm RETSP : FU6_LU6<0b0111011111, "retsp", XCoreRetsp>;
}
}
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
defm EXTDP : FU6_LU6_np<0b0111001110, "extdp">;
let Uses = [R11], isCall=1 in
@@ -656,7 +656,7 @@ def BRFU_lu6 : _FLU6<0b0111001100, (outs), (ins brtarget:$a), "bu $a", []>;
}
//let Uses = [CP] in ...
-let Defs = [R11], neverHasSideEffects = 1, isReMaterializable = 1 in
+let Defs = [R11], hasSideEffects = 0, isReMaterializable = 1 in
def LDAWCP_u6: _FU6<0b0111111101, (outs), (ins i32imm:$a), "ldaw r11, cp[$a]",
[]>;
@@ -690,17 +690,17 @@ defm KRESTSP : FU6_LU6_np<0b0111101111, "krestsp">;
// U10
let Defs = [R11], isReMaterializable = 1 in {
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LDAPF_u10 : _FU10<0b110110, (outs), (ins pcrel_imm:$a), "ldap r11, $a", []>;
def LDAPF_lu10 : _FLU10<0b110110, (outs), (ins pcrel_imm:$a), "ldap r11, $a",
[(set R11, (pcrelwrapper tglobaladdr:$a))]>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LDAPB_u10 : _FU10<0b110111, (outs), (ins pcrel_imm_neg:$a), "ldap r11, $a",
[]>;
-let neverHasSideEffects = 1 in
+let hasSideEffects = 0 in
def LDAPB_lu10 : _FLU10<0b110111, (outs), (ins pcrel_imm_neg:$a),
"ldap r11, $a",
[(set R11, (pcrelwrapper tglobaladdr:$a))]>;
@@ -729,7 +729,7 @@ def BLRB_lu10 : _FLU10<0b110101, (outs), (ins pcrel_imm_neg:$a), "bl $a", []>;
}
let Defs = [R11], mayLoad = 1, isReMaterializable = 1,
- neverHasSideEffects = 1 in {
+ hasSideEffects = 0 in {
def LDWCP_u10 : _FU10<0b111001, (outs), (ins i32imm:$a), "ldw r11, cp[$a]", []>;
def LDWCP_lu10 : _FLU10<0b111001, (outs), (ins i32imm:$a), "ldw r11, cp[$a]",
@@ -772,7 +772,7 @@ def ANDNOT_2r :
[(set GRRegs:$dst, (and GRRegs:$src1, (not GRRegs:$src2)))]>;
}
-let isReMaterializable = 1, neverHasSideEffects = 1 in
+let isReMaterializable = 1, hasSideEffects = 0 in
def MKMSK_rus : _FRUSBitp<0b101001, (outs GRRegs:$dst), (ins i32imm:$size),
"mkmsk $dst, $size", []>;
@@ -972,13 +972,13 @@ def BR_JT32 : PseudoInstXCore<(outs), (ins InlineJT32:$t, GRRegs:$i),
let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
def BRU_1r : _F1R<0b001010, (outs), (ins GRRegs:$a), "bru $a", []>;
-let Defs=[SP], neverHasSideEffects=1 in
+let Defs=[SP], hasSideEffects=0 in
def SETSP_1r : _F1R<0b001011, (outs), (ins GRRegs:$a), "set sp, $a", []>;
-let neverHasSideEffects=1 in
+let hasSideEffects=0 in
def SETDP_1r : _F1R<0b001100, (outs), (ins GRRegs:$a), "set dp, $a", []>;
-let neverHasSideEffects=1 in
+let hasSideEffects=0 in
def SETCP_1r : _F1R<0b001101, (outs), (ins GRRegs:$a), "set cp, $a", []>;
let hasCtrlDep = 1 in
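
The neverHasSideEffects -> hasSideEffects = 0 replacements throughout this file are a mechanical rename from when the negated flag was retired; the bit still flows into MCInstrDesc unchanged. A hedged C++ sketch of how downstream code observes it (standard MC API; isSideEffectFree is a hypothetical helper, not part of the patch):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"

// hasSideEffects = 0 in TableGen clears hasUnmodeledSideEffects() on the
// instruction's MCInstrDesc; passes typically combine it with mayLoad and
// mayStore when deciding whether an instruction is safe to move or delete.
static bool isSideEffectFree(const llvm::MachineInstr &MI) {
  const llvm::MCInstrDesc &Desc = MI.getDesc();
  return !Desc.hasUnmodeledSideEffects() && !Desc.mayLoad() && !Desc.mayStore();
}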
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index ac3bae5..b4c6a50 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -137,7 +137,7 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
if (PN->getIncomingValue(I) == CE) {
BasicBlock *PredBB = PN->getIncomingBlock(I);
if (PredBB->getTerminator()->getNumSuccessors() > 1)
- PredBB = SplitEdge(PredBB, PN->getParent(), P);
+ PredBB = SplitEdge(PredBB, PN->getParent());
Instruction *InsertPos = PredBB->getTerminator();
Instruction *NewInst = createReplacementInstr(CE, InsertPos);
PN->setOperand(I, NewInst);
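
SplitEdge dropped its Pass* parameter in this revision; instead of a pass to query for analyses, it takes the analyses themselves as optional pointers. A minimal sketch of the new call shape, assuming the caller may or may not have a DominatorTree/LoopInfo on hand (splitPredEdge is a hypothetical wrapper):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

// Split the Pred -> Succ edge; DT and LI are only updated when non-null,
// so callers without those analyses simply omit them (as the hunk above does).
llvm::BasicBlock *splitPredEdge(llvm::BasicBlock *Pred, llvm::BasicBlock *Succ,
                                llvm::DominatorTree *DT = nullptr,
                                llvm::LoopInfo *LI = nullptr) {
  return llvm::SplitEdge(Pred, Succ, DT, LI);
}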
diff --git a/lib/Target/XCore/XCoreSubtarget.cpp b/lib/Target/XCore/XCoreSubtarget.cpp
index 7227411..7996020 100644
--- a/lib/Target/XCore/XCoreSubtarget.cpp
+++ b/lib/Target/XCore/XCoreSubtarget.cpp
@@ -27,6 +27,5 @@ void XCoreSubtarget::anchor() { }
XCoreSubtarget::XCoreSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
- : XCoreGenSubtargetInfo(TT, CPU, FS),
- DL("e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32"),
- InstrInfo(), FrameLowering(*this), TLInfo(TM), TSInfo(DL) {}
+ : XCoreGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(*this),
+ TLInfo(TM, *this), TSInfo(*TM.getDataLayout()) {}
diff --git a/lib/Target/XCore/XCoreSubtarget.h b/lib/Target/XCore/XCoreSubtarget.h
index 695578d..da51ef1 100644
--- a/lib/Target/XCore/XCoreSubtarget.h
+++ b/lib/Target/XCore/XCoreSubtarget.h
@@ -31,7 +31,6 @@ class StringRef;
class XCoreSubtarget : public XCoreGenSubtargetInfo {
virtual void anchor();
- const DataLayout DL; // Calculates type size & alignment
XCoreInstrInfo InstrInfo;
XCoreFrameLowering FrameLowering;
XCoreTargetLowering TLInfo;
@@ -61,7 +60,6 @@ public:
const TargetRegisterInfo *getRegisterInfo() const override {
return &InstrInfo.getRegisterInfo();
}
- const DataLayout *getDataLayout() const override { return &DL; }
};
} // End llvm namespace
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index 0fa8c21..7998fc1 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -12,10 +12,11 @@
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
+#include "XCoreTargetTransformInfo.h"
#include "XCore.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Module.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -28,6 +29,7 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
TLOF(make_unique<XCoreTargetObjectFile>()),
+ DL("e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32"),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -48,7 +50,7 @@ public:
void addIRPasses() override;
bool addPreISel() override;
bool addInstSelector() override;
- bool addPreEmitPass() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -72,9 +74,8 @@ bool XCorePassConfig::addInstSelector() {
return false;
}
-bool XCorePassConfig::addPreEmitPass() {
- addPass(createXCoreFrameToArgsOffsetEliminationPass());
- return false;
+void XCorePassConfig::addPreEmitPass() {
+ addPass(createXCoreFrameToArgsOffsetEliminationPass(), false);
}
// Force static initialization.
@@ -82,10 +83,7 @@ extern "C" void LLVMInitializeXCoreTarget() {
RegisterTargetMachine<XCoreTargetMachine> X(TheXCoreTarget);
}
-void XCoreTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- // Add first the target-independent BasicTTI pass, then our XCore pass. This
- // allows the XCore pass to delegate to the target independent layer when
- // appropriate.
- PM.add(createBasicTargetTransformInfoPass(this));
- PM.add(createXCoreTargetTransformInfoPass(this));
+TargetIRAnalysis XCoreTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis(
+ [this](Function &) { return TargetTransformInfo(XCoreTTIImpl(this)); });
}
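
The replacement of addAnalysisPasses with getTargetIRAnalysis is the new TTI pattern: rather than registering two immutable passes, the target returns a TargetIRAnalysis whose callback constructs the concrete TTI object per function. A hedged sketch of how a driver consumes it under the legacy pass manager (addTTI is a hypothetical helper; the wrapper-pass factory is the standard one from TargetTransformInfo.h):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Target/TargetMachine.h"

// The wrapper pass adapts the function-level TargetIRAnalysis result for
// consumers still running under the legacy pass manager.
void addTTI(llvm::legacy::PassManager &PM, llvm::TargetMachine &TM) {
  PM.add(llvm::createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
}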
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index 8ff9269..c5df07c 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -21,6 +21,7 @@ namespace llvm {
class XCoreTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ const DataLayout DL; // Calculates type size & alignment
XCoreSubtarget Subtarget;
public:
XCoreTargetMachine(const Target &T, StringRef TT,
@@ -29,12 +30,13 @@ public:
CodeGenOpt::Level OL);
~XCoreTargetMachine() override;
+ const DataLayout *getDataLayout() const override { return &DL; }
const XCoreSubtarget *getSubtargetImpl() const override { return &Subtarget; }
// Pass Pipeline Configuration
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- void addAnalysisPasses(PassManagerBase &PM) override;
+ TargetIRAnalysis getTargetIRAnalysis() override;
TargetLoweringObjectFile *getObjFileLowering() const override {
return TLOF.get();
}
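
With DL and getDataLayout() hoisted from XCoreSubtarget into XCoreTargetMachine (see the subtarget hunks above), the target machine becomes the single owner of the layout string. A sketch of the resulting call-site migration, assuming TM is any reachable TargetMachine reference:

#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"

// Before: TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Ty)
// After: query the TargetMachine directly, as the object-file hunk below does.
uint64_t typeAllocSize(const llvm::TargetMachine &TM, llvm::Type *Ty) {
  return TM.getDataLayout()->getTypeAllocSize(Ty);
}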
diff --git a/lib/Target/XCore/XCoreTargetObjectFile.cpp b/lib/Target/XCore/XCoreTargetObjectFile.cpp
index 86d0de6..c435b36 100644
--- a/lib/Target/XCore/XCoreTargetObjectFile.cpp
+++ b/lib/Target/XCore/XCoreTargetObjectFile.cpp
@@ -21,66 +21,43 @@ using namespace llvm;
void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
- BSSSection =
- Ctx.getELFSection(".dp.bss", ELF::SHT_NOBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getBSS());
- BSSSectionLarge =
- Ctx.getELFSection(".dp.bss.large", ELF::SHT_NOBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getBSS());
- DataSection =
- Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getDataRel());
- DataSectionLarge =
- Ctx.getELFSection(".dp.data.large", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getDataRel());
- DataRelROSection =
- Ctx.getELFSection(".dp.rodata", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getReadOnlyWithRel());
- DataRelROSectionLarge =
- Ctx.getELFSection(".dp.rodata.large", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_WRITE |
- ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getReadOnlyWithRel());
+ BSSSection = Ctx.getELFSection(".dp.bss", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION);
+ BSSSectionLarge = Ctx.getELFSection(".dp.bss.large", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION);
+ DataSection = Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION);
+ DataSectionLarge = Ctx.getELFSection(".dp.data.large", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION);
+ DataRelROSection = Ctx.getELFSection(".dp.rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION);
+ DataRelROSectionLarge = Ctx.getELFSection(
+ ".dp.rodata.large", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE | ELF::XCORE_SHF_DP_SECTION);
ReadOnlySection =
- Ctx.getELFSection(".cp.rodata", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getReadOnlyWithRel());
+ Ctx.getELFSection(".cp.rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::XCORE_SHF_CP_SECTION);
ReadOnlySectionLarge =
- Ctx.getELFSection(".cp.rodata.large", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getReadOnlyWithRel());
- MergeableConst4Section =
- Ctx.getELFSection(".cp.rodata.cst4", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_MERGE |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst4());
- MergeableConst8Section =
- Ctx.getELFSection(".cp.rodata.cst8", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_MERGE |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst8());
- MergeableConst16Section =
- Ctx.getELFSection(".cp.rodata.cst16", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_MERGE |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst16());
+ Ctx.getELFSection(".cp.rodata.large", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::XCORE_SHF_CP_SECTION);
+ MergeableConst4Section = Ctx.getELFSection(
+ ".cp.rodata.cst4", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::XCORE_SHF_CP_SECTION, 4, "");
+ MergeableConst8Section = Ctx.getELFSection(
+ ".cp.rodata.cst8", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::XCORE_SHF_CP_SECTION, 8, "");
+ MergeableConst16Section = Ctx.getELFSection(
+ ".cp.rodata.cst16", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::XCORE_SHF_CP_SECTION, 16, "");
CStringSection =
- Ctx.getELFSection(".cp.rodata.string", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS |
- ELF::XCORE_SHF_CP_SECTION,
- SectionKind::getReadOnlyWithRel());
+ Ctx.getELFSection(".cp.rodata.string", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS |
+ ELF::XCORE_SHF_CP_SECTION);
// TextSection - see MCObjectFileInfo.cpp
// StaticCtorSection - see MCObjectFileInfo.cpp
// StaticDtorSection - see MCObjectFileInfo.cpp
@@ -128,7 +105,7 @@ XCoreTargetObjectFile::getExplicitSectionGlobal(const GlobalValue *GV,
if (IsCPRel && !Kind.isReadOnly())
report_fatal_error("Using .cp. section for writeable object.");
return getContext().getELFSection(SectionName, getXCoreSectionType(Kind),
- getXCoreSectionFlags(Kind, IsCPRel), Kind);
+ getXCoreSectionFlags(Kind, IsCPRel));
}
const MCSection *XCoreTargetObjectFile::
@@ -146,8 +123,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
}
Type *ObjType = GV->getType()->getPointerElementType();
if (TM.getCodeModel() == CodeModel::Small || !ObjType->isSized() ||
- TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(ObjType) <
- CodeModelLargeSize) {
+ TM.getDataLayout()->getTypeAllocSize(ObjType) < CodeModelLargeSize) {
if (Kind.isReadOnly()) return UseCPRel ? ReadOnlySection
                                       : DataRelROSection;
if (Kind.isBSS() || Kind.isCommon()) return BSSSection;
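
The Initialize hunks above reflect MCContext::getELFSection losing its SectionKind parameter (the kind is now inferred from the section type and flags) and gaining optional entry-size/group arguments for SHF_MERGE sections, which is where the 4/8/16 literals come from. A minimal sketch of the two shapes, assuming an MCContext reference and this revision's const return type:

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/ELF.h"

void makeSections(llvm::MCContext &Ctx) {
  using namespace llvm;
  // Common case: no entry size; the section kind is derived internally.
  const MCSectionELF *Data = Ctx.getELFSection(
      ".dp.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_WRITE);
  // Mergeable constants: fixed 8-byte entries and an empty group name.
  const MCSectionELF *Cst8 = Ctx.getELFSection(
      ".cp.rodata.cst8", ELF::SHT_PROGBITS,
      ELF::SHF_ALLOC | ELF::SHF_MERGE, 8, "");
  (void)Data; (void)Cst8;
}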
diff --git a/lib/Target/XCore/XCoreTargetTransformInfo.cpp b/lib/Target/XCore/XCoreTargetTransformInfo.cpp
deleted file mode 100644
index da232da..0000000
--- a/lib/Target/XCore/XCoreTargetTransformInfo.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//===-- XCoreTargetTransformInfo.cpp - XCore specific TTI pass ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements a TargetTransformInfo analysis pass specific to the
-/// XCore target machine. It uses the target's detailed information to provide
-/// more precise answers to certain TTI queries, while letting the target
-/// independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-
-#include "XCore.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Target/CostTable.h"
-#include "llvm/Target/TargetLowering.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "xcoretti"
-
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeXCoreTTIPass(PassRegistry &);
-}
-
-namespace {
-
-class XCoreTTI final : public ImmutablePass, public TargetTransformInfo {
-public:
- XCoreTTI() : ImmutablePass(ID) {
- llvm_unreachable("This pass cannot be directly constructed");
- }
-
- XCoreTTI(const XCoreTargetMachine *TM)
- : ImmutablePass(ID) {
- initializeXCoreTTIPass(*PassRegistry::getPassRegistry());
- }
-
- void initializePass() override {
- pushTTIStack(this);
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- TargetTransformInfo::getAnalysisUsage(AU);
- }
-
- static char ID;
-
- void *getAdjustedAnalysisPointer(const void *ID) override {
- if (ID == &TargetTransformInfo::ID)
- return (TargetTransformInfo*)this;
- return this;
- }
-
- unsigned getNumberOfRegisters(bool Vector) const override {
- if (Vector) {
- return 0;
- }
- return 12;
- }
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(XCoreTTI, TargetTransformInfo, "xcoretti",
- "XCore Target Transform Info", true, true, false)
-char XCoreTTI::ID = 0;
-
-
-ImmutablePass *
-llvm::createXCoreTargetTransformInfoPass(const XCoreTargetMachine *TM) {
- return new XCoreTTI(TM);
-}
diff --git a/lib/Target/XCore/XCoreTargetTransformInfo.h b/lib/Target/XCore/XCoreTargetTransformInfo.h
new file mode 100644
index 0000000..70b47df
--- /dev/null
+++ b/lib/Target/XCore/XCoreTargetTransformInfo.h
@@ -0,0 +1,72 @@
+//===-- XCoreTargetTransformInfo.h - XCore specific TTI ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a TargetTransformInfo::Concept conforming object for
+/// the XCore target. It uses the target's detailed information to provide
+/// more precise answers to certain TTI queries, while letting the target
+/// independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_XCORE_XCORETARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_XCORE_XCORETARGETTRANSFORMINFO_H
+
+#include "XCore.h"
+#include "XCoreTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+class XCoreTTIImpl : public BasicTTIImplBase<XCoreTTIImpl> {
+ typedef BasicTTIImplBase<XCoreTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const XCoreSubtarget *ST;
+ const XCoreTargetLowering *TLI;
+
+ const XCoreSubtarget *getST() const { return ST; }
+ const XCoreTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit XCoreTTIImpl(const XCoreTargetMachine *TM)
+ : BaseT(TM), ST(TM->getSubtargetImpl()), TLI(ST->getTargetLowering()) {}
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ XCoreTTIImpl(const XCoreTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ XCoreTTIImpl(XCoreTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+ XCoreTTIImpl &operator=(const XCoreTTIImpl &RHS) {
+ BaseT::operator=(static_cast<const BaseT &>(RHS));
+ ST = RHS.ST;
+ TLI = RHS.TLI;
+ return *this;
+ }
+ XCoreTTIImpl &operator=(XCoreTTIImpl &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ST = std::move(RHS.ST);
+ TLI = std::move(RHS.TLI);
+ return *this;
+ }
+
+ unsigned getNumberOfRegisters(bool Vector) {
+ if (Vector) {
+ return 0;
+ }
+ return 12;
+ }
+};
+
+} // end namespace llvm
+
+#endif
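
The deleted ImmutablePass above and this new header are two halves of the same migration: TTI is now a value type, and TargetTransformInfo type-erases whatever conforming implementation it is constructed from (hence the spelled-out copy/move members for MSVC). A hedged usage sketch, with makeXCoreTTI as a hypothetical helper:

#include "XCoreTargetTransformInfo.h"

// TargetTransformInfo stores a type-erased copy of the implementation;
// queries dispatch through its internal Concept/Model pair.
llvm::TargetTransformInfo makeXCoreTTI(const llvm::XCoreTargetMachine *TM) {
  return llvm::TargetTransformInfo(llvm::XCoreTTIImpl(TM));
  // e.g. getNumberOfRegisters(/*Vector=*/false) now answers 12 for XCore.
}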