Diffstat (limited to 'lib/Target/XCore')
35 files changed, 5168 insertions, 0 deletions
diff --git a/lib/Target/XCore/AsmPrinter/CMakeLists.txt b/lib/Target/XCore/AsmPrinter/CMakeLists.txt new file mode 100644 index 0000000..7c7c2f4 --- /dev/null +++ b/lib/Target/XCore/AsmPrinter/CMakeLists.txt @@ -0,0 +1,6 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +add_llvm_library(LLVMXCoreAsmPrinter + XCoreAsmPrinter.cpp + ) +add_dependencies(LLVMXCoreAsmPrinter XCoreCodeGenTable_gen) diff --git a/lib/Target/XCore/AsmPrinter/Makefile b/lib/Target/XCore/AsmPrinter/Makefile new file mode 100644 index 0000000..82dc1df --- /dev/null +++ b/lib/Target/XCore/AsmPrinter/Makefile @@ -0,0 +1,16 @@ +##===- lib/Target/XCore/AsmPrinter/Makefile ----------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../../../.. +LIBRARYNAME = LLVMXCoreAsmPrinter + +# Hack: we need to include 'main' XCore target directory to grab private headers +CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. + +include $(LEVEL)/Makefile.common diff --git a/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp b/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp new file mode 100644 index 0000000..d18f55d --- /dev/null +++ b/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp @@ -0,0 +1,319 @@ +//===-- XCoreAsmPrinter.cpp - XCore LLVM assembly writer ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal representation +// of machine-dependent LLVM code to the XAS-format XCore assembly language. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "asm-printer" +#include "XCore.h" +#include "XCoreInstrInfo.h" +#include "XCoreSubtarget.h" +#include "XCoreMCAsmInfo.h" +#include "XCoreTargetMachine.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/DwarfWriter.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetLoweringObjectFile.h" +#include "llvm/Target/TargetRegistry.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FormattedStream.h" +#include "llvm/Support/MathExtras.h" +#include <algorithm> +#include <cctype> +using namespace llvm; + +static cl::opt<unsigned> MaxThreads("xcore-max-threads", cl::Optional, + cl::desc("Maximum number of threads (for emulation thread-local storage)"), + cl::Hidden, + cl::value_desc("number"), + cl::init(8)); + +namespace { + class XCoreAsmPrinter : public AsmPrinter { + const XCoreSubtarget &Subtarget; + public: + explicit XCoreAsmPrinter(formatted_raw_ostream &O, TargetMachine &TM, + MCContext &Ctx, MCStreamer &Streamer, + const MCAsmInfo *T) + : AsmPrinter(O, TM, Ctx, Streamer, T), + Subtarget(TM.getSubtarget<XCoreSubtarget>()) {} + + virtual const char *getPassName() const { + return "XCore Assembly Printer"; + } + + void printMemOperand(const MachineInstr *MI, int opNum); + void printOperand(const MachineInstr *MI, int opNum); + bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, const char *ExtraCode); + + void emitGlobalDirective(const MCSymbol *Sym); + + void emitArrayBound(const MCSymbol *Sym, const GlobalVariable *GV); + virtual void EmitGlobalVariable(const GlobalVariable *GV); + + void emitFunctionStart(MachineFunction &MF); + + void printInstruction(const MachineInstr *MI); // autogenerated. 
+ static const char *getRegisterName(unsigned RegNo); + + bool runOnMachineFunction(MachineFunction &MF); + void EmitInstruction(const MachineInstr *MI); + void EmitFunctionBodyEnd(); + + void getAnalysisUsage(AnalysisUsage &AU) const { + AsmPrinter::getAnalysisUsage(AU); + AU.setPreservesAll(); + AU.addRequired<MachineModuleInfo>(); + AU.addRequired<DwarfWriter>(); + } + }; +} // end of anonymous namespace + +#include "XCoreGenAsmWriter.inc" + +void XCoreAsmPrinter::emitGlobalDirective(const MCSymbol *Sym) { + O << MAI->getGlobalDirective() << *Sym << "\n"; +} + +void XCoreAsmPrinter::emitArrayBound(const MCSymbol *Sym, + const GlobalVariable *GV) { + assert(((GV->hasExternalLinkage() || + GV->hasWeakLinkage()) || + GV->hasLinkOnceLinkage()) && "Unexpected linkage"); + if (const ArrayType *ATy = dyn_cast<ArrayType>( + cast<PointerType>(GV->getType())->getElementType())) { + O << MAI->getGlobalDirective() << *Sym; + O << ".globound" << "\n"; + O << "\t.set\t" << *Sym; + O << ".globound" << "," << ATy->getNumElements() << "\n"; + if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) { + // TODO Use COMDAT groups for LinkOnceLinkage + O << MAI->getWeakDefDirective() << *Sym << ".globound" << "\n"; + } + } +} + +void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { + // Check to see if this is a special global used by LLVM, if so, emit it. + if (!GV->hasInitializer() || + EmitSpecialLLVMGlobal(GV)) + return; + + const TargetData *TD = TM.getTargetData(); + OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM)); + + + MCSymbol *GVSym = GetGlobalValueSymbol(GV); + Constant *C = GV->getInitializer(); + unsigned Align = (unsigned)TD->getPreferredTypeAlignmentShift(C->getType()); + + // Mark the start of the global + O << "\t.cc_top " << *GVSym << ".data," << *GVSym << "\n"; + + switch (GV->getLinkage()) { + case GlobalValue::AppendingLinkage: + llvm_report_error("AppendingLinkage is not supported by this target!"); + case GlobalValue::LinkOnceAnyLinkage: + case GlobalValue::LinkOnceODRLinkage: + case GlobalValue::WeakAnyLinkage: + case GlobalValue::WeakODRLinkage: + case GlobalValue::ExternalLinkage: + emitArrayBound(GVSym, GV); + emitGlobalDirective(GVSym); + // TODO Use COMDAT groups for LinkOnceLinkage + if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) + O << MAI->getWeakDefDirective() << *GVSym << "\n"; + // FALL THROUGH + case GlobalValue::InternalLinkage: + case GlobalValue::PrivateLinkage: + case GlobalValue::LinkerPrivateLinkage: + break; + case GlobalValue::DLLImportLinkage: + llvm_unreachable("DLLImport linkage is not supported by this target!"); + case GlobalValue::DLLExportLinkage: + llvm_unreachable("DLLExport linkage is not supported by this target!"); + default: + llvm_unreachable("Unknown linkage type!"); + } + + EmitAlignment(Align, GV, 2); + + unsigned Size = TD->getTypeAllocSize(C->getType()); + if (GV->isThreadLocal()) { + Size *= MaxThreads; + } + if (MAI->hasDotTypeDotSizeDirective()) { + O << "\t.type " << *GVSym << ",@object\n"; + O << "\t.size " << *GVSym << "," << Size << "\n"; + } + O << *GVSym << ":\n"; + + EmitGlobalConstant(C); + if (GV->isThreadLocal()) { + for (unsigned i = 1; i < MaxThreads; ++i) + EmitGlobalConstant(C); + } + // The ABI requires that unsigned scalar types smaller than 32 bits + // are padded to 32 bits. 
+ if (Size < 4) + OutStreamer.EmitZeros(4 - Size, 0); + + // Mark the end of the global + O << "\t.cc_bottom " << *GVSym << ".data\n"; +} + +/// Emit the directives on the start of functions +void XCoreAsmPrinter::emitFunctionStart(MachineFunction &MF) { + // Print out the label for the function. + const Function *F = MF.getFunction(); + + OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(F, Mang, TM)); + + // Mark the start of the function + O << "\t.cc_top " << *CurrentFnSym << ".function," << *CurrentFnSym << "\n"; + + switch (F->getLinkage()) { + default: llvm_unreachable("Unknown linkage type!"); + case Function::InternalLinkage: // Symbols default to internal. + case Function::PrivateLinkage: + case Function::LinkerPrivateLinkage: + break; + case Function::ExternalLinkage: + emitGlobalDirective(CurrentFnSym); + break; + case Function::LinkOnceAnyLinkage: + case Function::LinkOnceODRLinkage: + case Function::WeakAnyLinkage: + case Function::WeakODRLinkage: + // TODO Use COMDAT groups for LinkOnceLinkage + O << MAI->getGlobalDirective() << *CurrentFnSym << "\n"; + O << MAI->getWeakDefDirective() << *CurrentFnSym << "\n"; + break; + } + // (1 << 1) byte aligned + EmitAlignment(MF.getAlignment(), F, 1); + if (MAI->hasDotTypeDotSizeDirective()) + O << "\t.type " << *CurrentFnSym << ",@function\n"; + + O << *CurrentFnSym << ":\n"; +} + + +/// EmitFunctionBodyEnd - Targets can override this to emit stuff after +/// the last basic block in the function. +void XCoreAsmPrinter::EmitFunctionBodyEnd() { + // Emit function end directives + O << "\t.cc_bottom " << *CurrentFnSym << ".function\n"; +} + +/// runOnMachineFunction - This uses the printMachineInstruction() +/// method to print assembly for each instruction. +/// +bool XCoreAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + SetupMachineFunction(MF); + + // Print out constants referenced by the function + EmitConstantPool(); + + // Emit the function start directives + emitFunctionStart(MF); + + // Emit pre-function debug information. + DW->BeginFunction(&MF); + + EmitFunctionBody(); + return false; +} + +void XCoreAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum) +{ + printOperand(MI, opNum); + + if (MI->getOperand(opNum+1).isImm() + && MI->getOperand(opNum+1).getImm() == 0) + return; + + O << "+"; + printOperand(MI, opNum+1); +} + +void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum) { + const MachineOperand &MO = MI->getOperand(opNum); + switch (MO.getType()) { + case MachineOperand::MO_Register: + O << getRegisterName(MO.getReg()); + break; + case MachineOperand::MO_Immediate: + O << MO.getImm(); + break; + case MachineOperand::MO_MachineBasicBlock: + O << *MO.getMBB()->getSymbol(OutContext); + break; + case MachineOperand::MO_GlobalAddress: + O << *GetGlobalValueSymbol(MO.getGlobal()); + break; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + break; + case MachineOperand::MO_ConstantPoolIndex: + O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() + << '_' << MO.getIndex(); + break; + case MachineOperand::MO_JumpTableIndex: + O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() + << '_' << MO.getIndex(); + break; + case MachineOperand::MO_BlockAddress: + O << *GetBlockAddressSymbol(MO.getBlockAddress()); + break; + default: + llvm_unreachable("not implemented"); + } +} + +/// PrintAsmOperand - Print out an operand for an inline asm expression. 
+/// +bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, + const char *ExtraCode) { + printOperand(MI, OpNo); + return false; +} + +void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) { + // Check for mov mnemonic + unsigned src, dst, srcSR, dstSR; + if (TM.getInstrInfo()->isMoveInstr(*MI, src, dst, srcSR, dstSR)) { + O << "\tmov " << getRegisterName(dst) << ", "; + O << getRegisterName(src); + OutStreamer.AddBlankLine(); + return; + } + printInstruction(MI); + OutStreamer.AddBlankLine(); +} + +// Force static initialization. +extern "C" void LLVMInitializeXCoreAsmPrinter() { + RegisterAsmPrinter<XCoreAsmPrinter> X(TheXCoreTarget); +} diff --git a/lib/Target/XCore/CMakeLists.txt b/lib/Target/XCore/CMakeLists.txt new file mode 100644 index 0000000..0965323 --- /dev/null +++ b/lib/Target/XCore/CMakeLists.txt @@ -0,0 +1,24 @@ +set(LLVM_TARGET_DEFINITIONS XCore.td) + +tablegen(XCoreGenRegisterInfo.h.inc -gen-register-desc-header) +tablegen(XCoreGenRegisterNames.inc -gen-register-enums) +tablegen(XCoreGenRegisterInfo.inc -gen-register-desc) +tablegen(XCoreGenInstrNames.inc -gen-instr-enums) +tablegen(XCoreGenInstrInfo.inc -gen-instr-desc) +tablegen(XCoreGenAsmWriter.inc -gen-asm-writer) +tablegen(XCoreGenDAGISel.inc -gen-dag-isel) +tablegen(XCoreGenCallingConv.inc -gen-callingconv) +tablegen(XCoreGenSubtarget.inc -gen-subtarget) + +add_llvm_target(XCore + MCSectionXCore.cpp + XCoreFrameInfo.cpp + XCoreInstrInfo.cpp + XCoreISelDAGToDAG.cpp + XCoreISelLowering.cpp + XCoreMCAsmInfo.cpp + XCoreRegisterInfo.cpp + XCoreSubtarget.cpp + XCoreTargetMachine.cpp + XCoreTargetObjectFile.cpp + ) diff --git a/lib/Target/XCore/MCSectionXCore.cpp b/lib/Target/XCore/MCSectionXCore.cpp new file mode 100644 index 0000000..5acceaf --- /dev/null +++ b/lib/Target/XCore/MCSectionXCore.cpp @@ -0,0 +1,35 @@ +//===- MCSectionXCore.cpp - XCore-specific section representation ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the MCSectionXCore class. +// +//===----------------------------------------------------------------------===// + +#include "MCSectionXCore.h" +#include "llvm/MC/MCContext.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +MCSectionXCore * +MCSectionXCore::Create(const StringRef &Section, unsigned Type, + unsigned Flags, SectionKind K, + bool isExplicit, MCContext &Ctx) { + return new (Ctx) MCSectionXCore(Section, Type, Flags, K, isExplicit); +} + + +/// PrintTargetSpecificSectionFlags - This handles the XCore-specific cp/dp +/// section flags. +void MCSectionXCore::PrintTargetSpecificSectionFlags(const MCAsmInfo &MAI, + raw_ostream &OS) const { + if (getFlags() & MCSectionXCore::SHF_CP_SECTION) + OS << 'c'; + if (getFlags() & MCSectionXCore::SHF_DP_SECTION) + OS << 'd'; +} diff --git a/lib/Target/XCore/MCSectionXCore.h b/lib/Target/XCore/MCSectionXCore.h new file mode 100644 index 0000000..02f8f95 --- /dev/null +++ b/lib/Target/XCore/MCSectionXCore.h @@ -0,0 +1,54 @@ +//===- MCSectionXCore.h - XCore-specific section representation -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file declares the MCSectionXCore class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_MCSECTION_XCORE_H +#define LLVM_MCSECTION_XCORE_H + +#include "llvm/MC/MCSectionELF.h" + +namespace llvm { + +class MCSectionXCore : public MCSectionELF { + MCSectionXCore(const StringRef &Section, unsigned Type, unsigned Flags, + SectionKind K, bool isExplicit) + : MCSectionELF(Section, Type, Flags, K, isExplicit) {} + +public: + + enum { + /// SHF_CP_SECTION - All sections with the "c" flag are grouped together + /// by the linker to form the constant pool and the cp register is set to + /// the start of the constant pool by the boot code. + SHF_CP_SECTION = FIRST_TARGET_DEP_FLAG, + + /// SHF_DP_SECTION - All sections with the "d" flag are grouped together + /// by the linker to form the data section and the dp register is set to + /// the start of the section by the boot code. + SHF_DP_SECTION = FIRST_TARGET_DEP_FLAG << 1 + }; + + static MCSectionXCore *Create(const StringRef &Section, unsigned Type, + unsigned Flags, SectionKind K, + bool isExplicit, MCContext &Ctx); + + + /// PrintTargetSpecificSectionFlags - This handles the XCore-specific cp/dp + /// section flags. + virtual void PrintTargetSpecificSectionFlags(const MCAsmInfo &MAI, + raw_ostream &OS) const; + +}; + +} // end namespace llvm + +#endif diff --git a/lib/Target/XCore/Makefile b/lib/Target/XCore/Makefile new file mode 100644 index 0000000..1b70974 --- /dev/null +++ b/lib/Target/XCore/Makefile @@ -0,0 +1,24 @@ +##===- lib/Target/XCore/Makefile ---------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../../.. +LIBRARYNAME = LLVMXCoreCodeGen +TARGET = XCore + +# Make sure that tblgen is run, first thing. +BUILT_SOURCES = XCoreGenRegisterInfo.h.inc XCoreGenRegisterNames.inc \ + XCoreGenRegisterInfo.inc XCoreGenInstrNames.inc \ + XCoreGenInstrInfo.inc XCoreGenAsmWriter.inc \ + XCoreGenDAGISel.inc XCoreGenCallingConv.inc \ + XCoreGenSubtarget.inc + +DIRS = AsmPrinter TargetInfo + +include $(LEVEL)/Makefile.common + diff --git a/lib/Target/XCore/README.txt b/lib/Target/XCore/README.txt new file mode 100644 index 0000000..deaeb0f --- /dev/null +++ b/lib/Target/XCore/README.txt @@ -0,0 +1,8 @@ +To-do +----- + +* Instruction encodings +* Tailcalls +* Investigate loop alignment +* Add builtins +* Make better use of lmul / macc diff --git a/lib/Target/XCore/TargetInfo/CMakeLists.txt b/lib/Target/XCore/TargetInfo/CMakeLists.txt new file mode 100644 index 0000000..0a568de --- /dev/null +++ b/lib/Target/XCore/TargetInfo/CMakeLists.txt @@ -0,0 +1,7 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +add_llvm_library(LLVMXCoreInfo + XCoreTargetInfo.cpp + ) + +add_dependencies(LLVMXCoreInfo XCoreTable_gen) diff --git a/lib/Target/XCore/TargetInfo/Makefile b/lib/Target/XCore/TargetInfo/Makefile new file mode 100644 index 0000000..f8a4095 --- /dev/null +++ b/lib/Target/XCore/TargetInfo/Makefile @@ -0,0 +1,16 @@ +##===- lib/Target/XCore/TargetInfo/Makefile ----------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. 
See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../../../.. +LIBRARYNAME = LLVMXCoreInfo + +# Hack: we need to include 'main' target directory to grab private headers +CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. + +include $(LEVEL)/Makefile.common diff --git a/lib/Target/XCore/TargetInfo/XCoreTargetInfo.cpp b/lib/Target/XCore/TargetInfo/XCoreTargetInfo.cpp new file mode 100644 index 0000000..7aa8965 --- /dev/null +++ b/lib/Target/XCore/TargetInfo/XCoreTargetInfo.cpp @@ -0,0 +1,19 @@ +//===-- XCoreTargetInfo.cpp - XCore Target Implementation -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "XCore.h" +#include "llvm/Module.h" +#include "llvm/Target/TargetRegistry.h" +using namespace llvm; + +Target llvm::TheXCoreTarget; + +extern "C" void LLVMInitializeXCoreTargetInfo() { + RegisterTarget<Triple::xcore> X(TheXCoreTarget, "xcore", "XCore"); +} diff --git a/lib/Target/XCore/XCore.h b/lib/Target/XCore/XCore.h new file mode 100644 index 0000000..8937fbe --- /dev/null +++ b/lib/Target/XCore/XCore.h @@ -0,0 +1,41 @@ +//===-- XCore.h - Top-level interface for XCore representation --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in the LLVM +// XCore back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef TARGET_XCORE_H +#define TARGET_XCORE_H + +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + class FunctionPass; + class TargetMachine; + class XCoreTargetMachine; + class formatted_raw_ostream; + + FunctionPass *createXCoreISelDag(XCoreTargetMachine &TM); + + extern Target TheXCoreTarget; + +} // end namespace llvm; + +// Defines symbolic names for XCore registers. This defines a mapping from +// register name to register number. +// +#include "XCoreGenRegisterNames.inc" + +// Defines symbolic names for the XCore instructions. +// +#include "XCoreGenInstrNames.inc" + +#endif diff --git a/lib/Target/XCore/XCore.td b/lib/Target/XCore/XCore.td new file mode 100644 index 0000000..b07445d --- /dev/null +++ b/lib/Target/XCore/XCore.td @@ -0,0 +1,49 @@ +//===- XCore.td - Describe the XCore Target Machine --------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Target-independent interfaces which we are implementing +//===----------------------------------------------------------------------===// + +include "llvm/Target/Target.td" + +//===----------------------------------------------------------------------===// +// Descriptions +//===----------------------------------------------------------------------===// + +include "XCoreRegisterInfo.td" +include "XCoreInstrInfo.td" +include "XCoreCallingConv.td" + +def XCoreInstrInfo : InstrInfo { + let TSFlagsFields = []; + let TSFlagsShifts = []; +} + +//===----------------------------------------------------------------------===// +// XCore processors supported. +//===----------------------------------------------------------------------===// + +class Proc<string Name, list<SubtargetFeature> Features> + : Processor<Name, NoItineraries, Features>; + +def : Proc<"generic", []>; +def : Proc<"xs1b-generic", []>; + +//===----------------------------------------------------------------------===// +// Declare the target which we are implementing +//===----------------------------------------------------------------------===// + +def XCore : Target { + // Pull in Instruction Info: + let InstructionSet = XCoreInstrInfo; +} diff --git a/lib/Target/XCore/XCoreCallingConv.td b/lib/Target/XCore/XCoreCallingConv.td new file mode 100644 index 0000000..8107e32 --- /dev/null +++ b/lib/Target/XCore/XCoreCallingConv.td @@ -0,0 +1,33 @@ +//===- XCoreCallingConv.td - Calling Conventions for XCore -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This describes the calling conventions for XCore architecture. +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// XCore Return Value Calling Convention +//===----------------------------------------------------------------------===// +def RetCC_XCore : CallingConv<[ + // i32 are returned in registers R0, R1, R2, R3 + CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>> +]>; + +//===----------------------------------------------------------------------===// +// XCore Argument Calling Conventions +//===----------------------------------------------------------------------===// +def CC_XCore : CallingConv<[ + // Promote i8/i16 arguments to i32. + CCIfType<[i8, i16], CCPromoteToType<i32>>, + + // The first 4 integer arguments are passed in integer registers. + CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>, + + // Integer values get stored in stack slots that are 4 bytes in + // size and 4-byte aligned. + CCIfType<[i32], CCAssignToStack<4, 4>> +]>; diff --git a/lib/Target/XCore/XCoreFrameInfo.cpp b/lib/Target/XCore/XCoreFrameInfo.cpp new file mode 100644 index 0000000..f50dc96 --- /dev/null +++ b/lib/Target/XCore/XCoreFrameInfo.cpp @@ -0,0 +1,27 @@ +//===-- XCoreFrameInfo.cpp - Frame info for XCore Target ---------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file contains XCore frame information that doesn't fit anywhere else +// cleanly... +// +//===----------------------------------------------------------------------===// + +#include "XCore.h" +#include "XCoreFrameInfo.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// XCoreFrameInfo: +//===----------------------------------------------------------------------===// + +XCoreFrameInfo::XCoreFrameInfo(const TargetMachine &tm): + TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 4, 0) +{ + // Do nothing +} diff --git a/lib/Target/XCore/XCoreFrameInfo.h b/lib/Target/XCore/XCoreFrameInfo.h new file mode 100644 index 0000000..2c67577 --- /dev/null +++ b/lib/Target/XCore/XCoreFrameInfo.h @@ -0,0 +1,34 @@ +//===-- XCoreFrameInfo.h - Frame info for XCore Target -----------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains XCore frame information that doesn't fit anywhere else +// cleanly... +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREFRAMEINFO_H +#define XCOREFRAMEINFO_H + +#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + class XCoreFrameInfo: public TargetFrameInfo { + + public: + XCoreFrameInfo(const TargetMachine &tm); + + //! Stack slot size (4 bytes) + static int stackSlotSize() { + return 4; + } + }; +} + +#endif // XCOREFRAMEINFO_H diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp new file mode 100644 index 0000000..383fd91 --- /dev/null +++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -0,0 +1,222 @@ +//===-- XCoreISelDAGToDAG.cpp - A dag to dag inst selector for XCore ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the XCore target. +// +//===----------------------------------------------------------------------===// + +#include "XCore.h" +#include "XCoreISelLowering.h" +#include "XCoreTargetMachine.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Intrinsics.h" +#include "llvm/CallingConv.h" +#include "llvm/Constants.h" +#include "llvm/LLVMContext.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/Target/TargetLowering.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <queue> +#include <set> +using namespace llvm; + +/// XCoreDAGToDAGISel - XCore specific code to select XCore machine +/// instructions for SelectionDAG operations. 
+/// +namespace { + class XCoreDAGToDAGISel : public SelectionDAGISel { + XCoreTargetLowering &Lowering; + const XCoreSubtarget &Subtarget; + + public: + XCoreDAGToDAGISel(XCoreTargetMachine &TM) + : SelectionDAGISel(TM), + Lowering(*TM.getTargetLowering()), + Subtarget(*TM.getSubtargetImpl()) { } + + SDNode *Select(SDNode *N); + + /// getI32Imm - Return a target constant with the specified value, of type + /// i32. + inline SDValue getI32Imm(unsigned Imm) { + return CurDAG->getTargetConstant(Imm, MVT::i32); + } + + // Complex Pattern Selectors. + bool SelectADDRspii(SDNode *Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + bool SelectADDRdpii(SDNode *Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + bool SelectADDRcpii(SDNode *Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + + virtual void InstructionSelect(); + + virtual const char *getPassName() const { + return "XCore DAG->DAG Pattern Instruction Selection"; + } + + // Include the pieces autogenerated from the target description. + #include "XCoreGenDAGISel.inc" + }; +} // end anonymous namespace + +/// createXCoreISelDag - This pass converts a legalized DAG into a +/// XCore-specific DAG, ready for instruction scheduling. +/// +FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM) { + return new XCoreDAGToDAGISel(TM); +} + +bool XCoreDAGToDAGISel::SelectADDRspii(SDNode *Op, SDValue Addr, + SDValue &Base, SDValue &Offset) { + FrameIndexSDNode *FIN = 0; + if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); + Offset = CurDAG->getTargetConstant(0, MVT::i32); + return true; + } + if (Addr.getOpcode() == ISD::ADD) { + ConstantSDNode *CN = 0; + if ((FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) + && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) + && (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) { + // Constant positive word offset from frame index + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); + Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32); + return true; + } + } + return false; +} + +bool XCoreDAGToDAGISel::SelectADDRdpii(SDNode *Op, SDValue Addr, + SDValue &Base, SDValue &Offset) { + if (Addr.getOpcode() == XCoreISD::DPRelativeWrapper) { + Base = Addr.getOperand(0); + Offset = CurDAG->getTargetConstant(0, MVT::i32); + return true; + } + if (Addr.getOpcode() == ISD::ADD) { + ConstantSDNode *CN = 0; + if ((Addr.getOperand(0).getOpcode() == XCoreISD::DPRelativeWrapper) + && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) + && (CN->getSExtValue() % 4 == 0)) { + // Constant word offset from a object in the data region + Base = Addr.getOperand(0).getOperand(0); + Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32); + return true; + } + } + return false; +} + +bool XCoreDAGToDAGISel::SelectADDRcpii(SDNode *Op, SDValue Addr, + SDValue &Base, SDValue &Offset) { + if (Addr.getOpcode() == XCoreISD::CPRelativeWrapper) { + Base = Addr.getOperand(0); + Offset = CurDAG->getTargetConstant(0, MVT::i32); + return true; + } + if (Addr.getOpcode() == ISD::ADD) { + ConstantSDNode *CN = 0; + if ((Addr.getOperand(0).getOpcode() == XCoreISD::CPRelativeWrapper) + && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) + && (CN->getSExtValue() % 4 == 0)) { + // Constant word offset from a object in the data region + Base = Addr.getOperand(0).getOperand(0); + Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32); + return true; + } + } + return false; +} + +/// InstructionSelect - This 
callback is invoked by +/// SelectionDAGISel when it has created a SelectionDAG for us to codegen. +void XCoreDAGToDAGISel::InstructionSelect() { + // Select target instructions for the DAG. + SelectRoot(*CurDAG); + + CurDAG->RemoveDeadNodes(); +} + +SDNode *XCoreDAGToDAGISel::Select(SDNode *N) { + DebugLoc dl = N->getDebugLoc(); + EVT NVT = N->getValueType(0); + if (NVT == MVT::i32) { + switch (N->getOpcode()) { + default: break; + case ISD::Constant: { + if (Predicate_immMskBitp(N)) { + SDValue MskSize = Transform_msksize_xform(N); + return CurDAG->getMachineNode(XCore::MKMSK_rus, dl, + MVT::i32, MskSize); + } + else if (! Predicate_immU16(N)) { + unsigned Val = cast<ConstantSDNode>(N)->getZExtValue(); + SDValue CPIdx = + CurDAG->getTargetConstantPool(ConstantInt::get( + Type::getInt32Ty(*CurDAG->getContext()), Val), + TLI.getPointerTy()); + return CurDAG->getMachineNode(XCore::LDWCP_lru6, dl, MVT::i32, + MVT::Other, CPIdx, + CurDAG->getEntryNode()); + } + break; + } + case ISD::SMUL_LOHI: { + // FIXME fold addition into the macc instruction + SDValue Zero(CurDAG->getMachineNode(XCore::LDC_ru6, dl, MVT::i32, + CurDAG->getTargetConstant(0, MVT::i32)), 0); + SDValue Ops[] = { Zero, Zero, N->getOperand(0), N->getOperand(1) }; + SDNode *ResNode = CurDAG->getMachineNode(XCore::MACCS_l4r, dl, + MVT::i32, MVT::i32, Ops, 4); + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1)); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0)); + return NULL; + } + case ISD::UMUL_LOHI: { + // FIXME fold addition into the macc / lmul instruction + SDValue Zero(CurDAG->getMachineNode(XCore::LDC_ru6, dl, MVT::i32, + CurDAG->getTargetConstant(0, MVT::i32)), 0); + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), + Zero, Zero }; + SDNode *ResNode = CurDAG->getMachineNode(XCore::LMUL_l6r, dl, MVT::i32, + MVT::i32, Ops, 4); + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1)); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0)); + return NULL; + } + case XCoreISD::LADD: { + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), + N->getOperand(2) }; + return CurDAG->getMachineNode(XCore::LADD_l5r, dl, MVT::i32, MVT::i32, + Ops, 3); + } + case XCoreISD::LSUB: { + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), + N->getOperand(2) }; + return CurDAG->getMachineNode(XCore::LSUB_l5r, dl, MVT::i32, MVT::i32, + Ops, 3); + } + // Other cases are autogenerated. + } + } + return SelectCode(N); +} diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp new file mode 100644 index 0000000..bf8c38f --- /dev/null +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -0,0 +1,1198 @@ +//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the XCoreTargetLowering class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "xcore-lower" + +#include "XCoreISelLowering.h" +#include "XCoreMachineFunctionInfo.h" +#include "XCore.h" +#include "XCoreTargetObjectFile.h" +#include "XCoreTargetMachine.h" +#include "XCoreSubtarget.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Intrinsics.h" +#include "llvm/CallingConv.h" +#include "llvm/GlobalVariable.h" +#include "llvm/GlobalAlias.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/VectorExtras.h" +#include <queue> +#include <set> +using namespace llvm; + +const char *XCoreTargetLowering:: +getTargetNodeName(unsigned Opcode) const +{ + switch (Opcode) + { + case XCoreISD::BL : return "XCoreISD::BL"; + case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper"; + case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper"; + case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper"; + case XCoreISD::STWSP : return "XCoreISD::STWSP"; + case XCoreISD::RETSP : return "XCoreISD::RETSP"; + case XCoreISD::LADD : return "XCoreISD::LADD"; + case XCoreISD::LSUB : return "XCoreISD::LSUB"; + default : return NULL; + } +} + +XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM) + : TargetLowering(XTM, new XCoreTargetObjectFile()), + TM(XTM), + Subtarget(*XTM.getSubtargetImpl()) { + + // Set up the register classes. + addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass); + + // Compute derived properties from the register classes + computeRegisterProperties(); + + // Division is expensive + setIntDivIsCheap(false); + + setShiftAmountType(MVT::i32); + setStackPointerRegisterToSaveRestore(XCore::SP); + + setSchedulingPreference(SchedulingForRegPressure); + + // Use i32 for setcc operations results (slt, sgt, ...). + setBooleanContents(ZeroOrOneBooleanContent); + + // XCore does not have the NodeTypes below. 
+ setOperationAction(ISD::BR_CC, MVT::Other, Expand); + setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); + setOperationAction(ISD::ADDC, MVT::i32, Expand); + setOperationAction(ISD::ADDE, MVT::i32, Expand); + setOperationAction(ISD::SUBC, MVT::i32, Expand); + setOperationAction(ISD::SUBE, MVT::i32, Expand); + + // Stop the combiner recombining select and set_cc + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + // 64bit + setOperationAction(ISD::ADD, MVT::i64, Custom); + setOperationAction(ISD::SUB, MVT::i64, Custom); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); + + // Bit Manipulation + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::ROTL , MVT::i32, Expand); + setOperationAction(ISD::ROTR , MVT::i32, Expand); + + setOperationAction(ISD::TRAP, MVT::Other, Legal); + + // Expand jump tables for now + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::JumpTable, MVT::i32, Custom); + + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); + setOperationAction(ISD::BlockAddress, MVT::i32 , Custom); + + // Thread Local Storage + setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); + + // Conversion of i64 -> double produces constantpool nodes + setOperationAction(ISD::ConstantPool, MVT::i32, Custom); + + // Loads + setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); + setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); + + setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand); + + // Custom expand misaligned loads / stores. + setOperationAction(ISD::LOAD, MVT::i32, Custom); + setOperationAction(ISD::STORE, MVT::i32, Custom); + + // Varargs + setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction(ISD::VAARG, MVT::Other, Custom); + setOperationAction(ISD::VASTART, MVT::Other, Custom); + + // Dynamic stack + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); + + maxStoresPerMemset = 4; + maxStoresPerMemmove = maxStoresPerMemcpy = 2; + + // We have target-specific dag combine patterns for the following nodes: + setTargetDAGCombine(ISD::STORE); +} + +SDValue XCoreTargetLowering:: +LowerOperation(SDValue Op, SelectionDAG &DAG) { + switch (Op.getOpcode()) + { + case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); + case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); + case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); + case ISD::ConstantPool: return LowerConstantPool(Op, DAG); + case ISD::JumpTable: return LowerJumpTable(Op, DAG); + case ISD::LOAD: return LowerLOAD(Op, DAG); + case ISD::STORE: return LowerSTORE(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::VAARG: return LowerVAARG(Op, DAG); + case ISD::VASTART: return LowerVASTART(Op, DAG); + // FIXME: Remove these when LegalizeDAGTypes lands. 
+ case ISD::ADD: + case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG); + case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + default: + llvm_unreachable("unimplemented operand"); + return SDValue(); + } +} + +/// ReplaceNodeResults - Replace the results of node with an illegal result +/// type with new values built out of custom code. +void XCoreTargetLowering::ReplaceNodeResults(SDNode *N, + SmallVectorImpl<SDValue>&Results, + SelectionDAG &DAG) { + switch (N->getOpcode()) { + default: + llvm_unreachable("Don't know how to custom expand this!"); + return; + case ISD::ADD: + case ISD::SUB: + Results.push_back(ExpandADDSUB(N, DAG)); + return; + } +} + +/// getFunctionAlignment - Return the Log2 alignment of this function. +unsigned XCoreTargetLowering:: +getFunctionAlignment(const Function *) const { + return 1; +} + +//===----------------------------------------------------------------------===// +// Misc Lower Operation implementation +//===----------------------------------------------------------------------===// + +SDValue XCoreTargetLowering:: +LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) +{ + DebugLoc dl = Op.getDebugLoc(); + SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2), + Op.getOperand(3), Op.getOperand(4)); + return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0), + Op.getOperand(1)); +} + +SDValue XCoreTargetLowering:: +getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG) +{ + // FIXME there is no actual debug info here + DebugLoc dl = GA.getDebugLoc(); + if (isa<Function>(GV)) { + return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA); + } + const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); + if (!GVar) { + // If GV is an alias then use the aliasee to determine constness + if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) + GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal()); + } + bool isConst = GVar && GVar->isConstant(); + if (isConst) { + return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA); + } + return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA); +} + +SDValue XCoreTargetLowering:: +LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) +{ + GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + // If it's a debug information descriptor, don't mess with it. + if (DAG.isVerifiedDebugInfoDesc(Op)) + return GA; + return getGlobalAddressWrapper(GA, GV, DAG); +} + +static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) { + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, + DAG.getConstant(Intrinsic::xcore_getid, MVT::i32)); +} + +static inline bool isZeroLengthArray(const Type *Ty) { + const ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty); + return AT && (AT->getNumElements() == 0); +} + +SDValue XCoreTargetLowering:: +LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) +{ + // FIXME there isn't really debug info here + DebugLoc dl = Op.getDebugLoc(); + // transform to label + getid() * size + GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); + if (!GVar) { + // If GV is an alias then use the aliasee to determine size + if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) + GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal()); + } + if (! 
GVar) { + llvm_unreachable("Thread local object not a GlobalVariable?"); + return SDValue(); + } + const Type *Ty = cast<PointerType>(GV->getType())->getElementType(); + if (!Ty->isSized() || isZeroLengthArray(Ty)) { +#ifndef NDEBUG + errs() << "Size of thread local object " << GVar->getName() + << " is unknown\n"; +#endif + llvm_unreachable(0); + } + SDValue base = getGlobalAddressWrapper(GA, GV, DAG); + const TargetData *TD = TM.getTargetData(); + unsigned Size = TD->getTypeAllocSize(Ty); + SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl), + DAG.getConstant(Size, MVT::i32)); + return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset); +} + +SDValue XCoreTargetLowering:: +LowerBlockAddress(SDValue Op, SelectionDAG &DAG) +{ + DebugLoc DL = Op.getDebugLoc(); + + BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); + SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true); + + return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result); +} + +SDValue XCoreTargetLowering:: +LowerConstantPool(SDValue Op, SelectionDAG &DAG) +{ + ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); + // FIXME there isn't really debug info here + DebugLoc dl = CP->getDebugLoc(); + EVT PtrVT = Op.getValueType(); + SDValue Res; + if (CP->isMachineConstantPoolEntry()) { + Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, + CP->getAlignment()); + } else { + Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, + CP->getAlignment()); + } + return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res); +} + +SDValue XCoreTargetLowering:: +LowerJumpTable(SDValue Op, SelectionDAG &DAG) +{ + // FIXME there isn't really debug info here + DebugLoc dl = Op.getDebugLoc(); + EVT PtrVT = Op.getValueType(); + JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI); +} + +static bool +IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase, + int64_t &Offset) +{ + if (Addr.getOpcode() != ISD::ADD) { + return false; + } + ConstantSDNode *CN = 0; + if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) { + return false; + } + int64_t off = CN->getSExtValue(); + const SDValue &Base = Addr.getOperand(0); + const SDValue *Root = &Base; + if (Base.getOpcode() == ISD::ADD && + Base.getOperand(1).getOpcode() == ISD::SHL) { + ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1) + .getOperand(1)); + if (CN && (CN->getSExtValue() >= 2)) { + Root = &Base.getOperand(0); + } + } + if (isa<FrameIndexSDNode>(*Root)) { + // All frame indicies are word aligned + AlignedBase = Base; + Offset = off; + return true; + } + if (Root->getOpcode() == XCoreISD::DPRelativeWrapper || + Root->getOpcode() == XCoreISD::CPRelativeWrapper) { + // All dp / cp relative addresses are word aligned + AlignedBase = Base; + Offset = off; + return true; + } + return false; +} + +SDValue XCoreTargetLowering:: +LowerLOAD(SDValue Op, SelectionDAG &DAG) +{ + LoadSDNode *LD = cast<LoadSDNode>(Op); + assert(LD->getExtensionType() == ISD::NON_EXTLOAD && + "Unexpected extension type"); + assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT"); + if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { + return SDValue(); + } + unsigned ABIAlignment = getTargetData()-> + getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext())); + // Leave aligned load alone. 
+ if (LD->getAlignment() >= ABIAlignment) { + return SDValue(); + } + SDValue Chain = LD->getChain(); + SDValue BasePtr = LD->getBasePtr(); + DebugLoc dl = Op.getDebugLoc(); + + SDValue Base; + int64_t Offset; + if (!LD->isVolatile() && + IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) { + if (Offset % 4 == 0) { + // We've managed to infer better alignment information than the load + // already has. Use an aligned load. + return DAG.getLoad(getPointerTy(), dl, Chain, BasePtr, NULL, 4); + } + // Lower to + // ldw low, base[offset >> 2] + // ldw high, base[(offset >> 2) + 1] + // shr low_shifted, low, (offset & 0x3) * 8 + // shl high_shifted, high, 32 - (offset & 0x3) * 8 + // or result, low_shifted, high_shifted + SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32); + SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32); + SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32); + SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32); + + SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset); + SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset); + + SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain, + LowAddr, NULL, 4); + SDValue High = DAG.getLoad(getPointerTy(), dl, Chain, + HighAddr, NULL, 4); + SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift); + SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift); + SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1), + High.getValue(1)); + SDValue Ops[] = { Result, Chain }; + return DAG.getMergeValues(Ops, 2, dl); + } + + if (LD->getAlignment() == 2) { + int SVOffset = LD->getSrcValueOffset(); + SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, + BasePtr, LD->getSrcValue(), SVOffset, MVT::i16, + LD->isVolatile(), 2); + SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr, + DAG.getConstant(2, MVT::i32)); + SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain, + HighAddr, LD->getSrcValue(), SVOffset + 2, + MVT::i16, LD->isVolatile(), 2); + SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, + DAG.getConstant(16, MVT::i32)); + SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1), + High.getValue(1)); + SDValue Ops[] = { Result, Chain }; + return DAG.getMergeValues(Ops, 2, dl); + } + + // Lower to a call to __misaligned_load(BasePtr). 
+ const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + + Entry.Ty = IntPtrTy; + Entry.Node = BasePtr; + Args.push_back(Entry); + + std::pair<SDValue, SDValue> CallResult = + LowerCallTo(Chain, IntPtrTy, false, false, + false, false, 0, CallingConv::C, false, + /*isReturnValueUsed=*/true, + DAG.getExternalSymbol("__misaligned_load", getPointerTy()), + Args, DAG, dl, DAG.GetOrdering(Chain.getNode())); + + SDValue Ops[] = + { CallResult.first, CallResult.second }; + + return DAG.getMergeValues(Ops, 2, dl); +} + +SDValue XCoreTargetLowering:: +LowerSTORE(SDValue Op, SelectionDAG &DAG) +{ + StoreSDNode *ST = cast<StoreSDNode>(Op); + assert(!ST->isTruncatingStore() && "Unexpected store type"); + assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT"); + if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { + return SDValue(); + } + unsigned ABIAlignment = getTargetData()-> + getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext())); + // Leave aligned store alone. + if (ST->getAlignment() >= ABIAlignment) { + return SDValue(); + } + SDValue Chain = ST->getChain(); + SDValue BasePtr = ST->getBasePtr(); + SDValue Value = ST->getValue(); + DebugLoc dl = Op.getDebugLoc(); + + if (ST->getAlignment() == 2) { + int SVOffset = ST->getSrcValueOffset(); + SDValue Low = Value; + SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value, + DAG.getConstant(16, MVT::i32)); + SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr, + ST->getSrcValue(), SVOffset, MVT::i16, + ST->isVolatile(), 2); + SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr, + DAG.getConstant(2, MVT::i32)); + SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr, + ST->getSrcValue(), SVOffset + 2, + MVT::i16, ST->isVolatile(), 2); + return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh); + } + + // Lower to a call to __misaligned_store(BasePtr, Value). + const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + + Entry.Ty = IntPtrTy; + Entry.Node = BasePtr; + Args.push_back(Entry); + + Entry.Node = Value; + Args.push_back(Entry); + + std::pair<SDValue, SDValue> CallResult = + LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false, + false, false, 0, CallingConv::C, false, + /*isReturnValueUsed=*/true, + DAG.getExternalSymbol("__misaligned_store", getPointerTy()), + Args, DAG, dl, DAG.GetOrdering(Chain.getNode())); + + return CallResult.second; +} + +SDValue XCoreTargetLowering:: +ExpandADDSUB(SDNode *N, SelectionDAG &DAG) +{ + assert(N->getValueType(0) == MVT::i64 && + (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && + "Unknown operand to lower!"); + DebugLoc dl = N->getDebugLoc(); + + // Extract components + SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, + N->getOperand(0), DAG.getConstant(0, MVT::i32)); + SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, + N->getOperand(0), DAG.getConstant(1, MVT::i32)); + SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, + N->getOperand(1), DAG.getConstant(0, MVT::i32)); + SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, + N->getOperand(1), DAG.getConstant(1, MVT::i32)); + + // Expand + unsigned Opcode = (N->getOpcode() == ISD::ADD) ? 
XCoreISD::LADD : + XCoreISD::LSUB; + SDValue Zero = DAG.getConstant(0, MVT::i32); + SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), + LHSL, RHSL, Zero); + SDValue Lo(Carry.getNode(), 1); + + SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), + LHSH, RHSH, Carry); + SDValue Hi(Ignored.getNode(), 1); + // Merge the pieces + return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); +} + +SDValue XCoreTargetLowering:: +LowerVAARG(SDValue Op, SelectionDAG &DAG) +{ + llvm_unreachable("unimplemented"); + // FIX Arguments passed by reference need a extra dereference. + SDNode *Node = Op.getNode(); + DebugLoc dl = Node->getDebugLoc(); + const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); + EVT VT = Node->getValueType(0); + SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0), + Node->getOperand(1), V, 0); + // Increment the pointer, VAList, to the next vararg + SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList, + DAG.getConstant(VT.getSizeInBits(), + getPointerTy())); + // Store the incremented VAList to the legalized pointer + Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), V, 0); + // Load the actual argument out of the pointer VAList + return DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0); +} + +SDValue XCoreTargetLowering:: +LowerVASTART(SDValue Op, SelectionDAG &DAG) +{ + DebugLoc dl = Op.getDebugLoc(); + // vastart stores the address of the VarArgsFrameIndex slot into the + // memory location argument + MachineFunction &MF = DAG.getMachineFunction(); + XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); + SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32); + const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); + return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0); +} + +SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { + DebugLoc dl = Op.getDebugLoc(); + // Depths > 0 not supported yet! + if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) + return SDValue(); + + MachineFunction &MF = DAG.getMachineFunction(); + const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo(); + return DAG.getCopyFromReg(DAG.getEntryNode(), dl, + RegInfo->getFrameRegister(MF), MVT::i32); +} + +//===----------------------------------------------------------------------===// +// Calling Convention Implementation +//===----------------------------------------------------------------------===// + +#include "XCoreGenCallingConv.inc" + +//===----------------------------------------------------------------------===// +// Call Calling Convention Implementation +//===----------------------------------------------------------------------===// + +/// XCore call implementation +SDValue +XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool &isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + // XCore target does not yet support tail call optimization. 
+ isTailCall = false; + + // For now, only CallingConv::C implemented + switch (CallConv) + { + default: + llvm_unreachable("Unsupported calling convention"); + case CallingConv::Fast: + case CallingConv::C: + return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall, + Outs, Ins, dl, DAG, InVals); + } +} + +/// LowerCCCCallTo - functions arguments are copied from virtual +/// regs to (physical regs)/(stack frame), CALLSEQ_START and +/// CALLSEQ_END are emitted. +/// TODO: isTailCall, sret. +SDValue +XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + + // Analyze operands of the call, assigning locations to each operand. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, getTargetMachine(), + ArgLocs, *DAG.getContext()); + + // The ABI dictates there should be one stack slot available to the callee + // on function entry (for saving lr). + CCInfo.AllocateStack(4, 4); + + CCInfo.AnalyzeCallOperands(Outs, CC_XCore); + + // Get a count of how many bytes are to be pushed on the stack. + unsigned NumBytes = CCInfo.getNextStackOffset(); + + Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, + getPointerTy(), true)); + + SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass; + SmallVector<SDValue, 12> MemOpChains; + + // Walk the register/memloc assignments, inserting copies/loads. + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = Outs[i].Val; + + // Promote the value if needed. + switch (VA.getLocInfo()) { + default: llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); + break; + } + + // Arguments that can be passed on register must be kept at + // RegsToPass vector + if (VA.isRegLoc()) { + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + } else { + assert(VA.isMemLoc()); + + int Offset = VA.getLocMemOffset(); + + MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other, + Chain, Arg, + DAG.getConstant(Offset/4, MVT::i32))); + } + } + + // Transform all store nodes into one single node because + // all store nodes are independent of each other. + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, + &MemOpChains[0], MemOpChains.size()); + + // Build a sequence of copy-to-reg nodes chained together with token + // chain and flag operands which copy the outgoing args into registers. + // The InFlag in necessary since all emited instructions must be + // stuck together. + SDValue InFlag; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); + } + + // If the callee is a GlobalAddress node (quite common, every direct call is) + // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. + // Likewise ExternalSymbol -> TargetExternalSymbol. 
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); + else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); + + // XCoreBranchLink = #chain, #target_address, #opt_in_flags... + // = Chain, Callee, Reg#1, Reg#2, ... + // + // Returns a chain & a flag for retval copy to use. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + + // Add argument registers to the end of the list so that they are + // known live into the call. + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + if (InFlag.getNode()) + Ops.push_back(InFlag); + + Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size()); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. + Chain = DAG.getCALLSEQ_END(Chain, + DAG.getConstant(NumBytes, getPointerTy(), true), + DAG.getConstant(0, getPointerTy(), true), + InFlag); + InFlag = Chain.getValue(1); + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, isVarArg, + Ins, dl, DAG, InVals); +} + +/// LowerCallResult - Lower the result values of a call into the +/// appropriate copies out of appropriate physical registers. +SDValue +XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + + // Assign locations to each value returned by this call. + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, getTargetMachine(), + RVLocs, *DAG.getContext()); + + CCInfo.AnalyzeCallResult(Ins, RetCC_XCore); + + // Copy all of the result registers out of their specified physreg. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), + RVLocs[i].getValVT(), InFlag).getValue(1); + InFlag = Chain.getValue(2); + InVals.push_back(Chain.getValue(0)); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Formal Arguments Calling Convention Implementation +//===----------------------------------------------------------------------===// + +/// XCore formal arguments implementation +SDValue +XCoreTargetLowering::LowerFormalArguments(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, + SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + switch (CallConv) + { + default: + llvm_unreachable("Unsupported calling convention"); + case CallingConv::C: + case CallingConv::Fast: + return LowerCCCArguments(Chain, CallConv, isVarArg, + Ins, dl, DAG, InVals); + } +} + +/// LowerCCCArguments - transform physical registers into +/// virtual registers and generate load operations for +/// arguments places on the stack. 
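+/// Under CC_XCore the first word-sized arguments normally arrive in R0..R3
+/// (the same registers listed in the vararg handling below); the rest are
+/// loaded from fixed stack objects placed above the LR save slot.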
+/// TODO: sret +SDValue +XCoreTargetLowering::LowerCCCArguments(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> + &Ins, + DebugLoc dl, + SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + + // Assign locations to all of the incoming arguments. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, getTargetMachine(), + ArgLocs, *DAG.getContext()); + + CCInfo.AnalyzeFormalArguments(Ins, CC_XCore); + + unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize(); + + unsigned LRSaveSize = StackSlotSize; + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + + CCValAssign &VA = ArgLocs[i]; + + if (VA.isRegLoc()) { + // Arguments passed in registers + EVT RegVT = VA.getLocVT(); + switch (RegVT.getSimpleVT().SimpleTy) { + default: + { +#ifndef NDEBUG + errs() << "LowerFormalArguments Unhandled argument type: " + << RegVT.getSimpleVT().SimpleTy << "\n"; +#endif + llvm_unreachable(0); + } + case MVT::i32: + unsigned VReg = RegInfo.createVirtualRegister( + XCore::GRRegsRegisterClass); + RegInfo.addLiveIn(VA.getLocReg(), VReg); + InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); + } + } else { + // sanity check + assert(VA.isMemLoc()); + // Load the argument to a virtual register + unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; + if (ObjSize > StackSlotSize) { + errs() << "LowerFormalArguments Unhandled argument type: " + << (unsigned)VA.getLocVT().getSimpleVT().SimpleTy + << "\n"; + } + // Create the frame index object for this incoming parameter... + int FI = MFI->CreateFixedObject(ObjSize, + LRSaveSize + VA.getLocMemOffset(), + true, false); + + // Create the SelectionDAG nodes corresponding to a load + //from this parameter + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); + InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0)); + } + } + + if (isVarArg) { + /* Argument registers */ + static const unsigned ArgRegs[] = { + XCore::R0, XCore::R1, XCore::R2, XCore::R3 + }; + XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); + unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs, + array_lengthof(ArgRegs)); + if (FirstVAReg < array_lengthof(ArgRegs)) { + SmallVector<SDValue, 4> MemOps; + int offset = 0; + // Save remaining registers, storing higher register numbers at a higher + // address + for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) { + // Create a stack slot + int FI = MFI->CreateFixedObject(4, offset, true, false); + if (i == FirstVAReg) { + XFI->setVarArgsFrameIndex(FI); + } + offset -= StackSlotSize; + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); + // Move argument from phys reg -> virt reg + unsigned VReg = RegInfo.createVirtualRegister( + XCore::GRRegsRegisterClass); + RegInfo.addLiveIn(ArgRegs[i], VReg); + SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); + // Move argument from virt reg -> stack + SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); + MemOps.push_back(Store); + } + if (!MemOps.empty()) + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, + &MemOps[0], MemOps.size()); + } else { + // This will point to the next argument passed via stack. 
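+      // e.g. for f(int a, int b, int c, int d, ...), assuming the standard
+      // R0..R3 argument registers are all taken by the named arguments,
+      // va_start points at the first caller-pushed word, i.e. at
+      // LRSaveSize + CCInfo.getNextStackOffset().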
+ XFI->setVarArgsFrameIndex( + MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(), + true, false)); + } + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Return Value Calling Convention Implementation +//===----------------------------------------------------------------------===// + +bool XCoreTargetLowering:: +CanLowerReturn(CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<EVT> &OutTys, + const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags, + SelectionDAG &DAG) { + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, getTargetMachine(), + RVLocs, *DAG.getContext()); + return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore); +} + +SDValue +XCoreTargetLowering::LowerReturn(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + DebugLoc dl, SelectionDAG &DAG) { + + // CCValAssign - represent the assignment of + // the return value to a location + SmallVector<CCValAssign, 16> RVLocs; + + // CCState - Info about the registers and stack slot. + CCState CCInfo(CallConv, isVarArg, getTargetMachine(), + RVLocs, *DAG.getContext()); + + // Analize return values. + CCInfo.AnalyzeReturn(Outs, RetCC_XCore); + + // If this is the first return lowered for this function, add + // the regs to the liveout set for the function. + if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { + for (unsigned i = 0; i != RVLocs.size(); ++i) + if (RVLocs[i].isRegLoc()) + DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); + } + + SDValue Flag; + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + + Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), + Outs[i].Val, Flag); + + // guarantee that all emitted copies are + // stuck together, avoiding something bad + Flag = Chain.getValue(1); + } + + // Return on XCore is always a "retsp 0" + if (Flag.getNode()) + return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, + Chain, DAG.getConstant(0, MVT::i32), Flag); + else // Return Void + return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, + Chain, DAG.getConstant(0, MVT::i32)); +} + +//===----------------------------------------------------------------------===// +// Other Lowering Code +//===----------------------------------------------------------------------===// + +MachineBasicBlock * +XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *BB, + DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const { + const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); + DebugLoc dl = MI->getDebugLoc(); + assert((MI->getOpcode() == XCore::SELECT_CC) && + "Unexpected instr type to insert"); + + // To "insert" a SELECT_CC instruction, we actually have to insert the diamond + // control-flow pattern. The incoming instruction knows the destination vreg + // to set, the condition code register to branch on, the true/false values to + // select between, and a branch opcode to use. + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = BB; + ++It; + + // thisMBB: + // ... + // TrueVal = ... 
+ // cmpTY ccX, r1, r2 + // bCC copy1MBB + // fallthrough --> copy0MBB + MachineBasicBlock *thisMBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); + BuildMI(BB, dl, TII.get(XCore::BRFT_lru6)) + .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); + F->insert(It, copy0MBB); + F->insert(It, sinkMBB); + // Update machine-CFG edges by first adding all successors of the current + // block to the new block which will contain the Phi node for the select. + // Also inform sdisel of the edge changes. + for (MachineBasicBlock::succ_iterator I = BB->succ_begin(), + E = BB->succ_end(); I != E; ++I) { + EM->insert(std::make_pair(*I, sinkMBB)); + sinkMBB->addSuccessor(*I); + } + // Next, remove all successors of the current block, and add the true + // and fallthrough blocks as its successors. + while (!BB->succ_empty()) + BB->removeSuccessor(BB->succ_begin()); + // Next, add the true and fallthrough blocks as its successors. + BB->addSuccessor(copy0MBB); + BB->addSuccessor(sinkMBB); + + // copy0MBB: + // %FalseValue = ... + // # fallthrough to sinkMBB + BB = copy0MBB; + + // Update machine-CFG edges + BB->addSuccessor(sinkMBB); + + // sinkMBB: + // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] + // ... + BB = sinkMBB; + BuildMI(BB, dl, TII.get(XCore::PHI), MI->getOperand(0).getReg()) + .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) + .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); + + F->DeleteMachineInstr(MI); // The pseudo instruction is gone now. + return BB; +} + +//===----------------------------------------------------------------------===// +// Target Optimization Hooks +//===----------------------------------------------------------------------===// + +SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N, + DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; + DebugLoc dl = N->getDebugLoc(); + switch (N->getOpcode()) { + default: break; + case ISD::STORE: { + // Replace unaligned store of unaligned load with memmove. 
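+    // i.e. when a (store (load p), q) pair is under-aligned with respect to
+    // the type's ABI alignment and the loaded value has no other uses, the
+    // pair is rewritten as a memmove(q, p, size) of the same width.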
+ StoreSDNode *ST = cast<StoreSDNode>(N); + if (!DCI.isBeforeLegalize() || + allowsUnalignedMemoryAccesses(ST->getMemoryVT()) || + ST->isVolatile() || ST->isIndexed()) { + break; + } + SDValue Chain = ST->getChain(); + + unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits(); + if (StoreBits % 8) { + break; + } + unsigned ABIAlignment = getTargetData()->getABITypeAlignment( + ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext())); + unsigned Alignment = ST->getAlignment(); + if (Alignment >= ABIAlignment) { + break; + } + + if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) { + if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() && + LD->getAlignment() == Alignment && + !LD->isVolatile() && !LD->isIndexed() && + Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) { + return DAG.getMemmove(Chain, dl, ST->getBasePtr(), + LD->getBasePtr(), + DAG.getConstant(StoreBits/8, MVT::i32), + Alignment, ST->getSrcValue(), + ST->getSrcValueOffset(), LD->getSrcValue(), + LD->getSrcValueOffset()); + } + } + break; + } + } + return SDValue(); +} + +//===----------------------------------------------------------------------===// +// Addressing mode description hooks +//===----------------------------------------------------------------------===// + +static inline bool isImmUs(int64_t val) +{ + return (val >= 0 && val <= 11); +} + +static inline bool isImmUs2(int64_t val) +{ + return (val%2 == 0 && isImmUs(val/2)); +} + +static inline bool isImmUs4(int64_t val) +{ + return (val%4 == 0 && isImmUs(val/4)); +} + +/// isLegalAddressingMode - Return true if the addressing mode represented +/// by AM is legal for this target, for a load/store of the specified type. +bool +XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, + const Type *Ty) const { + // Be conservative with void + // FIXME: Can we be more aggressive? 
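+  // The checks below mirror the scaled addressing forms available on the
+  // XCore: byte accesses accept reg+reg or a short unsigned immediate,
+  // 16-bit accesses accept reg+reg<<1 or an even immediate, and word (or
+  // larger) accesses accept reg+reg<<2 or a multiple-of-4 immediate.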
+ if (Ty->getTypeID() == Type::VoidTyID) + return false; + + const TargetData *TD = TM.getTargetData(); + unsigned Size = TD->getTypeAllocSize(Ty); + if (AM.BaseGV) { + return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 && + AM.BaseOffs%4 == 0; + } + + switch (Size) { + case 1: + // reg + imm + if (AM.Scale == 0) { + return isImmUs(AM.BaseOffs); + } + // reg + reg + return AM.Scale == 1 && AM.BaseOffs == 0; + case 2: + case 3: + // reg + imm + if (AM.Scale == 0) { + return isImmUs2(AM.BaseOffs); + } + // reg + reg<<1 + return AM.Scale == 2 && AM.BaseOffs == 0; + default: + // reg + imm + if (AM.Scale == 0) { + return isImmUs4(AM.BaseOffs); + } + // reg + reg<<2 + return AM.Scale == 4 && AM.BaseOffs == 0; + } + + return false; +} + +//===----------------------------------------------------------------------===// +// XCore Inline Assembly Support +//===----------------------------------------------------------------------===// + +std::vector<unsigned> XCoreTargetLowering:: +getRegClassForInlineAsmConstraint(const std::string &Constraint, + EVT VT) const +{ + if (Constraint.size() != 1) + return std::vector<unsigned>(); + + switch (Constraint[0]) { + default : break; + case 'r': + return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2, + XCore::R3, XCore::R4, XCore::R5, + XCore::R6, XCore::R7, XCore::R8, + XCore::R9, XCore::R10, XCore::R11, 0); + break; + } + return std::vector<unsigned>(); +} diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h new file mode 100644 index 0000000..f7b620e --- /dev/null +++ b/lib/Target/XCore/XCoreISelLowering.h @@ -0,0 +1,172 @@ +//===-- XCoreISelLowering.h - XCore DAG Lowering Interface ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that XCore uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREISELLOWERING_H +#define XCOREISELLOWERING_H + +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Target/TargetLowering.h" +#include "XCore.h" + +namespace llvm { + + // Forward delcarations + class XCoreSubtarget; + class XCoreTargetMachine; + + namespace XCoreISD { + enum NodeType { + // Start the numbering where the builtin ops and target ops leave off. + FIRST_NUMBER = ISD::BUILTIN_OP_END+XCore::INSTRUCTION_LIST_END, + + // Branch and link (call) + BL, + + // pc relative address + PCRelativeWrapper, + + // dp relative address + DPRelativeWrapper, + + // cp relative address + CPRelativeWrapper, + + // Store word to stack + STWSP, + + // Corresponds to retsp instruction + RETSP, + + // Corresponds to LADD instruction + LADD, + + // Corresponds to LSUB instruction + LSUB + }; + } + + //===--------------------------------------------------------------------===// + // TargetLowering Implementation + //===--------------------------------------------------------------------===// + class XCoreTargetLowering : public TargetLowering + { + public: + + explicit XCoreTargetLowering(XCoreTargetMachine &TM); + + /// LowerOperation - Provide custom lowering hooks for some operations. + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); + + /// ReplaceNodeResults - Replace the results of node with an illegal result + /// type with new values built out of custom code. 
+ /// + virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, + SelectionDAG &DAG); + + /// getTargetNodeName - This method returns the name of a target specific + // DAG node. + virtual const char *getTargetNodeName(unsigned Opcode) const; + + virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *MBB, + DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const; + + virtual bool isLegalAddressingMode(const AddrMode &AM, + const Type *Ty) const; + + /// getFunctionAlignment - Return the Log2 alignment of this function. + virtual unsigned getFunctionAlignment(const Function *F) const; + + private: + const XCoreTargetMachine &TM; + const XCoreSubtarget &Subtarget; + + // Lower Operand helpers + SDValue LowerCCCArguments(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals); + SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals); + SDValue LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals); + SDValue getReturnAddressFrameIndex(SelectionDAG &DAG); + SDValue getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, + SelectionDAG &DAG); + + // Lower Operand specifics + SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG); + SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); + SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG); + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG); + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); + + // Inline asm support + std::vector<unsigned> + getRegClassForInlineAsmConstraint(const std::string &Constraint, + EVT VT) const; + + // Expand specifics + SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG); + + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + + virtual SDValue + LowerFormalArguments(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals); + + virtual SDValue + LowerCall(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool &isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals); + + virtual SDValue + LowerReturn(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + DebugLoc dl, SelectionDAG &DAG); + + virtual bool + CanLowerReturn(CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<EVT> &OutTys, + const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags, + SelectionDAG &DAG); + }; +} + +#endif // XCOREISELLOWERING_H diff --git a/lib/Target/XCore/XCoreInstrFormats.td 
b/lib/Target/XCore/XCoreInstrFormats.td new file mode 100644 index 0000000..8002c99 --- /dev/null +++ b/lib/Target/XCore/XCoreInstrFormats.td @@ -0,0 +1,120 @@ +//===- XCoreInstrFormats.td - XCore Instruction Formats ----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Instruction format superclass +//===----------------------------------------------------------------------===// +class InstXCore<dag outs, dag ins, string asmstr, list<dag> pattern> + : Instruction { + field bits<32> Inst; + + let Namespace = "XCore"; + dag OutOperandList = outs; + dag InOperandList = ins; + let AsmString = asmstr; + let Pattern = pattern; +} + +// XCore pseudo instructions format +class PseudoInstXCore<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern>; + +//===----------------------------------------------------------------------===// +// Instruction formats +//===----------------------------------------------------------------------===// + +class _F3R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FL3R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _F2RUS<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FL2RUS<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FRU6<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FLRU6<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FU6<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FLU6<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FU10<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FLU10<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _F2R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FRUS<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _FL2R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _F1R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _F0R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _L4R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _L5R<dag outs, dag ins, string asmstr, list<dag> pattern> + : 
InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} + +class _L6R<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstXCore<outs, ins, asmstr, pattern> { + let Inst{31-0} = 0; +} diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp new file mode 100644 index 0000000..5a54844 --- /dev/null +++ b/lib/Target/XCore/XCoreInstrInfo.cpp @@ -0,0 +1,465 @@ +//===- XCoreInstrInfo.cpp - XCore Instruction Information -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#include "XCoreMachineFunctionInfo.h" +#include "XCoreInstrInfo.h" +#include "XCore.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "XCoreGenInstrInfo.inc" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { +namespace XCore { + + // XCore Condition Codes + enum CondCode { + COND_TRUE, + COND_FALSE, + COND_INVALID + }; +} +} + +using namespace llvm; + +XCoreInstrInfo::XCoreInstrInfo() + : TargetInstrInfoImpl(XCoreInsts, array_lengthof(XCoreInsts)), + RI(*this) { +} + +static bool isZeroImm(const MachineOperand &op) { + return op.isImm() && op.getImm() == 0; +} + +/// Return true if the instruction is a register to register move and +/// leave the source and dest operands in the passed parameters. +/// +bool XCoreInstrInfo::isMoveInstr(const MachineInstr &MI, + unsigned &SrcReg, unsigned &DstReg, + unsigned &SrcSR, unsigned &DstSR) const { + SrcSR = DstSR = 0; // No sub-registers. + + // We look for 4 kinds of patterns here: + // add dst, src, 0 + // sub dst, src, 0 + // or dst, src, src + // and dst, src, src + if ((MI.getOpcode() == XCore::ADD_2rus || MI.getOpcode() == XCore::SUB_2rus) + && isZeroImm(MI.getOperand(2))) { + DstReg = MI.getOperand(0).getReg(); + SrcReg = MI.getOperand(1).getReg(); + return true; + } else if ((MI.getOpcode() == XCore::OR_3r || MI.getOpcode() == XCore::AND_3r) + && MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) { + DstReg = MI.getOperand(0).getReg(); + SrcReg = MI.getOperand(1).getReg(); + return true; + } + return false; +} + +/// isLoadFromStackSlot - If the specified machine instruction is a direct +/// load from a stack slot, return the virtual or physical register number of +/// the destination along with the FrameIndex of the loaded stack slot. If +/// not, return 0. This predicate must return 0 if the instruction has +/// any side effects other than loading from the stack slot. 
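+/// On XCore this matches the LDWFI pseudo instruction whose address operand
+/// is a frame index with a zero immediate offset.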
+unsigned +XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const{ + int Opcode = MI->getOpcode(); + if (Opcode == XCore::LDWFI) + { + if ((MI->getOperand(1).isFI()) && // is a stack slot + (MI->getOperand(2).isImm()) && // the imm is zero + (isZeroImm(MI->getOperand(2)))) + { + FrameIndex = MI->getOperand(1).getIndex(); + return MI->getOperand(0).getReg(); + } + } + return 0; +} + + /// isStoreToStackSlot - If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. +unsigned +XCoreInstrInfo::isStoreToStackSlot(const MachineInstr *MI, + int &FrameIndex) const { + int Opcode = MI->getOpcode(); + if (Opcode == XCore::STWFI) + { + if ((MI->getOperand(1).isFI()) && // is a stack slot + (MI->getOperand(2).isImm()) && // the imm is zero + (isZeroImm(MI->getOperand(2)))) + { + FrameIndex = MI->getOperand(1).getIndex(); + return MI->getOperand(0).getReg(); + } + } + return 0; +} + +//===----------------------------------------------------------------------===// +// Branch Analysis +//===----------------------------------------------------------------------===// + +static inline bool IsBRU(unsigned BrOpc) { + return BrOpc == XCore::BRFU_u6 + || BrOpc == XCore::BRFU_lu6 + || BrOpc == XCore::BRBU_u6 + || BrOpc == XCore::BRBU_lu6; +} + +static inline bool IsBRT(unsigned BrOpc) { + return BrOpc == XCore::BRFT_ru6 + || BrOpc == XCore::BRFT_lru6 + || BrOpc == XCore::BRBT_ru6 + || BrOpc == XCore::BRBT_lru6; +} + +static inline bool IsBRF(unsigned BrOpc) { + return BrOpc == XCore::BRFF_ru6 + || BrOpc == XCore::BRFF_lru6 + || BrOpc == XCore::BRBF_ru6 + || BrOpc == XCore::BRBF_lru6; +} + +static inline bool IsCondBranch(unsigned BrOpc) { + return IsBRF(BrOpc) || IsBRT(BrOpc); +} + +/// GetCondFromBranchOpc - Return the XCore CC that matches +/// the correspondent Branch instruction opcode. +static XCore::CondCode GetCondFromBranchOpc(unsigned BrOpc) +{ + if (IsBRT(BrOpc)) { + return XCore::COND_TRUE; + } else if (IsBRF(BrOpc)) { + return XCore::COND_FALSE; + } else { + return XCore::COND_INVALID; + } +} + +/// GetCondBranchFromCond - Return the Branch instruction +/// opcode that matches the cc. +static inline unsigned GetCondBranchFromCond(XCore::CondCode CC) +{ + switch (CC) { + default: llvm_unreachable("Illegal condition code!"); + case XCore::COND_TRUE : return XCore::BRFT_lru6; + case XCore::COND_FALSE : return XCore::BRFF_lru6; + } +} + +/// GetOppositeBranchCondition - Return the inverse of the specified +/// condition, e.g. turning COND_E to COND_NE. +static inline XCore::CondCode GetOppositeBranchCondition(XCore::CondCode CC) +{ + switch (CC) { + default: llvm_unreachable("Illegal condition code!"); + case XCore::COND_TRUE : return XCore::COND_FALSE; + case XCore::COND_FALSE : return XCore::COND_TRUE; + } +} + +/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning +/// true if it cannot be understood (e.g. it's a switch dispatch or isn't +/// implemented for a target). Upon success, this returns false and returns +/// with the following information in various cases: +/// +/// 1. If this block ends with no branches (it just falls through to its succ) +/// just return false, leaving TBB/FBB null. +/// 2. 
If this block ends with only an unconditional branch, it sets TBB to be +/// the destination block. +/// 3. If this block ends with an conditional branch and it falls through to +/// an successor block, it sets TBB to be the branch destination block and a +/// list of operands that evaluate the condition. These +/// operands can be passed to other TargetInstrInfo methods to create new +/// branches. +/// 4. If this block ends with an conditional branch and an unconditional +/// block, it returns the 'true' destination in TBB, the 'false' destination +/// in FBB, and a list of operands that evaluate the condition. These +/// operands can be passed to other TargetInstrInfo methods to create new +/// branches. +/// +/// Note that RemoveBranch and InsertBranch must be implemented to support +/// cases where this method returns success. +/// +bool +XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, + bool AllowModify) const { + // If the block has no terminators, it just falls into the block after it. + MachineBasicBlock::iterator I = MBB.end(); + if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) + return false; + + // Get the last instruction in the block. + MachineInstr *LastInst = I; + + // If there is only one terminator instruction, process it. + if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { + if (IsBRU(LastInst->getOpcode())) { + TBB = LastInst->getOperand(0).getMBB(); + return false; + } + + XCore::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode()); + if (BranchCode == XCore::COND_INVALID) + return true; // Can't handle indirect branch. + + // Conditional branch + // Block ends with fall-through condbranch. + + TBB = LastInst->getOperand(1).getMBB(); + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + Cond.push_back(LastInst->getOperand(0)); + return false; + } + + // Get the instruction before it if it's a terminator. + MachineInstr *SecondLastInst = I; + + // If there are three terminators, we don't know what sort of block this is. + if (SecondLastInst && I != MBB.begin() && + isUnpredicatedTerminator(--I)) + return true; + + unsigned SecondLastOpc = SecondLastInst->getOpcode(); + XCore::CondCode BranchCode = GetCondFromBranchOpc(SecondLastOpc); + + // If the block ends with conditional branch followed by unconditional, + // handle it. + if (BranchCode != XCore::COND_INVALID + && IsBRU(LastInst->getOpcode())) { + + TBB = SecondLastInst->getOperand(1).getMBB(); + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + Cond.push_back(SecondLastInst->getOperand(0)); + + FBB = LastInst->getOperand(0).getMBB(); + return false; + } + + // If the block ends with two unconditional branches, handle it. The second + // one is not executed, so remove it. + if (IsBRU(SecondLastInst->getOpcode()) && + IsBRU(LastInst->getOpcode())) { + TBB = SecondLastInst->getOperand(0).getMBB(); + I = LastInst; + if (AllowModify) + I->eraseFromParent(); + return false; + } + + // Otherwise, can't handle this. + return true; +} + +unsigned +XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + const SmallVectorImpl<MachineOperand> &Cond)const{ + // FIXME there should probably be a DebugLoc argument here + DebugLoc dl = DebugLoc::getUnknownLoc(); + // Shouldn't be a fall through. 
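+  // Conditions produced by AnalyzeBranch are a (condition code, register)
+  // pair; GetCondBranchFromCond maps them back to BRFT_lru6/BRFF_lru6, and
+  // unconditional edges use BRFU_lu6.  The return value is the number of
+  // branch instructions inserted (1 or 2).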
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough"); + assert((Cond.size() == 2 || Cond.size() == 0) && + "Unexpected number of components!"); + + if (FBB == 0) { // One way branch. + if (Cond.empty()) { + // Unconditional branch + BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(TBB); + } else { + // Conditional branch. + unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm()); + BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg()) + .addMBB(TBB); + } + return 1; + } + + // Two-way Conditional branch. + assert(Cond.size() == 2 && "Unexpected number of components!"); + unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm()); + BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg()) + .addMBB(TBB); + BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(FBB); + return 2; +} + +unsigned +XCoreInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator I = MBB.end(); + if (I == MBB.begin()) return 0; + --I; + if (!IsBRU(I->getOpcode()) && !IsCondBranch(I->getOpcode())) + return 0; + + // Remove the branch. + I->eraseFromParent(); + + I = MBB.end(); + + if (I == MBB.begin()) return 1; + --I; + if (!IsCondBranch(I->getOpcode())) + return 1; + + // Remove the branch. + I->eraseFromParent(); + return 2; +} + +bool XCoreInstrInfo::copyRegToReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, unsigned SrcReg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC) const { + DebugLoc DL = DebugLoc::getUnknownLoc(); + if (I != MBB.end()) DL = I->getDebugLoc(); + + if (DestRC == SrcRC) { + if (DestRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, DL, get(XCore::ADD_2rus), DestReg) + .addReg(SrcReg) + .addImm(0); + return true; + } else { + return false; + } + } + + if (SrcRC == XCore::RRegsRegisterClass && SrcReg == XCore::SP && + DestRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, DL, get(XCore::LDAWSP_ru6), DestReg) + .addImm(0); + return true; + } + if (DestRC == XCore::RRegsRegisterClass && DestReg == XCore::SP && + SrcRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, DL, get(XCore::SETSP_1r)) + .addReg(SrcReg); + return true; + } + return false; +} + +void XCoreInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned SrcReg, bool isKill, + int FrameIndex, + const TargetRegisterClass *RC) const +{ + DebugLoc DL = DebugLoc::getUnknownLoc(); + if (I != MBB.end()) DL = I->getDebugLoc(); + BuildMI(MBB, I, DL, get(XCore::STWFI)) + .addReg(SrcReg, getKillRegState(isKill)) + .addFrameIndex(FrameIndex) + .addImm(0); +} + +void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC) const +{ + DebugLoc DL = DebugLoc::getUnknownLoc(); + if (I != MBB.end()) DL = I->getDebugLoc(); + BuildMI(MBB, I, DL, get(XCore::LDWFI), DestReg) + .addFrameIndex(FrameIndex) + .addImm(0); +} + +bool XCoreInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const +{ + if (CSI.empty()) { + return true; + } + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo *MFI = MF->getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>(); + + bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF); + + DebugLoc DL = DebugLoc::getUnknownLoc(); + if (MI != MBB.end()) DL = MI->getDebugLoc(); + + 
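+  // Spill each callee-saved register to its assigned frame index; when
+  // frame moves are requested (needsFrameMoves), also emit a DBG_LABEL so
+  // the save location can be described in the frame move information.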
for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin(); + it != CSI.end(); ++it) { + // Add the callee-saved register as live-in. It's killed at the spill. + MBB.addLiveIn(it->getReg()); + + storeRegToStackSlot(MBB, MI, it->getReg(), true, + it->getFrameIdx(), it->getRegClass()); + if (emitFrameMoves) { + unsigned SaveLabelId = MMI->NextLabelID(); + BuildMI(MBB, MI, DL, get(XCore::DBG_LABEL)).addImm(SaveLabelId); + XFI->getSpillLabels().push_back( + std::pair<unsigned, CalleeSavedInfo>(SaveLabelId, *it)); + } + } + return true; +} + +bool XCoreInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const +{ + bool AtStart = MI == MBB.begin(); + MachineBasicBlock::iterator BeforeI = MI; + if (!AtStart) + --BeforeI; + for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin(); + it != CSI.end(); ++it) { + + loadRegFromStackSlot(MBB, MI, it->getReg(), + it->getFrameIdx(), + it->getRegClass()); + assert(MI != MBB.begin() && + "loadRegFromStackSlot didn't insert any code!"); + // Insert in reverse order. loadRegFromStackSlot can insert multiple + // instructions. + if (AtStart) + MI = MBB.begin(); + else { + MI = BeforeI; + ++MI; + } + } + return true; +} + +/// ReverseBranchCondition - Return the inverse opcode of the +/// specified Branch instruction. +bool XCoreInstrInfo:: +ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const +{ + assert((Cond.size() == 2) && + "Invalid XCore branch condition!"); + Cond[0].setImm(GetOppositeBranchCondition((XCore::CondCode)Cond[0].getImm())); + return false; +} diff --git a/lib/Target/XCore/XCoreInstrInfo.h b/lib/Target/XCore/XCoreInstrInfo.h new file mode 100644 index 0000000..3e0a765 --- /dev/null +++ b/lib/Target/XCore/XCoreInstrInfo.h @@ -0,0 +1,96 @@ +//===- XCoreInstrInfo.h - XCore Instruction Information ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREINSTRUCTIONINFO_H +#define XCOREINSTRUCTIONINFO_H + +#include "llvm/Target/TargetInstrInfo.h" +#include "XCoreRegisterInfo.h" + +namespace llvm { + +class XCoreInstrInfo : public TargetInstrInfoImpl { + const XCoreRegisterInfo RI; +public: + XCoreInstrInfo(); + + /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As + /// such, whenever a client has an instance of instruction info, it should + /// always be able to get register info as well (through this method). + /// + virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; } + + /// Return true if the instruction is a register to register move and return + /// the source and dest operands and their sub-register indices by reference. + virtual bool isMoveInstr(const MachineInstr &MI, + unsigned &SrcReg, unsigned &DstReg, + unsigned &SrcSubIdx, unsigned &DstSubIdx) const; + + /// isLoadFromStackSlot - If the specified machine instruction is a direct + /// load from a stack slot, return the virtual or physical register number of + /// the destination along with the FrameIndex of the loaded stack slot. If + /// not, return 0. 
This predicate must return 0 if the instruction has + /// any side effects other than loading from the stack slot. + virtual unsigned isLoadFromStackSlot(const MachineInstr *MI, + int &FrameIndex) const; + + /// isStoreToStackSlot - If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. + virtual unsigned isStoreToStackSlot(const MachineInstr *MI, + int &FrameIndex) const; + + virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, + bool AllowModify) const; + + virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + const SmallVectorImpl<MachineOperand> &Cond) const; + + virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const; + + virtual bool copyRegToReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, unsigned SrcReg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC) const; + + virtual void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC) const; + + virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC) const; + + virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const; + + virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI) const; + + virtual bool ReverseBranchCondition( + SmallVectorImpl<MachineOperand> &Cond) const; +}; + +} + +#endif diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td new file mode 100644 index 0000000..10dc18c --- /dev/null +++ b/lib/Target/XCore/XCoreInstrInfo.td @@ -0,0 +1,1001 @@ +//===- XCoreInstrInfo.td - Target Description for XCore ----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the XCore instructions in TableGen format. +// +//===----------------------------------------------------------------------===// + +// Uses of CP, DP are not currently reflected in the patterns, since +// having a physical register as an operand prevents loop hoisting and +// since the value of these registers never changes during the life of the +// function. + +//===----------------------------------------------------------------------===// +// Instruction format superclass. +//===----------------------------------------------------------------------===// + +include "XCoreInstrFormats.td" + +//===----------------------------------------------------------------------===// +// XCore specific DAG Nodes. 
+// + +// Call +def SDT_XCoreBranchLink : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>; +def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink, + [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; + +def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTNone, + [SDNPHasChain, SDNPOptInFlag]>; + +def SDT_XCoreAddress : SDTypeProfile<1, 1, + [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; + +def pcrelwrapper : SDNode<"XCoreISD::PCRelativeWrapper", SDT_XCoreAddress, + []>; + +def dprelwrapper : SDNode<"XCoreISD::DPRelativeWrapper", SDT_XCoreAddress, + []>; + +def cprelwrapper : SDNode<"XCoreISD::CPRelativeWrapper", SDT_XCoreAddress, + []>; + +def SDT_XCoreStwsp : SDTypeProfile<0, 2, [SDTCisInt<1>]>; +def XCoreStwsp : SDNode<"XCoreISD::STWSP", SDT_XCoreStwsp, + [SDNPHasChain]>; + +// These are target-independent nodes, but have target-specific formats. +def SDT_XCoreCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; +def SDT_XCoreCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, + SDTCisVT<1, i32> ]>; + +def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_XCoreCallSeqStart, + [SDNPHasChain, SDNPOutFlag]>; +def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XCoreCallSeqEnd, + [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; + +//===----------------------------------------------------------------------===// +// Instruction Pattern Stuff +//===----------------------------------------------------------------------===// + +def div4_xform : SDNodeXForm<imm, [{ + // Transformation function: imm/4 + assert(N->getZExtValue() % 4 == 0); + return getI32Imm(N->getZExtValue()/4); +}]>; + +def msksize_xform : SDNodeXForm<imm, [{ + // Transformation function: get the size of a mask + assert(isMask_32(N->getZExtValue())); + // look for the first non-zero bit + return getI32Imm(32 - CountLeadingZeros_32(N->getZExtValue())); +}]>; + +def neg_xform : SDNodeXForm<imm, [{ + // Transformation function: -imm + uint32_t value = N->getZExtValue(); + return getI32Imm(-value); +}]>; + +def bpwsub_xform : SDNodeXForm<imm, [{ + // Transformation function: 32-imm + uint32_t value = N->getZExtValue(); + return getI32Imm(32-value); +}]>; + +def div4neg_xform : SDNodeXForm<imm, [{ + // Transformation function: -imm/4 + uint32_t value = N->getZExtValue(); + assert(-value % 4 == 0); + return getI32Imm(-value/4); +}]>; + +def immUs4Neg : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return (-value)%4 == 0 && (-value)/4 <= 11; +}]>; + +def immUs4 : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return value%4 == 0 && value/4 <= 11; +}]>; + +def immUsNeg : PatLeaf<(imm), [{ + return -((uint32_t)N->getZExtValue()) <= 11; +}]>; + +def immUs : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() <= 11; +}]>; + +def immU6 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 6); +}]>; + +def immU10 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 10); +}]>; + +def immU16 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 16); +}]>; + +def immU20 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 20); +}]>; + +def immMskBitp : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + if (!isMask_32(value)) { + return false; + } + int msksize = 32 - CountLeadingZeros_32(value); + return (msksize >= 1 && msksize <= 8) + || msksize == 16 + || msksize == 24 + || msksize == 32; +}]>; + +def immBitp : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return (value >= 1 && value <= 8) + || value == 16 + || value == 24 + || value == 32; +}]>; + 
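+// The immediate predicates above mirror the XCore operand encodings, e.g.
+//   immUs          : 0..11              (short 'us' immediate operand)
+//   immU6 / immU16 : 0..63 / 0..65535   (u6 and long-form lu6 encodings)
+//   immBitp        : 1..8, 16, 24, 32   (legal bit-position operands)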
+def immBpwSubBitp : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return (value >= 24 && value <= 31) + || value == 16 + || value == 8 + || value == 0; +}]>; + +def lda16f : PatFrag<(ops node:$addr, node:$offset), + (add node:$addr, (shl node:$offset, 1))>; +def lda16b : PatFrag<(ops node:$addr, node:$offset), + (sub node:$addr, (shl node:$offset, 1))>; +def ldawf : PatFrag<(ops node:$addr, node:$offset), + (add node:$addr, (shl node:$offset, 2))>; +def ldawb : PatFrag<(ops node:$addr, node:$offset), + (sub node:$addr, (shl node:$offset, 2))>; + +// Instruction operand types +def calltarget : Operand<i32>; +def brtarget : Operand<OtherVT>; +def pclabel : Operand<i32>; + +// Addressing modes +def ADDRspii : ComplexPattern<i32, 2, "SelectADDRspii", [add, frameindex], []>; +def ADDRdpii : ComplexPattern<i32, 2, "SelectADDRdpii", [add, dprelwrapper], + []>; +def ADDRcpii : ComplexPattern<i32, 2, "SelectADDRcpii", [add, cprelwrapper], + []>; + +// Address operands +def MEMii : Operand<i32> { + let PrintMethod = "printMemOperand"; + let MIOperandInfo = (ops i32imm, i32imm); +} + +//===----------------------------------------------------------------------===// +// Instruction Class Templates +//===----------------------------------------------------------------------===// + +// Three operand short + +multiclass F3R_2RUS<string OpcStr, SDNode OpNode> { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immUs:$c))]>; +} + +multiclass F3R_2RUS_np<string OpcStr> { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; +} + +multiclass F3R_2RBITP<string OpcStr, SDNode OpNode> { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immBitp:$c))]>; +} + +class F3R<string OpcStr, SDNode OpNode> : _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + +class F3R_np<string OpcStr> : _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; +// Three operand long + +/// FL3R_L2RUS multiclass - Define a normal FL3R/FL2RUS pattern in one shot. +multiclass FL3R_L2RUS<string OpcStr, SDNode OpNode> { + def _l3r: _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _l2rus : _FL2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immUs:$c))]>; +} + +/// FL3R_L2RUS multiclass - Define a normal FL3R/FL2RUS pattern in one shot. 
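+/// FL3R_L2RBITP is the same, except that the short immediate form only
+/// accepts bit-position operands (immBitp), as used by the shift patterns.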
+multiclass FL3R_L2RBITP<string OpcStr, SDNode OpNode> { + def _l3r: _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _l2rus : _FL2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immBitp:$c))]>; +} + +class FL3R<string OpcStr, SDNode OpNode> : _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + +// Register - U6 +// Operand register - U6 +multiclass FRU6_LRU6_branch<string OpcStr> { + def _ru6: _FRU6< + (outs), (ins GRRegs:$cond, brtarget:$dest), + !strconcat(OpcStr, " $cond, $dest"), + []>; + def _lru6: _FLRU6< + (outs), (ins GRRegs:$cond, brtarget:$dest), + !strconcat(OpcStr, " $cond, $dest"), + []>; +} + +multiclass FRU6_LRU6_cp<string OpcStr> { + def _ru6: _FRU6< + (outs GRRegs:$dst), (ins i32imm:$a), + !strconcat(OpcStr, " $dst, cp[$a]"), + []>; + def _lru6: _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$a), + !strconcat(OpcStr, " $dst, cp[$a]"), + []>; +} + +// U6 +multiclass FU6_LU6<string OpcStr, SDNode OpNode> { + def _u6: _FU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + [(OpNode immU6:$b)]>; + def _lu6: _FLU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + [(OpNode immU16:$b)]>; +} + +multiclass FU6_LU6_np<string OpcStr> { + def _u6: _FU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; + def _lu6: _FLU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; +} + +// U10 +multiclass FU10_LU10_np<string OpcStr> { + def _u10: _FU10< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; + def _lu10: _FLU10< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; +} + +// Two operand short + +class F2R_np<string OpcStr> : _F2R< + (outs GRRegs:$dst), (ins GRRegs:$b), + !strconcat(OpcStr, " $dst, $b"), + []>; + +// Two operand long + +//===----------------------------------------------------------------------===// +// Pseudo Instructions +//===----------------------------------------------------------------------===// + +let Defs = [SP], Uses = [SP] in { +def ADJCALLSTACKDOWN : PseudoInstXCore<(outs), (ins i32imm:$amt), + "${:comment} ADJCALLSTACKDOWN $amt", + [(callseq_start timm:$amt)]>; +def ADJCALLSTACKUP : PseudoInstXCore<(outs), (ins i32imm:$amt1, i32imm:$amt2), + "${:comment} ADJCALLSTACKUP $amt1", + [(callseq_end timm:$amt1, timm:$amt2)]>; +} + +def LDWFI : PseudoInstXCore<(outs GRRegs:$dst), (ins MEMii:$addr), + "${:comment} LDWFI $dst, $addr", + [(set GRRegs:$dst, (load ADDRspii:$addr))]>; + +def LDAWFI : PseudoInstXCore<(outs GRRegs:$dst), (ins MEMii:$addr), + "${:comment} LDAWFI $dst, $addr", + [(set GRRegs:$dst, ADDRspii:$addr)]>; + +def STWFI : PseudoInstXCore<(outs), (ins GRRegs:$src, MEMii:$addr), + "${:comment} STWFI $src, $addr", + [(store GRRegs:$src, ADDRspii:$addr)]>; + +// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after +// instruction selection into a branch sequence. 
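+// The expansion is performed by XCoreTargetLowering::EmitInstrWithCustomInserter,
+// which builds a bt-based diamond (thisMBB/copy0MBB/sinkMBB) and a PHI that
+// merges the true and false values.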
+let usesCustomInserter = 1 in { + def SELECT_CC : PseudoInstXCore<(outs GRRegs:$dst), + (ins GRRegs:$cond, GRRegs:$T, GRRegs:$F), + "${:comment} SELECT_CC PSEUDO!", + [(set GRRegs:$dst, + (select GRRegs:$cond, GRRegs:$T, GRRegs:$F))]>; +} + +//===----------------------------------------------------------------------===// +// Instructions +//===----------------------------------------------------------------------===// + +// Three operand short +defm ADD : F3R_2RUS<"add", add>; +defm SUB : F3R_2RUS<"sub", sub>; +let neverHasSideEffects = 1 in { +defm EQ : F3R_2RUS_np<"eq">; +def LSS_3r : F3R_np<"lss">; +def LSU_3r : F3R_np<"lsu">; +} +def AND_3r : F3R<"and", and>; +def OR_3r : F3R<"or", or>; + +let mayLoad=1 in { +def LDW_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldw $dst, $addr[$offset]", + []>; + +def LDW_2rus : _F2RUS<(outs GRRegs:$dst), (ins GRRegs:$addr, i32imm:$offset), + "ldw $dst, $addr[$offset]", + []>; + +def LD16S_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ld16s $dst, $addr[$offset]", + []>; + +def LD8U_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ld8u $dst, $addr[$offset]", + []>; +} + +let mayStore=1 in { +def STW_3r : _F3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "stw $val, $addr[$offset]", + []>; + +def STW_2rus : _F2RUS<(outs), (ins GRRegs:$val, GRRegs:$addr, i32imm:$offset), + "stw $val, $addr[$offset]", + []>; +} + +defm SHL : F3R_2RBITP<"shl", shl>; +defm SHR : F3R_2RBITP<"shr", srl>; +// TODO tsetr + +// Three operand long +def LDAWF_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldaw $dst, $addr[$offset]", + [(set GRRegs:$dst, (ldawf GRRegs:$addr, GRRegs:$offset))]>; + +let neverHasSideEffects = 1 in +def LDAWF_l2rus : _FL2RUS<(outs GRRegs:$dst), + (ins GRRegs:$addr, i32imm:$offset), + "ldaw $dst, $addr[$offset]", + []>; + +def LDAWB_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldaw $dst, $addr[-$offset]", + [(set GRRegs:$dst, (ldawb GRRegs:$addr, GRRegs:$offset))]>; + +let neverHasSideEffects = 1 in +def LDAWB_l2rus : _FL2RUS<(outs GRRegs:$dst), + (ins GRRegs:$addr, i32imm:$offset), + "ldaw $dst, $addr[-$offset]", + []>; + +def LDA16F_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "lda16 $dst, $addr[$offset]", + [(set GRRegs:$dst, (lda16f GRRegs:$addr, GRRegs:$offset))]>; + +def LDA16B_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "lda16 $dst, $addr[-$offset]", + [(set GRRegs:$dst, (lda16b GRRegs:$addr, GRRegs:$offset))]>; + +def MUL_l3r : FL3R<"mul", mul>; +// Instructions which may trap are marked as side effecting. 
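+// Marking them with hasSideEffects prevents these divisions/remainders from
+// being speculated or deleted as dead code, preserving any trap they might
+// raise (e.g. on division by zero).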
+let hasSideEffects = 1 in { +def DIVS_l3r : FL3R<"divs", sdiv>; +def DIVU_l3r : FL3R<"divu", udiv>; +def REMS_l3r : FL3R<"rems", srem>; +def REMU_l3r : FL3R<"remu", urem>; +} +def XOR_l3r : FL3R<"xor", xor>; +defm ASHR : FL3R_L2RBITP<"ashr", sra>; +// TODO crc32, crc8, inpw, outpw +let mayStore=1 in { +def ST16_l3r : _FL3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "st16 $val, $addr[$offset]", + []>; + +def ST8_l3r : _FL3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "st8 $val, $addr[$offset]", + []>; +} + +// Four operand long +let Constraints = "$src1 = $dst1,$src2 = $dst2" in { +def MACCU_l4r : _L4R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "maccu $dst1, $dst2, $src3, $src4", + []>; + +def MACCS_l4r : _L4R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "maccs $dst1, $dst2, $src3, $src4", + []>; +} + +// Five operand long + +def LADD_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "ladd $dst1, $dst2, $src1, $src2, $src3", + []>; + +def LSUB_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "lsub $dst1, $dst2, $src1, $src2, $src3", + []>; + +def LDIV_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "ldiv $dst1, $dst2, $src1, $src2, $src3", + []>; + +// Six operand long + +def LMUL_l6r : _L6R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "lmul $dst1, $dst2, $src1, $src2, $src3, $src4", + []>; + +// Register - U6 + +//let Uses = [DP] in ... +let neverHasSideEffects = 1, isReMaterializable = 1 in +def LDAWDP_ru6: _FRU6<(outs GRRegs:$dst), (ins MEMii:$a), + "ldaw $dst, dp[$a]", + []>; + +let isReMaterializable = 1 in +def LDAWDP_lru6: _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$a), + "ldaw $dst, dp[$a]", + [(set GRRegs:$dst, ADDRdpii:$a)]>; + +let mayLoad=1 in +def LDWDP_ru6: _FRU6<(outs GRRegs:$dst), (ins MEMii:$a), + "ldw $dst, dp[$a]", + []>; + +def LDWDP_lru6: _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$a), + "ldw $dst, dp[$a]", + [(set GRRegs:$dst, (load ADDRdpii:$a))]>; + +let mayStore=1 in +def STWDP_ru6 : _FRU6<(outs), (ins GRRegs:$val, MEMii:$addr), + "stw $val, dp[$addr]", + []>; + +def STWDP_lru6 : _FLRU6<(outs), (ins GRRegs:$val, MEMii:$addr), + "stw $val, dp[$addr]", + [(store GRRegs:$val, ADDRdpii:$addr)]>; + +//let Uses = [CP] in .. 
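+// LDWCP loads a word from the constant pool at cp[$a]; it is marked
+// rematerializable since the pool is read-only.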
+let mayLoad = 1, isReMaterializable = 1 in +defm LDWCP : FRU6_LRU6_cp<"ldw">; + +let Uses = [SP] in { +let mayStore=1 in { +def STWSP_ru6 : _FRU6< + (outs), (ins GRRegs:$val, i32imm:$index), + "stw $val, sp[$index]", + [(XCoreStwsp GRRegs:$val, immU6:$index)]>; + +def STWSP_lru6 : _FLRU6< + (outs), (ins GRRegs:$val, i32imm:$index), + "stw $val, sp[$index]", + [(XCoreStwsp GRRegs:$val, immU16:$index)]>; +} + +let mayLoad=1 in { +def LDWSP_ru6 : _FRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldw $dst, sp[$b]", + []>; + +def LDWSP_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldw $dst, sp[$b]", + []>; +} + +let neverHasSideEffects = 1 in { +def LDAWSP_ru6 : _FRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; + +def LDAWSP_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; + +def LDAWSP_ru6_RRegs : _FRU6< + (outs RRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; + +def LDAWSP_lru6_RRegs : _FLRU6< + (outs RRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; +} +} + +let isReMaterializable = 1 in { +def LDC_ru6 : _FRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldc $dst, $b", + [(set GRRegs:$dst, immU6:$b)]>; + +def LDC_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldc $dst, $b", + [(set GRRegs:$dst, immU16:$b)]>; +} + +// Operand register - U6 +// TODO setc +let isBranch = 1, isTerminator = 1 in { +defm BRFT: FRU6_LRU6_branch<"bt">; +defm BRBT: FRU6_LRU6_branch<"bt">; +defm BRFF: FRU6_LRU6_branch<"bf">; +defm BRBF: FRU6_LRU6_branch<"bf">; +} + +// U6 +let Defs = [SP], Uses = [SP] in { +let neverHasSideEffects = 1 in +defm EXTSP : FU6_LU6_np<"extsp">; +let mayStore = 1 in +defm ENTSP : FU6_LU6_np<"entsp">; + +let isReturn = 1, isTerminator = 1, mayLoad = 1, isBarrier = 1 in { +defm RETSP : FU6_LU6<"retsp", XCoreRetsp>; +} +} + +// TODO extdp, kentsp, krestsp, blat, setsr +// clrsr, getsr, kalli +let isBranch = 1, isTerminator = 1 in { +def BRBU_u6 : _FU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRBU_lu6 : _FLU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRFU_u6 : _FU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRFU_lu6 : _FLU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; +} + +//let Uses = [CP] in ... 
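+// LDAWCP forms the address of a constant pool entry in r11.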
+let Defs = [R11], neverHasSideEffects = 1, isReMaterializable = 1 in +def LDAWCP_u6: _FRU6<(outs), (ins MEMii:$a), + "ldaw r11, cp[$a]", + []>; + +let Defs = [R11], isReMaterializable = 1 in +def LDAWCP_lu6: _FLRU6< + (outs), (ins MEMii:$a), + "ldaw r11, cp[$a]", + [(set R11, ADDRcpii:$a)]>; + +// U10 +// TODO ldwcpl, blacp + +let Defs = [R11], isReMaterializable = 1, neverHasSideEffects = 1 in +def LDAP_u10 : _FU10< + (outs), + (ins i32imm:$addr), + "ldap r11, $addr", + []>; + +let Defs = [R11], isReMaterializable = 1 in +def LDAP_lu10 : _FLU10< + (outs), + (ins i32imm:$addr), + "ldap r11, $addr", + [(set R11, (pcrelwrapper tglobaladdr:$addr))]>; + +let Defs = [R11], isReMaterializable = 1 in +def LDAP_lu10_ba : _FLU10<(outs), + (ins i32imm:$addr), + "ldap r11, $addr", + [(set R11, (pcrelwrapper tblockaddress:$addr))]>; + +let isCall=1, +// All calls clobber the link register and the non-callee-saved registers: +Defs = [R0, R1, R2, R3, R11, LR] in { +def BL_u10 : _FU10< + (outs), + (ins calltarget:$target, variable_ops), + "bl $target", + [(XCoreBranchLink immU10:$target)]>; + +def BL_lu10 : _FLU10< + (outs), + (ins calltarget:$target, variable_ops), + "bl $target", + [(XCoreBranchLink immU20:$target)]>; +} + +// Two operand short +// TODO getr, getst +def NOT : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b), + "not $dst, $b", + [(set GRRegs:$dst, (not GRRegs:$b))]>; + +def NEG : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b), + "neg $dst, $b", + [(set GRRegs:$dst, (ineg GRRegs:$b))]>; + +// TODO setd, eet, eef, getts, setpt, outct, inct, chkct, outt, intt, out, +// in, outshr, inshr, testct, testwct, tinitpc, tinitdp, tinitsp, tinitcp, +// tsetmr, sext (reg), zext (reg) +let isTwoAddress = 1 in { +let neverHasSideEffects = 1 in +def SEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2), + "sext $dst, $src2", + []>; + +let neverHasSideEffects = 1 in +def ZEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2), + "zext $dst, $src2", + []>; + +def ANDNOT_2r : _F2R<(outs GRRegs:$dst), (ins GRRegs:$src1, GRRegs:$src2), + "andnot $dst, $src2", + [(set GRRegs:$dst, (and GRRegs:$src1, (not GRRegs:$src2)))]>; +} + +let isReMaterializable = 1, neverHasSideEffects = 1 in +def MKMSK_rus : _FRUS<(outs GRRegs:$dst), (ins i32imm:$size), + "mkmsk $dst, $size", + []>; + +def MKMSK_2r : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$size), + "mkmsk $dst, $size", + [(set GRRegs:$dst, (add (shl 1, GRRegs:$size), 0xffffffff))]>; + +// Two operand long +// TODO settw, setclk, setrdy, setpsc, endin, peek, +// getd, testlcl, tinitlr, getps, setps +def BITREV_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "bitrev $dst, $src", + [(set GRRegs:$dst, (int_xcore_bitrev GRRegs:$src))]>; + +def BYTEREV_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "byterev $dst, $src", + [(set GRRegs:$dst, (bswap GRRegs:$src))]>; + +def CLZ_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "clz $dst, $src", + [(set GRRegs:$dst, (ctlz GRRegs:$src))]>; + +// One operand short +// TODO edu, eeu, waitet, waitef, freer, tstart, msync, mjoin, syncr, clrtp +// bru, setdp, setcp, setv, setev, kcall +// dgetreg +let isBranch=1, isIndirectBranch=1, isTerminator=1 in +def BAU_1r : _F1R<(outs), (ins GRRegs:$addr), + "bau $addr", + [(brind GRRegs:$addr)]>; + +let Defs=[SP], neverHasSideEffects=1 in +def SETSP_1r : _F1R<(outs), (ins GRRegs:$src), + "set sp, $src", + []>; + +let isBarrier = 1, hasCtrlDep = 1 in +def ECALLT_1r : _F1R<(outs), (ins GRRegs:$src), + "ecallt $src", + []>; + +let isBarrier = 1, hasCtrlDep = 1 in 
+def ECALLF_1r : _F1R<(outs), (ins GRRegs:$src), + "ecallf $src", + []>; + +let isCall=1, +// All calls clobber the link register and the non-callee-saved registers: +Defs = [R0, R1, R2, R3, R11, LR] in { +def BLA_1r : _F1R<(outs), (ins GRRegs:$addr, variable_ops), + "bla $addr", + [(XCoreBranchLink GRRegs:$addr)]>; +} + +// Zero operand short +// TODO waiteu, clre, ssync, freet, ldspc, stspc, ldssr, stssr, ldsed, stsed, +// stet, geted, getet, getkep, getksp, setkep, getid, kret, dcall, dret, +// dentsp, drestsp + +let Defs = [R11] in +def GETID_0R : _F0R<(outs), (ins), + "get r11, id", + [(set R11, (int_xcore_getid))]>; + +//===----------------------------------------------------------------------===// +// Non-Instruction Patterns +//===----------------------------------------------------------------------===// + +def : Pat<(XCoreBranchLink tglobaladdr:$addr), (BL_lu10 tglobaladdr:$addr)>; +def : Pat<(XCoreBranchLink texternalsym:$addr), (BL_lu10 texternalsym:$addr)>; + +/// sext_inreg +def : Pat<(sext_inreg GRRegs:$b, i1), (SEXT_rus GRRegs:$b, 1)>; +def : Pat<(sext_inreg GRRegs:$b, i8), (SEXT_rus GRRegs:$b, 8)>; +def : Pat<(sext_inreg GRRegs:$b, i16), (SEXT_rus GRRegs:$b, 16)>; + +/// loads +def : Pat<(zextloadi8 (add GRRegs:$addr, GRRegs:$offset)), + (LD8U_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(zextloadi8 GRRegs:$addr), (LD8U_3r GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(sextloadi16 (lda16f GRRegs:$addr, GRRegs:$offset)), + (LD16S_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(sextloadi16 GRRegs:$addr), (LD16S_3r GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(load (ldawf GRRegs:$addr, GRRegs:$offset)), + (LDW_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(load (add GRRegs:$addr, immUs4:$offset)), + (LDW_2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; +def : Pat<(load GRRegs:$addr), (LDW_2rus GRRegs:$addr, 0)>; + +/// anyext +def : Pat<(extloadi8 (add GRRegs:$addr, GRRegs:$offset)), + (LD8U_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(extloadi8 GRRegs:$addr), (LD8U_3r GRRegs:$addr, (LDC_ru6 0))>; +def : Pat<(extloadi16 (lda16f GRRegs:$addr, GRRegs:$offset)), + (LD16S_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(extloadi16 GRRegs:$addr), (LD16S_3r GRRegs:$addr, (LDC_ru6 0))>; + +/// stores +def : Pat<(truncstorei8 GRRegs:$val, (add GRRegs:$addr, GRRegs:$offset)), + (ST8_l3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(truncstorei8 GRRegs:$val, GRRegs:$addr), + (ST8_l3r GRRegs:$val, GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(truncstorei16 GRRegs:$val, (lda16f GRRegs:$addr, GRRegs:$offset)), + (ST16_l3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(truncstorei16 GRRegs:$val, GRRegs:$addr), + (ST16_l3r GRRegs:$val, GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(store GRRegs:$val, (ldawf GRRegs:$addr, GRRegs:$offset)), + (STW_3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(store GRRegs:$val, (add GRRegs:$addr, immUs4:$offset)), + (STW_2rus GRRegs:$val, GRRegs:$addr, (div4_xform immUs4:$offset))>; +def : Pat<(store GRRegs:$val, GRRegs:$addr), + (STW_2rus GRRegs:$val, GRRegs:$addr, 0)>; + +/// cttz +def : Pat<(cttz GRRegs:$src), (CLZ_l2r (BITREV_l2r GRRegs:$src))>; + +/// trap +def : Pat<(trap), (ECALLF_1r (LDC_ru6 0))>; + +/// +/// branch patterns +/// + +// unconditional branch +def : Pat<(br bb:$addr), (BRFU_lu6 bb:$addr)>; + +// direct match equal/notequal zero brcond +def : Pat<(brcond (setne GRRegs:$lhs, 0), bb:$dst), + (BRFT_lru6 GRRegs:$lhs, bb:$dst)>; +def : Pat<(brcond (seteq GRRegs:$lhs, 0), bb:$dst), + (BRFF_lru6 GRRegs:$lhs, bb:$dst)>; + 
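+// Ordered comparisons have no direct branch form; they are lowered to lss/lsu
+// (with operands swapped where needed) or eq, followed by a branch on false.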
+def : Pat<(brcond (setle GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSS_3r GRRegs:$rhs, GRRegs:$lhs), bb:$dst)>; +def : Pat<(brcond (setule GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSU_3r GRRegs:$rhs, GRRegs:$lhs), bb:$dst)>; +def : Pat<(brcond (setge GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSS_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setuge GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSU_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setne GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (EQ_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setne GRRegs:$lhs, immUs:$rhs), bb:$dst), + (BRFF_lru6 (EQ_2rus GRRegs:$lhs, immUs:$rhs), bb:$dst)>; + +// generic brcond pattern +def : Pat<(brcond GRRegs:$cond, bb:$addr), (BRFT_lru6 GRRegs:$cond, bb:$addr)>; + + +/// +/// Select patterns +/// + +// direct match equal/notequal zero select +def : Pat<(select (setne GRRegs:$lhs, 0), GRRegs:$T, GRRegs:$F), + (SELECT_CC GRRegs:$lhs, GRRegs:$T, GRRegs:$F)>; + +def : Pat<(select (seteq GRRegs:$lhs, 0), GRRegs:$T, GRRegs:$F), + (SELECT_CC GRRegs:$lhs, GRRegs:$F, GRRegs:$T)>; + +def : Pat<(select (setle GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSS_3r GRRegs:$rhs, GRRegs:$lhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setule GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSU_3r GRRegs:$rhs, GRRegs:$lhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setge GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSS_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setuge GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSU_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setne GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (EQ_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setne GRRegs:$lhs, immUs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (EQ_2rus GRRegs:$lhs, immUs:$rhs), GRRegs:$F, GRRegs:$T)>; + +/// +/// setcc patterns, only matched when none of the above brcond +/// patterns match +/// + +// setcc 2 register operands +def : Pat<(setle GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSS_3r GRRegs:$rhs, GRRegs:$lhs), 0)>; +def : Pat<(setule GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSU_3r GRRegs:$rhs, GRRegs:$lhs), 0)>; + +def : Pat<(setgt GRRegs:$lhs, GRRegs:$rhs), + (LSS_3r GRRegs:$rhs, GRRegs:$lhs)>; +def : Pat<(setugt GRRegs:$lhs, GRRegs:$rhs), + (LSU_3r GRRegs:$rhs, GRRegs:$lhs)>; + +def : Pat<(setge GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSS_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; +def : Pat<(setuge GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSU_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; + +def : Pat<(setlt GRRegs:$lhs, GRRegs:$rhs), + (LSS_3r GRRegs:$lhs, GRRegs:$rhs)>; +def : Pat<(setult GRRegs:$lhs, GRRegs:$rhs), + (LSU_3r GRRegs:$lhs, GRRegs:$rhs)>; + +def : Pat<(setne GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (EQ_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; + +def : Pat<(seteq GRRegs:$lhs, GRRegs:$rhs), + (EQ_3r GRRegs:$lhs, GRRegs:$rhs)>; + +// setcc reg/imm operands +def : Pat<(seteq GRRegs:$lhs, immUs:$rhs), + (EQ_2rus GRRegs:$lhs, immUs:$rhs)>; +def : Pat<(setne GRRegs:$lhs, immUs:$rhs), + (EQ_2rus (EQ_2rus GRRegs:$lhs, immUs:$rhs), 0)>; + +// misc +def : Pat<(add GRRegs:$addr, immUs4:$offset), + (LDAWF_l2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; + +def : Pat<(sub GRRegs:$addr, immUs4:$offset), + (LDAWB_l2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; + +def : Pat<(and GRRegs:$val, 
immMskBitp:$mask), + (ZEXT_rus GRRegs:$val, (msksize_xform immMskBitp:$mask))>; + +// (sub X, imm) gets canonicalized to (add X, -imm). Match this form. +def : Pat<(add GRRegs:$src1, immUsNeg:$src2), + (SUB_2rus GRRegs:$src1, (neg_xform immUsNeg:$src2))>; + +def : Pat<(add GRRegs:$src1, immUs4Neg:$src2), + (LDAWB_l2rus GRRegs:$src1, (div4neg_xform immUs4Neg:$src2))>; + +/// +/// Some peepholes +/// + +def : Pat<(mul GRRegs:$src, 3), + (LDA16F_l3r GRRegs:$src, GRRegs:$src)>; + +def : Pat<(mul GRRegs:$src, 5), + (LDAWF_l3r GRRegs:$src, GRRegs:$src)>; + +def : Pat<(mul GRRegs:$src, -3), + (LDAWB_l3r GRRegs:$src, GRRegs:$src)>; + +// ashr X, 32 is equivalent to ashr X, 31 on the XCore. +def : Pat<(sra GRRegs:$src, 31), + (ASHR_l2rus GRRegs:$src, 32)>; + +def : Pat<(brcond (setlt GRRegs:$lhs, 0), bb:$dst), + (BRFT_lru6 (ASHR_l2rus GRRegs:$lhs, 32), bb:$dst)>; + +// setge X, 0 is canonicalized to setgt X, -1 +def : Pat<(brcond (setgt GRRegs:$lhs, -1), bb:$dst), + (BRFF_lru6 (ASHR_l2rus GRRegs:$lhs, 32), bb:$dst)>; + +def : Pat<(select (setlt GRRegs:$lhs, 0), GRRegs:$T, GRRegs:$F), + (SELECT_CC (ASHR_l2rus GRRegs:$lhs, 32), GRRegs:$T, GRRegs:$F)>; + +def : Pat<(select (setgt GRRegs:$lhs, -1), GRRegs:$T, GRRegs:$F), + (SELECT_CC (ASHR_l2rus GRRegs:$lhs, 32), GRRegs:$F, GRRegs:$T)>; + +def : Pat<(setgt GRRegs:$lhs, -1), + (EQ_2rus (ASHR_l2rus GRRegs:$lhs, 32), 0)>; + +def : Pat<(sra (shl GRRegs:$src, immBpwSubBitp:$imm), immBpwSubBitp:$imm), + (SEXT_rus GRRegs:$src, (bpwsub_xform immBpwSubBitp:$imm))>; diff --git a/lib/Target/XCore/XCoreMCAsmInfo.cpp b/lib/Target/XCore/XCoreMCAsmInfo.cpp new file mode 100644 index 0000000..bf78575 --- /dev/null +++ b/lib/Target/XCore/XCoreMCAsmInfo.cpp @@ -0,0 +1,30 @@ +//===-- XCoreMCAsmInfo.cpp - XCore asm properties -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "XCoreMCAsmInfo.h" +using namespace llvm; + +XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, const StringRef &TT) { + SupportsDebugInformation = true; + Data16bitsDirective = "\t.short\t"; + Data32bitsDirective = "\t.long\t"; + Data64bitsDirective = 0; + ZeroDirective = "\t.space\t"; + CommentString = "#"; + + PrivateGlobalPrefix = ".L"; + AscizDirective = ".asciiz"; + WeakDefDirective = "\t.weak\t"; + WeakRefDirective = "\t.weak\t"; + + // Debug + HasLEB128 = true; + AbsoluteDebugSectionOffsets = true; +} + diff --git a/lib/Target/XCore/XCoreMCAsmInfo.h b/lib/Target/XCore/XCoreMCAsmInfo.h new file mode 100644 index 0000000..01f8e48 --- /dev/null +++ b/lib/Target/XCore/XCoreMCAsmInfo.h @@ -0,0 +1,29 @@ +//=====-- XCoreMCAsmInfo.h - XCore asm properties -------------*- C++ -*--====// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the XCoreMCAsmInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef XCORETARGETASMINFO_H +#define XCORETARGETASMINFO_H + +#include "llvm/MC/MCAsmInfo.h" + +namespace llvm { + class Target; + class StringRef; + class XCoreMCAsmInfo : public MCAsmInfo { + public: + explicit XCoreMCAsmInfo(const Target &T, const StringRef &TT); + }; + +} // namespace llvm + +#endif diff --git a/lib/Target/XCore/XCoreMachineFunctionInfo.h b/lib/Target/XCore/XCoreMachineFunctionInfo.h new file mode 100644 index 0000000..124a011 --- /dev/null +++ b/lib/Target/XCore/XCoreMachineFunctionInfo.h @@ -0,0 +1,69 @@ +//====- XCoreMachineFuctionInfo.h - XCore machine function info -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares XCore-specific per-machine-function information. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREMACHINEFUNCTIONINFO_H +#define XCOREMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include <vector> + +namespace llvm { + +// Forward declarations +class Function; + +/// XCoreFunctionInfo - This class is derived from MachineFunction private +/// XCore target-specific information for each MachineFunction. +class XCoreFunctionInfo : public MachineFunctionInfo { +private: + bool UsesLR; + int LRSpillSlot; + int FPSpillSlot; + int VarArgsFrameIndex; + std::vector<std::pair<unsigned, CalleeSavedInfo> > SpillLabels; + +public: + XCoreFunctionInfo() : + UsesLR(false), + LRSpillSlot(0), + FPSpillSlot(0), + VarArgsFrameIndex(0) {} + + explicit XCoreFunctionInfo(MachineFunction &MF) : + UsesLR(false), + LRSpillSlot(0), + FPSpillSlot(0), + VarArgsFrameIndex(0) {} + + ~XCoreFunctionInfo() {} + + void setVarArgsFrameIndex(int off) { VarArgsFrameIndex = off; } + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + + void setUsesLR(bool val) { UsesLR = val; } + bool getUsesLR() const { return UsesLR; } + + void setLRSpillSlot(int off) { LRSpillSlot = off; } + int getLRSpillSlot() const { return LRSpillSlot; } + + void setFPSpillSlot(int off) { FPSpillSlot = off; } + int getFPSpillSlot() const { return FPSpillSlot; } + + std::vector<std::pair<unsigned, CalleeSavedInfo> >&getSpillLabels() { + return SpillLabels; + } +}; +} // End llvm namespace + +#endif // XCOREMACHINEFUNCTIONINFO_H diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp new file mode 100644 index 0000000..c7c8c7b --- /dev/null +++ b/lib/Target/XCore/XCoreRegisterInfo.cpp @@ -0,0 +1,618 @@ +//===- XCoreRegisterInfo.cpp - XCore Register Information -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the MRegisterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#include "XCoreRegisterInfo.h" +#include "XCoreMachineFunctionInfo.h" +#include "XCore.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Type.h" +#include "llvm/Function.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +XCoreRegisterInfo::XCoreRegisterInfo(const TargetInstrInfo &tii) + : XCoreGenRegisterInfo(XCore::ADJCALLSTACKDOWN, XCore::ADJCALLSTACKUP), + TII(tii) { +} + +// helper functions +static inline bool isImmUs(unsigned val) { + return val <= 11; +} + +static inline bool isImmU6(unsigned val) { + return val < (1 << 6); +} + +static inline bool isImmU16(unsigned val) { + return val < (1 << 16); +} + +static const unsigned XCore_ArgRegs[] = { + XCore::R0, XCore::R1, XCore::R2, XCore::R3 +}; + +const unsigned * XCoreRegisterInfo::getArgRegs(const MachineFunction *MF) +{ + return XCore_ArgRegs; +} + +unsigned XCoreRegisterInfo::getNumArgRegs(const MachineFunction *MF) +{ + return array_lengthof(XCore_ArgRegs); +} + +bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) +{ + const MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + return (MMI && MMI->hasDebugInfo()) || + !MF.getFunction()->doesNotThrow() || + UnwindTablesMandatory; +} + +const unsigned* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) + const { + static const unsigned CalleeSavedRegs[] = { + XCore::R4, XCore::R5, XCore::R6, XCore::R7, + XCore::R8, XCore::R9, XCore::R10, XCore::LR, + 0 + }; + return CalleeSavedRegs; +} + +const TargetRegisterClass* const* +XCoreRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { + static const TargetRegisterClass * const CalleeSavedRegClasses[] = { + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::RRegsRegisterClass, + 0 + }; + return CalleeSavedRegClasses; +} + +BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + Reserved.set(XCore::CP); + Reserved.set(XCore::DP); + Reserved.set(XCore::SP); + Reserved.set(XCore::LR); + if (hasFP(MF)) { + Reserved.set(XCore::R10); + } + return Reserved; +} + +bool +XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const { + // TODO can we estimate stack size? 
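+  // With a frame pointer, frame accesses use r10-relative forms whose
+  // immediate range is only 0..11 words, so a scratch register may be needed
+  // to materialize larger offsets (see eliminateFrameIndex below).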
+ return hasFP(MF); +} + +bool XCoreRegisterInfo::hasFP(const MachineFunction &MF) const { + return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects(); +} + +// This function eliminates ADJCALLSTACKDOWN, +// ADJCALLSTACKUP pseudo instructions +void XCoreRegisterInfo:: +eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + if (!hasReservedCallFrame(MF)) { + // Turn the adjcallstackdown instruction into 'extsp <amt>' and the + // adjcallstackup instruction into 'ldaw sp, sp[<amt>]' + MachineInstr *Old = I; + uint64_t Amount = Old->getOperand(0).getImm(); + if (Amount != 0) { + // We need to keep the stack aligned properly. To do this, we round the + // amount of space needed for the outgoing arguments up to the next + // alignment boundary. + unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); + Amount = (Amount+Align-1)/Align*Align; + + assert(Amount%4 == 0); + Amount /= 4; + + bool isU6 = isImmU6(Amount); + + if (!isU6 && !isImmU16(Amount)) { + // FIX could emit multiple instructions in this case. +#ifndef NDEBUG + errs() << "eliminateCallFramePseudoInstr size too big: " + << Amount << "\n"; +#endif + llvm_unreachable(0); + } + + MachineInstr *New; + if (Old->getOpcode() == XCore::ADJCALLSTACKDOWN) { + int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6; + New=BuildMI(MF, Old->getDebugLoc(), TII.get(Opcode)) + .addImm(Amount); + } else { + assert(Old->getOpcode() == XCore::ADJCALLSTACKUP); + int Opcode = isU6 ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + New=BuildMI(MF, Old->getDebugLoc(), TII.get(Opcode), XCore::SP) + .addImm(Amount); + } + + // Replace the pseudo instruction with a new instruction... + MBB.insert(I, New); + } + } + + MBB.erase(I); +} + +unsigned +XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, int *Value, + RegScavenger *RS) const { + assert(SPAdj == 0 && "Unexpected"); + MachineInstr &MI = *II; + DebugLoc dl = MI.getDebugLoc(); + unsigned i = 0; + + while (!MI.getOperand(i).isFI()) { + ++i; + assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); + } + + MachineOperand &FrameOp = MI.getOperand(i); + int FrameIndex = FrameOp.getIndex(); + + MachineFunction &MF = *MI.getParent()->getParent(); + int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex); + int StackSize = MF.getFrameInfo()->getStackSize(); + + #ifndef NDEBUG + DEBUG(errs() << "\nFunction : " + << MF.getFunction()->getName() << "\n"); + DEBUG(errs() << "<--------->\n"); + DEBUG(MI.print(errs())); + DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"); + DEBUG(errs() << "FrameOffset : " << Offset << "\n"); + DEBUG(errs() << "StackSize : " << StackSize << "\n"); + #endif + + Offset += StackSize; + + // fold constant into offset. 
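+  // The frame index operand is followed by an immediate; add it into the
+  // computed offset and clear it so the rewritten instruction carries only
+  // the final value.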
+ Offset += MI.getOperand(i + 1).getImm(); + MI.getOperand(i + 1).ChangeToImmediate(0); + + assert(Offset%4 == 0 && "Misaligned stack offset"); + + DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); + + Offset/=4; + + bool FP = hasFP(MF); + + unsigned Reg = MI.getOperand(0).getReg(); + bool isKill = MI.getOpcode() == XCore::STWFI && MI.getOperand(0).isKill(); + + assert(XCore::GRRegsRegisterClass->contains(Reg) && + "Unexpected register operand"); + + MachineBasicBlock &MBB = *MI.getParent(); + + if (FP) { + bool isUs = isImmUs(Offset); + unsigned FramePtr = XCore::R10; + + if (!isUs) { + if (!RS) { + std::string msg; + raw_string_ostream Msg(msg); + Msg << "eliminateFrameIndex Frame size too big: " << Offset; + llvm_report_error(Msg.str()); + } + unsigned ScratchReg = RS->scavengeRegister(XCore::GRRegsRegisterClass, II, + SPAdj); + loadConstant(MBB, II, ScratchReg, Offset, dl); + switch (MI.getOpcode()) { + case XCore::LDWFI: + BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg) + .addReg(FramePtr) + .addReg(ScratchReg, RegState::Kill); + break; + case XCore::STWFI: + BuildMI(MBB, II, dl, TII.get(XCore::STW_3r)) + .addReg(Reg, getKillRegState(isKill)) + .addReg(FramePtr) + .addReg(ScratchReg, RegState::Kill); + break; + case XCore::LDAWFI: + BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg) + .addReg(FramePtr) + .addReg(ScratchReg, RegState::Kill); + break; + default: + llvm_unreachable("Unexpected Opcode"); + } + } else { + switch (MI.getOpcode()) { + case XCore::LDWFI: + BuildMI(MBB, II, dl, TII.get(XCore::LDW_2rus), Reg) + .addReg(FramePtr) + .addImm(Offset); + break; + case XCore::STWFI: + BuildMI(MBB, II, dl, TII.get(XCore::STW_2rus)) + .addReg(Reg, getKillRegState(isKill)) + .addReg(FramePtr) + .addImm(Offset); + break; + case XCore::LDAWFI: + BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l2rus), Reg) + .addReg(FramePtr) + .addImm(Offset); + break; + default: + llvm_unreachable("Unexpected Opcode"); + } + } + } else { + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + std::string msg; + raw_string_ostream Msg(msg); + Msg << "eliminateFrameIndex Frame size too big: " << Offset; + llvm_report_error(Msg.str()); + } + + switch (MI.getOpcode()) { + int NewOpcode; + case XCore::LDWFI: + NewOpcode = (isU6) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6; + BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg) + .addImm(Offset); + break; + case XCore::STWFI: + NewOpcode = (isU6) ? XCore::STWSP_ru6 : XCore::STWSP_lru6; + BuildMI(MBB, II, dl, TII.get(NewOpcode)) + .addReg(Reg, getKillRegState(isKill)) + .addImm(Offset); + break; + case XCore::LDAWFI: + NewOpcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6; + BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg) + .addImm(Offset); + break; + default: + llvm_unreachable("Unexpected Opcode"); + } + } + // Erase old instruction. + MBB.erase(II); + return 0; +} + +void +XCoreRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS) const { + MachineFrameInfo *MFI = MF.getFrameInfo(); + bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR); + const TargetRegisterClass *RC = XCore::GRRegsRegisterClass; + XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); + if (LRUsed) { + MF.getRegInfo().setPhysRegUnused(XCore::LR); + + bool isVarArg = MF.getFunction()->isVarArg(); + int FrameIdx; + if (! isVarArg) { + // A fixed offset of 0 allows us to save / restore LR using entsp / retsp. 
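+      // entsp and retsp save and reload the link register at sp[0], so the
+      // prologue and epilogue can fold the LR spill into those instructions
+      // (see emitPrologue / emitEpilogue).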
+ FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true, false); + } else { + FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), + false); + } + XFI->setUsesLR(FrameIdx); + XFI->setLRSpillSlot(FrameIdx); + } + if (requiresRegisterScavenging(MF)) { + // Reserve a slot close to SP or frame pointer. + RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RC->getAlignment(), + false)); + } + if (hasFP(MF)) { + // A callee save register is used to hold the FP. + // This needs saving / restoring in the epilogue / prologue. + XFI->setFPSpillSlot(MFI->CreateStackObject(RC->getSize(), + RC->getAlignment(), + false)); + } +} + +void XCoreRegisterInfo:: +processFunctionBeforeFrameFinalized(MachineFunction &MF) const { + +} + +void XCoreRegisterInfo:: +loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DstReg, int64_t Value, DebugLoc dl) const { + // TODO use mkmsk if possible. + if (!isImmU16(Value)) { + // TODO use constant pool. + std::string msg; + raw_string_ostream Msg(msg); + Msg << "loadConstant value too big " << Value; + llvm_report_error(Msg.str()); + } + int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6; + BuildMI(MBB, I, dl, TII.get(Opcode), DstReg).addImm(Value); +} + +void XCoreRegisterInfo:: +storeToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned SrcReg, int Offset, DebugLoc dl) const { + assert(Offset%4 == 0 && "Misaligned stack offset"); + Offset/=4; + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + std::string msg; + raw_string_ostream Msg(msg); + Msg << "storeToStack offset too big " << Offset; + llvm_report_error(Msg.str()); + } + int Opcode = isU6 ? XCore::STWSP_ru6 : XCore::STWSP_lru6; + BuildMI(MBB, I, dl, TII.get(Opcode)) + .addReg(SrcReg) + .addImm(Offset); +} + +void XCoreRegisterInfo:: +loadFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DstReg, int Offset, DebugLoc dl) const { + assert(Offset%4 == 0 && "Misaligned stack offset"); + Offset/=4; + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + std::string msg; + raw_string_ostream Msg(msg); + Msg << "loadFromStack offset too big " << Offset; + llvm_report_error(Msg.str()); + } + int Opcode = isU6 ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6; + BuildMI(MBB, I, dl, TII.get(Opcode), DstReg) + .addImm(Offset); +} + +void XCoreRegisterInfo::emitPrologue(MachineFunction &MF) const { + MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB + MachineBasicBlock::iterator MBBI = MBB.begin(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); + DebugLoc dl = (MBBI != MBB.end() ? + MBBI->getDebugLoc() : DebugLoc::getUnknownLoc()); + + bool FP = hasFP(MF); + + // Work out frame sizes. + int FrameSize = MFI->getStackSize(); + + assert(FrameSize%4 == 0 && "Misaligned frame size"); + + FrameSize/=4; + + bool isU6 = isImmU6(FrameSize); + + if (!isU6 && !isImmU16(FrameSize)) { + // FIXME could emit multiple instructions. + std::string msg; + raw_string_ostream Msg(msg); + Msg << "emitPrologue Frame size too big: " << FrameSize; + llvm_report_error(Msg.str()); + } + bool emitFrameMoves = needsFrameMoves(MF); + + // Do we need to allocate space on the stack? + if (FrameSize) { + bool saveLR = XFI->getUsesLR(); + bool LRSavedOnEntry = false; + int Opcode; + if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) { + Opcode = (isU6) ? 
XCore::ENTSP_u6 : XCore::ENTSP_lu6; + MBB.addLiveIn(XCore::LR); + saveLR = false; + LRSavedOnEntry = true; + } else { + Opcode = (isU6) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6; + } + BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize); + + if (emitFrameMoves) { + std::vector<MachineMove> &Moves = MMI->getFrameMoves(); + + // Show update of SP. + unsigned FrameLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, dl, TII.get(XCore::DBG_LABEL)).addImm(FrameLabelId); + + MachineLocation SPDst(MachineLocation::VirtualFP); + MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4); + Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc)); + + if (LRSavedOnEntry) { + MachineLocation CSDst(MachineLocation::VirtualFP, 0); + MachineLocation CSSrc(XCore::LR); + Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc)); + } + } + if (saveLR) { + int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot()); + storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl); + MBB.addLiveIn(XCore::LR); + + if (emitFrameMoves) { + unsigned SaveLRLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, dl, TII.get(XCore::DBG_LABEL)).addImm(SaveLRLabelId); + MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset); + MachineLocation CSSrc(XCore::LR); + MMI->getFrameMoves().push_back(MachineMove(SaveLRLabelId, + CSDst, CSSrc)); + } + } + } + + if (FP) { + // Save R10 to the stack. + int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot()); + storeToStack(MBB, MBBI, XCore::R10, FPSpillOffset + FrameSize*4, dl); + // R10 is live-in. It is killed at the spill. + MBB.addLiveIn(XCore::R10); + if (emitFrameMoves) { + unsigned SaveR10LabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, dl, TII.get(XCore::DBG_LABEL)).addImm(SaveR10LabelId); + MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset); + MachineLocation CSSrc(XCore::R10); + MMI->getFrameMoves().push_back(MachineMove(SaveR10LabelId, + CSDst, CSSrc)); + } + // Set the FP from the SP. + unsigned FramePtr = XCore::R10; + BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr) + .addImm(0); + if (emitFrameMoves) { + // Show FP is now valid. + unsigned FrameLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, dl, TII.get(XCore::DBG_LABEL)).addImm(FrameLabelId); + MachineLocation SPDst(FramePtr); + MachineLocation SPSrc(MachineLocation::VirtualFP); + MMI->getFrameMoves().push_back(MachineMove(FrameLabelId, SPDst, SPSrc)); + } + } + + if (emitFrameMoves) { + // Frame moves for callee saved. + std::vector<MachineMove> &Moves = MMI->getFrameMoves(); + std::vector<std::pair<unsigned, CalleeSavedInfo> >&SpillLabels = + XFI->getSpillLabels(); + for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) { + unsigned SpillLabel = SpillLabels[I].first; + CalleeSavedInfo &CSI = SpillLabels[I].second; + int Offset = MFI->getObjectOffset(CSI.getFrameIdx()); + unsigned Reg = CSI.getReg(); + MachineLocation CSDst(MachineLocation::VirtualFP, Offset); + MachineLocation CSSrc(Reg); + Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc)); + } + } +} + +void XCoreRegisterInfo::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineBasicBlock::iterator MBBI = prior(MBB.end()); + DebugLoc dl = MBBI->getDebugLoc(); + + bool FP = hasFP(MF); + + if (FP) { + // Restore the stack pointer. + unsigned FramePtr = XCore::R10; + BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)) + .addReg(FramePtr); + } + + // Work out frame sizes. 
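+  // getStackSize() is in bytes; sp-relative immediates are word scaled, so
+  // the size is converted to words before checking the u6 / u16 encodings.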
+ int FrameSize = MFI->getStackSize(); + + assert(FrameSize%4 == 0 && "Misaligned frame size"); + + FrameSize/=4; + + bool isU6 = isImmU6(FrameSize); + + if (!isU6 && !isImmU16(FrameSize)) { + // FIXME could emit multiple instructions. + std::string msg; + raw_string_ostream Msg(msg); + Msg << "emitEpilogue Frame size too big: " << FrameSize; + llvm_report_error(Msg.str()); + } + + if (FrameSize) { + XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); + + if (FP) { + // Restore R10 + int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot()); + FPSpillOffset += FrameSize*4; + loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl); + } + bool restoreLR = XFI->getUsesLR(); + if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) { + int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot()); + LRSpillOffset += FrameSize*4; + loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl); + restoreLR = false; + } + if (restoreLR) { + // Fold prologue into return instruction + assert(MBBI->getOpcode() == XCore::RETSP_u6 + || MBBI->getOpcode() == XCore::RETSP_lu6); + int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6; + BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize); + MBB.erase(MBBI); + } else { + int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize); + } + } +} + +int XCoreRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const { + return XCoreGenRegisterInfo::getDwarfRegNumFull(RegNum, 0); +} + +unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const { + bool FP = hasFP(MF); + + return FP ? XCore::R10 : XCore::SP; +} + +unsigned XCoreRegisterInfo::getRARegister() const { + return XCore::LR; +} + +void XCoreRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) + const { + // Initial state of the frame pointer is SP. + MachineLocation Dst(MachineLocation::VirtualFP); + MachineLocation Src(XCore::SP, 0); + Moves.push_back(MachineMove(0, Dst, Src)); +} + +#include "XCoreGenRegisterInfo.inc" + diff --git a/lib/Target/XCore/XCoreRegisterInfo.h b/lib/Target/XCore/XCoreRegisterInfo.h new file mode 100644 index 0000000..8ab1750 --- /dev/null +++ b/lib/Target/XCore/XCoreRegisterInfo.h @@ -0,0 +1,95 @@ +//===- XCoreRegisterInfo.h - XCore Register Information Impl ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the MRegisterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef XCOREREGISTERINFO_H +#define XCOREREGISTERINFO_H + +#include "llvm/Target/TargetRegisterInfo.h" +#include "XCoreGenRegisterInfo.h.inc" + +namespace llvm { + +class TargetInstrInfo; + +struct XCoreRegisterInfo : public XCoreGenRegisterInfo { +private: + const TargetInstrInfo &TII; + + void loadConstant(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DstReg, int64_t Value, DebugLoc dl) const; + + void storeToStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned SrcReg, int Offset, DebugLoc dl) const; + + void loadFromStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DstReg, int Offset, DebugLoc dl) const; + +public: + XCoreRegisterInfo(const TargetInstrInfo &tii); + + /// Code Generation virtual methods... + + const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const; + + const TargetRegisterClass* const* getCalleeSavedRegClasses( + const MachineFunction *MF = 0) const; + + BitVector getReservedRegs(const MachineFunction &MF) const; + + bool requiresRegisterScavenging(const MachineFunction &MF) const; + + bool hasFP(const MachineFunction &MF) const; + + void eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const; + + unsigned eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, int *Value = NULL, + RegScavenger *RS = NULL) const; + + void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS = NULL) const; + + void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; + + void emitPrologue(MachineFunction &MF) const; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const; + + // Debug information queries. + unsigned getRARegister() const; + unsigned getFrameRegister(const MachineFunction &MF) const; + void getInitialFrameState(std::vector<MachineMove> &Moves) const; + + //! Return the array of argument passing registers + /*! + \note The size of this array is returned by getArgRegsSize(). + */ + static const unsigned *getArgRegs(const MachineFunction *MF = 0); + + //! Return the size of the argument passing register array + static unsigned getNumArgRegs(const MachineFunction *MF = 0); + + //! Return whether to emit frame moves + static bool needsFrameMoves(const MachineFunction &MF); + + //! Get DWARF debugging register number + int getDwarfRegNum(unsigned RegNum, bool isEH) const; +}; + +} // end namespace llvm + +#endif diff --git a/lib/Target/XCore/XCoreRegisterInfo.td b/lib/Target/XCore/XCoreRegisterInfo.td new file mode 100644 index 0000000..62daf5d --- /dev/null +++ b/lib/Target/XCore/XCoreRegisterInfo.td @@ -0,0 +1,91 @@ +//===- XCoreRegisterInfo.td - XCore Register defs ----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Declarations that describe the XCore register file +//===----------------------------------------------------------------------===// + +class XCoreReg<string n> : Register<n> { + field bits<4> Num; + let Namespace = "XCore"; +} + +// Registers are identified with 4-bit ID numbers. 
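+// r0-r11 are general purpose; cp, dp, sp and lr are the constant pool
+// pointer, data pointer, stack pointer and link register respectively.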
+// Ri - 32-bit integer registers +class Ri<bits<4> num, string n> : XCoreReg<n> { + let Num = num; +} + +// CPU registers +def R0 : Ri< 0, "r0">, DwarfRegNum<[0]>; +def R1 : Ri< 1, "r1">, DwarfRegNum<[1]>; +def R2 : Ri< 2, "r2">, DwarfRegNum<[2]>; +def R3 : Ri< 3, "r3">, DwarfRegNum<[3]>; +def R4 : Ri< 4, "r4">, DwarfRegNum<[4]>; +def R5 : Ri< 5, "r5">, DwarfRegNum<[5]>; +def R6 : Ri< 6, "r6">, DwarfRegNum<[6]>; +def R7 : Ri< 7, "r7">, DwarfRegNum<[7]>; +def R8 : Ri< 8, "r8">, DwarfRegNum<[8]>; +def R9 : Ri< 9, "r9">, DwarfRegNum<[9]>; +def R10 : Ri<10, "r10">, DwarfRegNum<[10]>; +def R11 : Ri<11, "r11">, DwarfRegNum<[11]>; +def CP : Ri<12, "cp">, DwarfRegNum<[12]>; +def DP : Ri<13, "dp">, DwarfRegNum<[13]>; +def SP : Ri<14, "sp">, DwarfRegNum<[14]>; +def LR : Ri<15, "lr">, DwarfRegNum<[15]>; + +// Register classes. +// +def GRRegs : RegisterClass<"XCore", [i32], 32, + // Return values and arguments + [R0, R1, R2, R3, + // Not preserved across procedure calls + R11, + // Callee save + R4, R5, R6, R7, R8, R9, R10]> { + let MethodProtos = [{ + iterator allocation_order_begin(const MachineFunction &MF) const; + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + GRRegsClass::iterator + GRRegsClass::allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + GRRegsClass::iterator + GRRegsClass::allocation_order_end(const MachineFunction &MF) const { + const TargetMachine &TM = MF.getTarget(); + const TargetRegisterInfo *RI = TM.getRegisterInfo(); + if (RI->hasFP(MF)) + return end()-1; // don't allocate R10 + else + return end(); + } + }]; +} + +def RRegs : RegisterClass<"XCore", [i32], 32, + // Reserved + [CP, DP, SP, LR]> { + let MethodProtos = [{ + iterator allocation_order_begin(const MachineFunction &MF) const; + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + RRegsClass::iterator + RRegsClass::allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + RRegsClass::iterator + RRegsClass::allocation_order_end(const MachineFunction &MF) const { + // No allocatable registers + return begin(); + } + }]; +} diff --git a/lib/Target/XCore/XCoreSubtarget.cpp b/lib/Target/XCore/XCoreSubtarget.cpp new file mode 100644 index 0000000..78a6fa5 --- /dev/null +++ b/lib/Target/XCore/XCoreSubtarget.cpp @@ -0,0 +1,20 @@ +//===- XCoreSubtarget.cpp - XCore Subtarget Information -----------*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the XCore specific subclass of TargetSubtarget. +// +//===----------------------------------------------------------------------===// + +#include "XCoreSubtarget.h" +#include "XCore.h" +using namespace llvm; + +XCoreSubtarget::XCoreSubtarget(const std::string &TT, const std::string &FS) +{ +} diff --git a/lib/Target/XCore/XCoreSubtarget.h b/lib/Target/XCore/XCoreSubtarget.h new file mode 100644 index 0000000..f8be3ec --- /dev/null +++ b/lib/Target/XCore/XCoreSubtarget.h @@ -0,0 +1,39 @@ +//=====-- XCoreSubtarget.h - Define Subtarget for the XCore -----*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file declares the XCore specific subclass of TargetSubtarget. +// +//===----------------------------------------------------------------------===// + +#ifndef XCORESUBTARGET_H +#define XCORESUBTARGET_H + +#include "llvm/Target/TargetSubtarget.h" +#include "llvm/Target/TargetMachine.h" + +#include <string> + +namespace llvm { + +class XCoreSubtarget : public TargetSubtarget { + +public: + /// This constructor initializes the data members to match that + /// of the specified triple. + /// + XCoreSubtarget(const std::string &TT, const std::string &FS); + + /// ParseSubtargetFeatures - Parses features string setting specified + /// subtarget options. Definition of function is auto generated by tblgen. + std::string ParseSubtargetFeatures(const std::string &FS, + const std::string &CPU); +}; +} // End llvm namespace + +#endif diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp new file mode 100644 index 0000000..267f46a --- /dev/null +++ b/lib/Target/XCore/XCoreTargetMachine.cpp @@ -0,0 +1,44 @@ +//===-- XCoreTargetMachine.cpp - Define TargetMachine for XCore -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +#include "XCoreMCAsmInfo.h" +#include "XCoreTargetMachine.h" +#include "XCore.h" +#include "llvm/Module.h" +#include "llvm/PassManager.h" +#include "llvm/Target/TargetRegistry.h" +using namespace llvm; + +/// XCoreTargetMachine ctor - Create an ILP32 architecture model +/// +XCoreTargetMachine::XCoreTargetMachine(const Target &T, const std::string &TT, + const std::string &FS) + : LLVMTargetMachine(T, TT), + Subtarget(TT, FS), + DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-" + "i16:16:32-i32:32:32-i64:32:32-n32"), + InstrInfo(), + FrameInfo(*this), + TLInfo(*this) { +} + +bool XCoreTargetMachine::addInstSelector(PassManagerBase &PM, + CodeGenOpt::Level OptLevel) { + PM.add(createXCoreISelDag(*this)); + return false; +} + +// Force static initialization. +extern "C" void LLVMInitializeXCoreTarget() { + RegisterTargetMachine<XCoreTargetMachine> X(TheXCoreTarget); + RegisterAsmInfo<XCoreMCAsmInfo> Y(TheXCoreTarget); +} diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h new file mode 100644 index 0000000..b0b1464 --- /dev/null +++ b/lib/Target/XCore/XCoreTargetMachine.h @@ -0,0 +1,54 @@ +//===-- XCoreTargetMachine.h - Define TargetMachine for XCore ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the XCore specific subclass of TargetMachine. 
+// +//===----------------------------------------------------------------------===// + +#ifndef XCORETARGETMACHINE_H +#define XCORETARGETMACHINE_H + +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetData.h" +#include "XCoreFrameInfo.h" +#include "XCoreSubtarget.h" +#include "XCoreInstrInfo.h" +#include "XCoreISelLowering.h" + +namespace llvm { + +class XCoreTargetMachine : public LLVMTargetMachine { + XCoreSubtarget Subtarget; + const TargetData DataLayout; // Calculates type size & alignment + XCoreInstrInfo InstrInfo; + XCoreFrameInfo FrameInfo; + XCoreTargetLowering TLInfo; +public: + XCoreTargetMachine(const Target &T, const std::string &TT, + const std::string &FS); + + virtual const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; } + virtual const XCoreFrameInfo *getFrameInfo() const { return &FrameInfo; } + virtual const XCoreSubtarget *getSubtargetImpl() const { return &Subtarget; } + virtual XCoreTargetLowering *getTargetLowering() const { + return const_cast<XCoreTargetLowering*>(&TLInfo); + } + + virtual const TargetRegisterInfo *getRegisterInfo() const { + return &InstrInfo.getRegisterInfo(); + } + virtual const TargetData *getTargetData() const { return &DataLayout; } + + // Pass Pipeline Configuration + virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel); +}; + +} // end namespace llvm + +#endif diff --git a/lib/Target/XCore/XCoreTargetObjectFile.cpp b/lib/Target/XCore/XCoreTargetObjectFile.cpp new file mode 100644 index 0000000..7de3b55 --- /dev/null +++ b/lib/Target/XCore/XCoreTargetObjectFile.cpp @@ -0,0 +1,67 @@ +//===-- XCoreTargetObjectFile.cpp - XCore object files --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "XCoreTargetObjectFile.h" +#include "XCoreSubtarget.h" +#include "MCSectionXCore.h" +#include "llvm/Target/TargetMachine.h" +using namespace llvm; + + +void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){ + TargetLoweringObjectFileELF::Initialize(Ctx, TM); + + DataSection = + MCSectionXCore::Create(".dp.data", MCSectionELF::SHT_PROGBITS, + MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE | + MCSectionXCore::SHF_DP_SECTION, + SectionKind::getDataRel(), false, getContext()); + BSSSection = + MCSectionXCore::Create(".dp.bss", MCSectionELF::SHT_NOBITS, + MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE | + MCSectionXCore::SHF_DP_SECTION, + SectionKind::getBSS(), false, getContext()); + + MergeableConst4Section = + MCSectionXCore::Create(".cp.rodata.cst4", MCSectionELF::SHT_PROGBITS, + MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE | + MCSectionXCore::SHF_CP_SECTION, + SectionKind::getMergeableConst4(), false, + getContext()); + MergeableConst8Section = + MCSectionXCore::Create(".cp.rodata.cst8", MCSectionELF::SHT_PROGBITS, + MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE | + MCSectionXCore::SHF_CP_SECTION, + SectionKind::getMergeableConst8(), false, + getContext()); + MergeableConst16Section = + MCSectionXCore::Create(".cp.rodata.cst16", MCSectionELF::SHT_PROGBITS, + MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE | + MCSectionXCore::SHF_CP_SECTION, + SectionKind::getMergeableConst16(), false, + getContext()); + + // TLS globals are lowered in the backend to arrays indexed by the current + // thread id. 
After lowering they require no special handling by the linker + // and can be placed in the standard data / bss sections. + TLSDataSection = DataSection; + TLSBSSSection = BSSSection; + + ReadOnlySection = + MCSectionXCore::Create(".cp.rodata", MCSectionELF::SHT_PROGBITS, + MCSectionELF::SHF_ALLOC | + MCSectionXCore::SHF_CP_SECTION, + SectionKind::getReadOnlyWithRel(), false, + getContext()); + + // Dynamic linking is not supported. Data with relocations is placed in the + // same section as data without relocations. + DataRelSection = DataRelLocalSection = DataSection; + DataRelROSection = DataRelROLocalSection = ReadOnlySection; +} diff --git a/lib/Target/XCore/XCoreTargetObjectFile.h b/lib/Target/XCore/XCoreTargetObjectFile.h new file mode 100644 index 0000000..7efb990 --- /dev/null +++ b/lib/Target/XCore/XCoreTargetObjectFile.h @@ -0,0 +1,26 @@ +//===-- llvm/Target/XCoreTargetObjectFile.h - XCore Object Info -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_XCORE_TARGETOBJECTFILE_H +#define LLVM_TARGET_XCORE_TARGETOBJECTFILE_H + +#include "llvm/Target/TargetLoweringObjectFile.h" + +namespace llvm { + + class XCoreTargetObjectFile : public TargetLoweringObjectFileELF { + public: + + void Initialize(MCContext &Ctx, const TargetMachine &TM); + + // TODO: Classify globals as xcore wishes. + }; +} // end namespace llvm + +#endif |
