diff options

| field | value | date |
|---|---|---|
| author | Chris Lattner <sabre@nondot.org> | 2011-04-15 05:18:47 +0000 |
| committer | Chris Lattner <sabre@nondot.org> | 2011-04-15 05:18:47 +0000 |
| commit | 7a2bdde0a0eebcd2125055e0eacaca040f0b766c (patch) | |
| tree | 1cd5fa470f290368855c9081cb213ed118812805 /lib/Target | |
| parent | bcb8c6d09ee426e0f774e3412912f6ae9e5f78dd (diff) | |
| download | external_llvm-7a2bdde0a0eebcd2125055e0eacaca040f0b766c.zip external_llvm-7a2bdde0a0eebcd2125055e0eacaca040f0b766c.tar.gz external_llvm-7a2bdde0a0eebcd2125055e0eacaca040f0b766c.tar.bz2 | |
Fix a ton of comment typos found by codespell.  Patch by
Luis Felipe Strano Moraes!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129558 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target')
41 files changed, 63 insertions, 63 deletions
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index 8eb1993..4d66f85 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -1708,7 +1708,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {      return;    }    // Tail jump branches are really just branch instructions with additional -  // code-gen attributes. Convert them to the cannonical form here. +  // code-gen attributes. Convert them to the canonical form here.    case ARM::TAILJMPd:    case ARM::TAILJMPdND: {      MCInst TmpInst, TmpInst2; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index fdc5b9e..af51fe7 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1201,7 +1201,7 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,  }  /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to -/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should +/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should  /// be scheduled togther. On some targets if two loads are loading from  /// addresses in the same cache line, it's better if they are scheduled  /// together. This function takes two integers that represent the load offsets diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 517328b..9a2faf8 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -291,7 +291,7 @@ public:                                         int64_t &Offset1, int64_t &Offset2)const;    /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to -  /// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should +  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should    /// be scheduled togther. 
On some targets if two loads are loading from    /// addresses in the same cache line, it's better if they are scheduled    /// together. This function takes two integers that represent the load offsets diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 1918fd9..3d1eaf0 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -88,7 +88,7 @@ BitVector ARMBaseRegisterInfo::  getReservedRegs(const MachineFunction &MF) const {    const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); -  // FIXME: avoid re-calculating this everytime. +  // FIXME: avoid re-calculating this every time.    BitVector Reserved(getNumRegs());    Reserved.set(ARM::SP);    Reserved.set(ARM::PC); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index ded62eb..62d5b16 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -725,7 +725,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)  // pressure of the register class's representative and all of it's super  // classes' representatives transitively. We have not implemented this because  // of the difficulty prior to coalescing of modeling operand register classes -// due to the common occurence of cross class copies and subregister insertions +// due to the common occurrence of cross class copies and subregister insertions  // and extractions.  std::pair<const TargetRegisterClass*, uint8_t>  ARMTargetLowering::findRepresentativeClass(EVT VT) const{ @@ -1323,7 +1323,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,      // than necessary, because it means that each store effectively depends      // on every argument instead of just those arguments it would clobber. -    // Do not flag preceeding copytoreg stuff together with the following stuff. +    // Do not flag preceding copytoreg stuff together with the following stuff.      
InFlag = SDValue();      for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {        Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index eb8f4aa..b2961f8 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -3690,7 +3690,7 @@ let isCall = 1,  //   here, and we're using the stack frame for the containing function to  //   save/restore registers, we can't keep anything live in regs across  //   the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon -//   when we get here from a longjmp(). We force everthing out of registers +//   when we get here from a longjmp(). We force everything out of registers  //   except for our own input by listing the relevant registers in Defs. By  //   doing so, we also cause the prologue/epilogue code to actively preserve  //   all of the callee-saved resgisters, which is exactly what we want. diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 736b56a..9c88c10 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -1439,7 +1439,7 @@ def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br,  // from some other function to get here, and we're using the stack frame for the  // containing function to save/restore registers, we can't keep anything live in  // regs across the eh_sjlj_setjmp(), else it will almost certainly have been -// tromped upon when we get here from a longjmp(). We force everthing out of +// tromped upon when we get here from a longjmp(). We force everything out of  // registers except for our own input by listing the relevant registers in  // Defs. By doing so, we also cause the prologue/epilogue code to actively  // preserve all of the callee-saved resgisters, which is exactly what we want. 
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index 685a9c3..50065f9 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -2965,7 +2965,7 @@ let isCall = 1,  //   here, and we're using the stack frame for the containing function to  //   save/restore registers, we can't keep anything live in regs across  //   the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon -//   when we get here from a longjmp(). We force everthing out of registers +//   when we get here from a longjmp(). We force everything out of registers  //   except for our own input by listing the relevant registers in Defs. By  //   doing so, we also cause the prologue/epilogue code to actively preserve  //   all of the callee-saved resgisters, which is exactly what we want. diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index ac5cbfe..334b50f 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1287,14 +1287,14 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {          MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,                       CurrPred, CurrPredReg, Scratch, MemOps, Merges); -        // Try folding preceeding/trailing base inc/dec into the generated +        // Try folding preceding/trailing base inc/dec into the generated          // LDM/STM ops.          for (unsigned i = 0, e = Merges.size(); i < e; ++i)            if (MergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))              ++NumMerges;          NumMerges += Merges.size(); -        // Try folding preceeding/trailing base inc/dec into those load/store +        // Try folding preceding/trailing base inc/dec into those load/store          // that were not merged to form LDM/STM ops.          
for (unsigned i = 0; i != NumMemOps; ++i)            if (!MemOps[i].Merged) @@ -1304,7 +1304,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {          // RS may be pointing to an instruction that's deleted.          RS->skipTo(prior(MBBI));        } else if (NumMemOps == 1) { -        // Try folding preceeding/trailing base inc/dec into the single +        // Try folding preceding/trailing base inc/dec into the single          // load/store.          if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {            ++NumMerges; @@ -1334,7 +1334,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {  }  /// MergeReturnIntoLDM - If this is a exit BB, try merging the return ops -/// ("bx lr" and "mov pc, lr") into the preceeding stack restore so it +/// ("bx lr" and "mov pc, lr") into the preceding stack restore so it  /// directly restore the value of LR into pc.  ///   ldmfd sp!, {..., lr}  ///   bx lr diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 2b9202b..aa1e398 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -35,7 +35,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,    // This requires 4-byte alignment.    if ((Align & 3) != 0)      return SDValue(); -  // This requires the copy size to be a constant, preferrably +  // This requires the copy size to be a constant, preferably    // within a subtarget-specific limit.    
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);    if (!ConstantSize) diff --git a/lib/Target/Alpha/Alpha.td b/lib/Target/Alpha/Alpha.td index 4508eda..ae79c2e 100644 --- a/lib/Target/Alpha/Alpha.td +++ b/lib/Target/Alpha/Alpha.td @@ -21,7 +21,7 @@ include "llvm/Target/Target.td"  //===----------------------------------------------------------------------===//  def FeatureCIX : SubtargetFeature<"cix", "HasCT", "true", -                                  "Enable CIX extentions">; +                                  "Enable CIX extensions">;  //===----------------------------------------------------------------------===//  // Register File Description diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index c4f43ab..ee404f0 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -296,7 +296,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token chain and    // flag operands which copy the outgoing args into registers.  The InFlag in -  // necessary since all emited instructions must be stuck together. +  // necessary since all emitted instructions must be stuck together.    
SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td index 099d715..b201712 100644 --- a/lib/Target/Alpha/AlphaInstrInfo.td +++ b/lib/Target/Alpha/AlphaInstrInfo.td @@ -1030,7 +1030,7 @@ def : Pat<(brcond (setune F8RC:$RA, immFPZ), bb:$DISP),  //WMB Mfc 18.4400 Write memory barrier  //MF_FPCR F-P 17.025 Move from FPCR  //MT_FPCR F-P 17.024 Move to FPCR -//There are in the Multimedia extentions, so let's not use them yet +//There are in the Multimedia extensions, so let's not use them yet  //def MAXSB8  : OForm<0x1C, 0x3E, "MAXSB8 $RA,$RB,$RC">; //Vector signed byte maximum  //def MAXSW4 : OForm< 0x1C, 0x3F, "MAXSW4 $RA,$RB,$RC">; //Vector signed word maximum  //def MAXUB8  : OForm<0x1C, 0x3C, "MAXUB8 $RA,$RB,$RC">; //Vector unsigned byte maximum diff --git a/lib/Target/Alpha/README.txt b/lib/Target/Alpha/README.txt index 9ae1517..cc170e3 100644 --- a/lib/Target/Alpha/README.txt +++ b/lib/Target/Alpha/README.txt @@ -33,9 +33,9 @@ add crazy vector instructions (MVI):  (MIN|MAX)(U|S)(B8|W4) min and max, signed and unsigned, byte and word  PKWB, UNPKBW pack/unpack word to byte  PKLB UNPKBL pack/unpack long to byte -PERR pixel error (sum accross bytes of bytewise abs(i8v8 a - i8v8 b)) +PERR pixel error (sum across bytes of bytewise abs(i8v8 a - i8v8 b)) -cmpbytes bytewise cmpeq of i8v8 a and i8v8 b (not part of MVI extentions) +cmpbytes bytewise cmpeq of i8v8 a and i8v8 b (not part of MVI extensions)  this has some good examples for other operations that can be synthesised well   from these rather meager vector ops (such as saturating add). 
diff --git a/lib/Target/Blackfin/BlackfinISelLowering.cpp b/lib/Target/Blackfin/BlackfinISelLowering.cpp index 7c80eec..1e1f8c9 100644 --- a/lib/Target/Blackfin/BlackfinISelLowering.cpp +++ b/lib/Target/Blackfin/BlackfinISelLowering.cpp @@ -345,7 +345,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token    // chain and flag operands which copy the outgoing args into registers. -  // The InFlag in necessary since all emited instructions must be +  // The InFlag in necessary since all emitted instructions must be    // stuck together.    SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/CellSPU/SPU64InstrInfo.td b/lib/Target/CellSPU/SPU64InstrInfo.td index 5ef5716..f340edf 100644 --- a/lib/Target/CellSPU/SPU64InstrInfo.td +++ b/lib/Target/CellSPU/SPU64InstrInfo.td @@ -24,7 +24,7 @@  // 5. The code sequences for r64 and v2i64 are probably overly conservative,  //    compared to the code that gcc produces.  // -// M00$E B!tes Kan be Pretty N@sTi!!!!! (appologies to Monty!) +// M00$E B!tes Kan be Pretty N@sTi!!!!! (apologies to Monty!)  //-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~  // selb instruction definition for i64. 
Note that the selection mask is diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 743a4d7..8668da3 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -705,7 +705,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {                                                   offset                                                  )); -    // Shift the low similarily +    // Shift the low similarly      // TODO: add SPUISD::SHL_BYTES      low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset ); diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp index f39826b..585469b 100644 --- a/lib/Target/MBlaze/MBlazeISelLowering.cpp +++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp @@ -274,7 +274,7 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI,    F->insert(It, loop);    F->insert(It, finish); -  // Update machine-CFG edges by transfering adding all successors and +  // Update machine-CFG edges by transferring adding all successors and    // remaining instructions from the current block to the new block which    // will contain the Phi node for the select.    finish->splice(finish->begin(), MBB, @@ -456,7 +456,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,    F->insert(It, start);    F->insert(It, exit); -  // Update machine-CFG edges by transfering adding all successors and +  // Update machine-CFG edges by transferring adding all successors and    // remaining instructions from the current block to the new block which    // will contain the Phi node for the select.    exit->splice(exit->begin(), MBB, llvm::next(MachineBasicBlock::iterator(MI)), @@ -778,7 +778,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,    // Build a sequence of copy-to-reg nodes chained together with token    // chain and flag operands which copy the outgoing args into registers. 
-  // The InFlag in necessary since all emited instructions must be +  // The InFlag in necessary since all emitted instructions must be    // stuck together.    SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/MBlaze/MBlazeSchedule3.td b/lib/Target/MBlaze/MBlazeSchedule3.td index 2984851..ccbf99d 100644 --- a/lib/Target/MBlaze/MBlazeSchedule3.td +++ b/lib/Target/MBlaze/MBlazeSchedule3.td @@ -8,7 +8,7 @@  //===----------------------------------------------------------------------===//  //===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for the threee stage pipeline. +// MBlaze instruction itineraries for the three stage pipeline.  //===----------------------------------------------------------------------===//  def MBlazePipe3Itineraries : ProcessorItineraries<    [IF,ID,EX], [], [ diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index a95d59c..006785b 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -515,7 +515,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token chain and    // flag operands which copy the outgoing args into registers.  The InFlag in -  // necessary since all emited instructions must be stuck together. +  // necessary since all emitted instructions must be stuck together.    
SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td index 5102c69..b79016d 100644 --- a/lib/Target/Mips/Mips.td +++ b/lib/Target/Mips/Mips.td @@ -81,7 +81,7 @@ def : Proc<"r6000", [FeatureMips2]>;  def : Proc<"4ke", [FeatureMips32r2]>; -// Allegrex is a 32bit subset of r4000, both for interger and fp registers, +// Allegrex is a 32bit subset of r4000, both for integer and fp registers,  // but much more similar to Mips2 than Mips3. It also contains some of  // Mips32/Mips32r2 instructions and a custom vector fpu processor.  def : Proc<"allegrex", [FeatureMips2, FeatureSingleFloat, FeatureEABI, diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp index 5e4a7da..66a3588 100644 --- a/lib/Target/Mips/MipsFrameLowering.cpp +++ b/lib/Target/Mips/MipsFrameLowering.cpp @@ -285,7 +285,7 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {    if (ATUsed)      BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO)); -  // Save the return address only if the function isnt a leaf one. +  // Save the return address only if the function isn't a leaf one.    // sw  $ra, stack_loc($sp)    if (MFI->adjustsStack()) {      ATUsed = expandRegLargeImmPair(Mips::SP, RAOffset, NewReg, NewImm, MBB, @@ -360,7 +360,7 @@ void MipsFrameLowering::emitEpilogue(MachineFunction &MF,        BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO));    } -  // Restore the return address only if the function isnt a leaf one. +  // Restore the return address only if the function isn't a leaf one.    
// lw  $ra, stack_loc($sp)    if (MFI->adjustsStack()) {      ATUsed = expandRegLargeImmPair(Mips::SP, RAOffset, NewReg, NewImm, MBB, diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0e193f2..7baaa0f 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -723,7 +723,7 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG) const    SDValue CondRes = CreateFPCmp(DAG, Op.getOperand(1)); -  // Return if flag is not set by a floating point comparision. +  // Return if flag is not set by a floating point comparison.    if (CondRes.getOpcode() != MipsISD::FPCmp)      return Op; @@ -741,7 +741,7 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG) const  {    SDValue Cond = CreateFPCmp(DAG, Op.getOperand(0)); -  // Return if flag is not set by a floating point comparision. +  // Return if flag is not set by a floating point comparison.    if (Cond.getOpcode() != MipsISD::FPCmp)      return Op; @@ -867,7 +867,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const    // gp_rel relocation    // FIXME: we should reference the constant pool using small data sections, -  // but the asm printer currently doens't support this feature without +  // but the asm printer currently doesn't support this feature without    // hacking it. This feature should come soon so we can uncomment the    // stuff below.    //if (IsInSmallSection(C->getType())) { @@ -1189,7 +1189,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token    // chain and flag operands which copy the outgoing args into registers. -  // The InFlag in necessary since all emited instructions must be +  // The InFlag in necessary since all emitted instructions must be    // stuck together.    
SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { @@ -1272,7 +1272,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,    // Create a stack location to hold GP when PIC is used. This stack    // location is used on function prologue to save GP and also after all -  // emited CALL's to restore GP. +  // emitted CALL's to restore GP.    if (IsPIC) {        // Function can have an arbitrary number of calls, so        // hold the LastArgStackLoc with the biggest offset. diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index 251f377..995b6cd 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -187,7 +187,7 @@ let Predicates = [IsNotSingleFloat, IsNotMipsI] in {                   "sdc1 $ft, $addr", [(store AFGR64:$ft, addr:$addr)]>;  } -// LWC1 and SWC1 can always be emited with odd registers. +// LWC1 and SWC1 can always be emitted with odd registers.  def LWC1  : FFI<0b110001, (outs FGR32:$ft), (ins mem:$addr), "lwc1 $ft, $addr",                 [(set FGR32:$ft, (load addr:$addr))]>;  def SWC1  : FFI<0b111001, (outs), (ins FGR32:$ft, mem:$addr), "swc1 $ft, $addr", diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 46b97e1..5ed7600 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2155,7 +2155,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,  }  /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be -/// adjusted to accomodate the arguments for the tailcall. +/// adjusted to accommodate the arguments for the tailcall.  
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,                                     unsigned ParamSize) { @@ -2396,7 +2396,7 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,    // Emit a sequence of copyto/copyfrom virtual registers for arguments that    // might overwrite each other in case of tail call optimization.    SmallVector<SDValue, 8> MemOpChains2; -  // Do not flag preceeding copytoreg stuff together with the following stuff. +  // Do not flag preceding copytoreg stuff together with the following stuff.    InFlag = SDValue();    StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,                                      MemOpChains2, dl); diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 70574c3..edb62fa 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -544,7 +544,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token    // chain and flag operands which copy the outgoing args into registers. -  // The InFlag in necessary since all emited instructions must be +  // The InFlag in necessary since all emitted instructions must be    // stuck together.    SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 90939c3..d331614 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -451,7 +451,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token chain and    // flag operands which copy the outgoing args into registers.  The InFlag in -  // necessary since all emited instructions must be stuck together. 
+  // necessary since all emitted instructions must be stuck together.    SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index d4a88d7..a9c90f8 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -485,7 +485,7 @@ struct InternalInstruction {       consumed___ indicates that the byte was already consumed and does not       need to be consumed again */ -  /* The VEX.vvvv field, which contains a thrid register operand for some AVX +  /* The VEX.vvvv field, which contains a third register operand for some AVX       instructions */    Reg                           vvvv; diff --git a/lib/Target/X86/README-X86-64.txt b/lib/Target/X86/README-X86-64.txt index e21d69a..e7429a3 100644 --- a/lib/Target/X86/README-X86-64.txt +++ b/lib/Target/X86/README-X86-64.txt @@ -36,7 +36,7 @@ _conv:  	cmovb %rcx, %rax  	ret -Seems like the jb branch has high likelyhood of being taken. It would have +Seems like the jb branch has high likelihood of being taken. It would have  saved a few instructions.  //===---------------------------------------------------------------------===// diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index 07722f4..94cf25b 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -1572,7 +1572,7 @@ Implement processor-specific optimizations for parity with GCC on these  processors.  GCC does two optimizations:  1. ix86_pad_returns inserts a noop before ret instructions if immediately -   preceeded by a conditional branch or is the target of a jump. +   preceded by a conditional branch or is the target of a jump.  2. ix86_avoid_jump_misspredicts inserts noops in cases where a 16-byte block of     code contains more than 3 branches. 
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index 912dff0..25b8d3e 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -7,7 +7,7 @@  //  //===----------------------------------------------------------------------===//  // -// This is a target description file for the Intel i386 architecture, refered to +// This is a target description file for the Intel i386 architecture, referred to  // here as the "X86" architecture.  //  //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 3aaa693..325d061 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -1307,7 +1307,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {      // set up by FpSET_ST0, and our StackTop is off by one because of it.      unsigned Op0 = getFPReg(MI->getOperand(0));      // Restore the actual StackTop from before Fp_SET_ST0. -    // Note we can't handle Fp_SET_ST1 without a preceeding Fp_SET_ST0, and we +    // Note we can't handle Fp_SET_ST1 without a preceding Fp_SET_ST0, and we      // are not enforcing the constraint.      ++StackTop;      unsigned RegOnTop = getStackEntry(0); // This reg must remain in st(0). diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 071fbe0..dee27a0 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -296,7 +296,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,    // FIXME: This is dirty hack. The code itself is pretty mess right now.    // It should be rewritten from scratch and generalized sometimes. -  // Determine maximum offset (minumum due to stack growth). +  // Determine maximum offset (minimum due to stack growth).    
int64_t MaxOffset = 0;    for (std::vector<CalleeSavedInfo>::const_iterator           I = CSI.begin(), E = CSI.end(); I != E; ++I) @@ -785,7 +785,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,      assert(Offset >= 0 && "Offset should never be negative");      if (Offset) { -      // Check for possible merge with preceeding ADD instruction. +      // Check for possible merge with preceding ADD instruction.        Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);        emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo);      } @@ -829,7 +829,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,      int delta = -1*X86FI->getTCReturnAddrDelta();      MBBI = MBB.getLastNonDebugInstr(); -    // Check for possible merge with preceeding ADD instruction. +    // Check for possible merge with preceding ADD instruction.      delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);      emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo);    } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index cd1d201..449b87a 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1952,7 +1952,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,    return SDValue(OutRetAddr.getNode(), 1);  } -/// EmitTailCallStoreRetAddr - Emit a store of the return adress if tail call +/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call  /// optimization is performed and it is required (FPDiff!=0).  static SDValue  EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, @@ -2043,7 +2043,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,      Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));    SDValue RetAddrFrIdx; -  // Load return adress for tail calls. +  // Load return address for tail calls.    
if (isTailCall && FPDiff)      Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,                                      Is64Bit, FPDiff, dl); @@ -2200,7 +2200,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,      SmallVector<SDValue, 8> MemOpChains2;      SDValue FIN;      int FI = 0; -    // Do not flag preceeding copytoreg stuff together with the following stuff. +    // Do not flag preceding copytoreg stuff together with the following stuff.      InFlag = SDValue();      if (GuaranteedTailCallOpt) {        for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { @@ -4018,7 +4018,7 @@ static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,  /// getNumOfConsecutiveZeros - Return the number of elements of a vector  /// shuffle operation which come from a consecutively from a zero. The -/// search can start in two diferent directions, from left or right. +/// search can start in two different directions, from left or right.  static  unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,                                    bool ZerosFromLeft, SelectionDAG &DAG) { @@ -12216,7 +12216,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {      AsmPieces.clear();      SplitString(AsmStr, AsmPieces, " \t");  // Split with whitespace. -    // FIXME: this should verify that we are targetting a 486 or better.  If not, +    // FIXME: this should verify that we are targeting a 486 or better.  If not,      // we will turn this bswap into something that will be lowered to logical ops      // instead of emitting the bswap asm.  For now, we don't support 486 or lower      // so don't worry about this. 
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index f0ea068..9f7a4b0 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -163,7 +163,7 @@ def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),  } // Defs = [EFLAGS] -// Suprisingly enough, these are not two address instructions! +// Surprisingly enough, these are not two address instructions!  let Defs = [EFLAGS] in {  // Register-Integer Signed Integer Multiply  def IMUL16rri  : Ii16<0x69, MRMSrcReg,                      // GR16 = GR16*I16 diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 85ab916..c48ea15 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -232,7 +232,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)      assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");      RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U); -    // If this is not a reversable operation (because there is a many->one) +    // If this is not a reversible operation (because there is a many->one)      // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.      if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)        continue; @@ -335,7 +335,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)      assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?");      RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align); -    // If this is not a reversable operation (because there is a many->one) +    // If this is not a reversible operation (because there is a many->one)      // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.      
if (OpTbl0[i][1] & TB_NOT_REVERSABLE)        continue; @@ -460,7 +460,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)      assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries");      RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align); -    // If this is not a reversable operation (because there is a many->one) +    // If this is not a reversible operation (because there is a many->one)      // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.      if (OpTbl1[i][1] & TB_NOT_REVERSABLE)        continue; @@ -682,7 +682,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)      assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");      RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align); -    // If this is not a reversable operation (because there is a many->one) +    // If this is not a reversible operation (because there is a many->one)      // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.      if (OpTbl2[i][1] & TB_NOT_REVERSABLE)        continue; diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index 4625b4c..8da68b5 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -807,7 +807,7 @@ public:                                         int64_t &Offset1, int64_t &Offset2) const;    /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to -  /// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should +  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should    /// be scheduled togther. On some targets if two loads are loading from    /// addresses in the same cache line, it's better if they are scheduled    /// together. 
This function takes two integers that represent the load offsets diff --git a/lib/Target/X86/X86MCCodeEmitter.cpp b/lib/Target/X86/X86MCCodeEmitter.cpp index a2bd638..f195a67 100644 --- a/lib/Target/X86/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/X86MCCodeEmitter.cpp @@ -514,7 +514,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,      }      // To only check operands before the memory address ones, start -    // the search from the begining +    // the search from the beginning      if (IsDestMem)        CurOp = 0; diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 1f464f4..40b65e1 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -470,7 +470,7 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {    // FIXME: It's more complicated than this...    if (0 && requiresRealignment && MFI->hasVarSizedObjects())      report_fatal_error( -      "Stack realignment in presense of dynamic allocas is not supported"); +      "Stack realignment in presence of dynamic allocas is not supported");    // If we've requested that we force align the stack do so now.    if (ForceStackAlign) diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index 42e8193..02754f9 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -178,7 +178,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,                                          bool isVolatile, bool AlwaysInline,                                           MachinePointerInfo DstPtrInfo,                                           MachinePointerInfo SrcPtrInfo) const { -  // This requires the copy size to be a constant, preferrably +  // This requires the copy size to be a constant, preferably    // within a subtarget-specific limit.    
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);    if (!ConstantSize) diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 4817787..89dbf3d 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -967,7 +967,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,    // Build a sequence of copy-to-reg nodes chained together with token    // chain and flag operands which copy the outgoing args into registers. -  // The InFlag in necessary since all emited instructions must be +  // The InFlag in necessary since all emitted instructions must be    // stuck together.    SDValue InFlag;    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {  | 
