diff options
-rw-r--r-- | lib/Target/X86/X86InstrSSE.td           | 18
-rw-r--r-- | lib/Target/X86/X86MCCodeEmitter.cpp     | 28
-rw-r--r-- | test/MC/AsmParser/X86/x86_32-encoding.s | 33
-rw-r--r-- | test/MC/AsmParser/X86/x86_64-encoding.s | 32
-rw-r--r-- | utils/TableGen/X86RecognizableInstr.cpp |  9
5 files changed, 113 insertions, 7 deletions
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 32358a3..fe66d8b 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -704,6 +704,24 @@ multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr, !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"), [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>; + def V#NAME#SSrm : VSSI<opc, MRMSrcMem, (outs FR32:$dst), + (ins FR32:$src1, f32mem:$src2), + !strconcat(OpcodeStr, + "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []> { + let Constraints = ""; + let isAsmParserOnly = 1; + } + + def V#NAME#SDrm : VSDI<opc, MRMSrcMem, (outs FR64:$dst), + (ins FR64:$src1, f64mem:$src2), + !strconcat(OpcodeStr, + "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []> { + let Constraints = ""; + let isAsmParserOnly = 1; + } + // Vector operation, reg+reg. def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), diff --git a/lib/Target/X86/X86MCCodeEmitter.cpp b/lib/Target/X86/X86MCCodeEmitter.cpp index d105b5d..9a32b6ea0 100644 --- a/lib/Target/X86/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/X86MCCodeEmitter.cpp @@ -349,6 +349,13 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // unsigned char VEX_R = 0x1; + // VEX_X: equivalent to REX.X, only used when a + // register is used for index in SIB Byte. 
+ // + // 1: Same as REX.X=0 (must be 1 in 32-bit mode) + // 0: Same as REX.X=1 (64-bit mode only) + unsigned char VEX_X = 0x1; + // VEX_B: // // 1: Same as REX_B=0 (ignored in 32-bit mode) @@ -415,9 +422,12 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, unsigned NumOps = MI.getNumOperands(); unsigned i = 0; unsigned SrcReg = 0, SrcRegNum = 0; + bool IsSrcMem = false; switch (TSFlags & X86II::FormMask) { case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!"); + case X86II::MRMSrcMem: + IsSrcMem = true; case X86II::MRMSrcReg: if (MI.getOperand(0).isReg() && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg())) @@ -447,6 +457,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, const MCOperand &MO = MI.getOperand(i); if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg())) VEX_B = 0x0; + if (!VEX_B && MO.isReg() && IsSrcMem && + X86InstrInfo::isX86_64ExtendedReg(MO.getReg())) + VEX_X = 0x0; } break; default: @@ -464,11 +477,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // | C5h | | R | vvvv | L | pp | // +-----+ +-------------------+ // - // Note: VEX.X isn't used so far - // unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3); - if (VEX_B /* & VEX_X */) { // 2 byte VEX prefix + if (VEX_B && VEX_X) { // 2 byte VEX prefix EmitByte(0xC5, CurByte, OS); EmitByte(LastByte | (VEX_R << 7), CurByte, OS); return; @@ -476,7 +487,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // 3 byte VEX prefix EmitByte(0xC4, CurByte, OS); - EmitByte(VEX_R << 7 | 1 << 6 /* VEX_X = 1 */ | VEX_5M, CurByte, OS); + EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_5M, CurByte, OS); EmitByte(LastByte | (VEX_W << 7), CurByte, OS); } @@ -751,8 +762,13 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, AddrOperands = X86AddrNumOperands - 1; // No segment register else AddrOperands = X86AddrNumOperands; - - EmitMemModRMByte(MI, 
CurOp+1, GetX86RegNum(MI.getOperand(CurOp)), + + if (IsAVXForm) + AddrOperands++; + + // Skip the register source (which is encoded in VEX_VVVV) + EmitMemModRMByte(MI, IsAVXForm ? CurOp+2 : CurOp+1, + GetX86RegNum(MI.getOperand(CurOp)), TSFlags, CurByte, OS, Fixups); CurOp += AddrOperands + 1; break; diff --git a/test/MC/AsmParser/X86/x86_32-encoding.s b/test/MC/AsmParser/X86/x86_32-encoding.s index 7f79b9d..237f67b 100644 --- a/test/MC/AsmParser/X86/x86_32-encoding.s +++ b/test/MC/AsmParser/X86/x86_32-encoding.s @@ -10084,3 +10084,36 @@ pshufb CPI1_0(%rip), %xmm1 // CHECK: vdivsd %xmm4, %xmm6, %xmm2 // CHECK: encoding: [0xc5,0xcb,0x5e,0xd4] vdivsd %xmm4, %xmm6, %xmm2 + +// CHECK: vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xea,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde] + vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xea,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde] + vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vmulss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xea,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde] + vmulss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vdivss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xea,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde] + vdivss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vaddsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xeb,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde] + vaddsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vsubsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xeb,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde] + vsubsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vmulsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: [0xc5,0xeb,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde] + vmulsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + +// CHECK: vdivsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 +// CHECK: encoding: 
[0xc5,0xeb,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde] + vdivsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5 + diff --git a/test/MC/AsmParser/X86/x86_64-encoding.s b/test/MC/AsmParser/X86/x86_64-encoding.s index b259608..592db35 100644 --- a/test/MC/AsmParser/X86/x86_64-encoding.s +++ b/test/MC/AsmParser/X86/x86_64-encoding.s @@ -135,3 +135,35 @@ vsubsd %xmm8, %xmm9, %xmm10 // CHECK: vdivsd %xmm8, %xmm9, %xmm10 // CHECK: encoding: [0xc4,0x41,0x33,0x5e,0xd0] vdivsd %xmm8, %xmm9, %xmm10 + +// CHECK: vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2a,0x58,0x5c,0xd9,0xfc] +vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2a,0x5c,0x5c,0xd9,0xfc] +vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2a,0x59,0x5c,0xd9,0xfc] +vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2a,0x5e,0x5c,0xd9,0xfc] +vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2b,0x58,0x5c,0xd9,0xfc] +vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2b,0x5c,0x5c,0xd9,0xfc] +vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2b,0x59,0x5c,0xd9,0xfc] +vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11 + +// CHECK: vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11 +// CHECK: encoding: [0xc5,0x2b,0x5e,0x5c,0xd9,0xfc] +vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11 diff --git a/utils/TableGen/X86RecognizableInstr.cpp b/utils/TableGen/X86RecognizableInstr.cpp index 5fe2bfe..19b51cb 100644 --- a/utils/TableGen/X86RecognizableInstr.cpp +++ b/utils/TableGen/X86RecognizableInstr.cpp @@ -535,7 +535,8 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) { HANDLE_OPERAND(rmRegister) if (HasVEX_4VPrefix) 
- // FIXME: encoding of registers in AVX is in 1's complement form. + // FIXME: In AVX, the register below becomes the one encoded + // in ModRMVEX and the one above the one in the VEX.VVVV field HANDLE_OPTIONAL(rmRegister) else HANDLE_OPTIONAL(immediate) @@ -547,6 +548,12 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) { assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 && "Unexpected number of operands for MRMSrcMemFrm"); HANDLE_OPERAND(roRegister) + + if (HasVEX_4VPrefix) + // FIXME: In AVX, the register below becomes the one encoded + // in ModRMVEX and the one above the one in the VEX.VVVV field + HANDLE_OPTIONAL(rmRegister) + HANDLE_OPERAND(memory) HANDLE_OPTIONAL(immediate) break; |