-rw-r--r--  lib/Target/ARM/ARMInstrNEON.td    |  39
-rw-r--r--  test/MC/ARM/neon-shift-encoding.s | 104
2 files changed, 110 insertions, 33 deletions
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 76654fb..14d480d 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -6494,6 +6494,45 @@ def : NEONInstAlias<"vpadd${p}.i32 $Vdn, $Vm",
 def : NEONInstAlias<"vpadd${p}.f32 $Vdn, $Vm",
                     (VPADDf DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
 
+// Two-operand variants for VSRA.
+    // Signed.
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+                    (VSRAsv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+                    (VSRAsv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+                    (VSRAsv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+                    (VSRAsv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+                    (VSRAsv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+                    (VSRAsv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+                    (VSRAsv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+                    (VSRAsv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+    // Unsigned.
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+                    (VSRAuv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+                    (VSRAuv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+                    (VSRAuv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+                    (VSRAuv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+                    (VSRAuv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+                    (VSRAuv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+                    (VSRAuv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+                    (VSRAuv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
 // Two-operand variants for VSRI.
 def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
                     (VSRIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
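Note: each alias above makes the assembler accept a two-operand form of VSRA and expand it to the canonical three-operand form, repeating the destination register as the shifted source. A minimal sketch of the equivalence, using forms exercised by the test below (register choices are just examples):

    vsra.s8  d16, #7      @ accepted as an alias for: vsra.s8  d16, d16, #7
    vsra.u64 q7, #63      @ accepted as an alias for: vsra.u64 q7, q7, #63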
diff --git a/test/MC/ARM/neon-shift-encoding.s b/test/MC/ARM/neon-shift-encoding.s
index d47eeca..cd450a8 100644
--- a/test/MC/ARM/neon-shift-encoding.s
+++ b/test/MC/ARM/neon-shift-encoding.s
@@ -105,39 +105,77 @@ _foo:
 @ CHECK: vshr.s32	q8, q8, #31     @ encoding: [0x70,0x00,0xe1,0xf2]
 @ CHECK: vshr.s64	q8, q8, #63     @ encoding: [0xf0,0x00,0xc1,0xf2]
 
-	vsra.u8		d16, d16, #7
-	vsra.u16	d16, d16, #15
-	vsra.u32	d16, d16, #31
-	vsra.u64	d16, d16, #63
-	vsra.u8		q8, q8, #7
-	vsra.u16	q8, q8, #15
-	vsra.u32	q8, q8, #31
-	vsra.u64	q8, q8, #63
-	vsra.s8		d16, d16, #7
-	vsra.s16	d16, d16, #15
-	vsra.s32	d16, d16, #31
-	vsra.s64	d16, d16, #63
-	vsra.s8		q8, q8, #7
-	vsra.s16	q8, q8, #15
-	vsra.s32	q8, q8, #31
-	vsra.s64	q8, q8, #63
-
-@ CHECK: vsra.u8	d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf3]
-@ CHECK: vsra.u16	d16, d16, #15   @ encoding: [0x30,0x01,0xd1,0xf3]
-@ CHECK: vsra.u32	d16, d16, #31   @ encoding: [0x30,0x01,0xe1,0xf3]
-@ CHECK: vsra.u64	d16, d16, #63   @ encoding: [0xb0,0x01,0xc1,0xf3]
-@ CHECK: vsra.u8	q8, q8, #7      @ encoding: [0x70,0x01,0xc9,0xf3]
-@ CHECK: vsra.u16	q8, q8, #15     @ encoding: [0x70,0x01,0xd1,0xf3]
-@ CHECK: vsra.u32	q8, q8, #31     @ encoding: [0x70,0x01,0xe1,0xf3]
-@ CHECK: vsra.u64	q8, q8, #63     @ encoding: [0xf0,0x01,0xc1,0xf3]
-@ CHECK: vsra.s8	d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf2]
-@ CHECK: vsra.s16	d16, d16, #15   @ encoding: [0x30,0x01,0xd1,0xf2]
-@ CHECK: vsra.s32	d16, d16, #31   @ encoding: [0x30,0x01,0xe1,0xf2]
-@ CHECK: vsra.s64	d16, d16, #63   @ encoding: [0xb0,0x01,0xc1,0xf2]
-@ CHECK: vsra.s8	q8, q8, #7      @ encoding: [0x70,0x01,0xc9,0xf2]
-@ CHECK: vsra.s16	q8, q8, #15     @ encoding: [0x70,0x01,0xd1,0xf2]
-@ CHECK: vsra.s32	q8, q8, #31     @ encoding: [0x70,0x01,0xe1,0xf2]
-@ CHECK: vsra.s64	q8, q8, #63     @ encoding: [0xf0,0x01,0xc1,0xf2]
+
+	vsra.s8		d16, d6, #7
+	vsra.s16	d26, d18, #15
+	vsra.s32	d11, d10, #31
+	vsra.s64	d12, d19, #63
+	vsra.s8		q1, q8, #7
+	vsra.s16	q2, q7, #15
+	vsra.s32	q3, q6, #31
+	vsra.s64	q4, q5, #63
+
+	vsra.s8		d16, #7
+	vsra.s16	d15, #15
+	vsra.s32	d14, #31
+	vsra.s64	d13, #63
+	vsra.s8		q4, #7
+	vsra.s16	q5, #15
+	vsra.s32	q6, #31
+	vsra.s64	q7, #63
+
+@ CHECK: vsra.s8	d16, d6, #7     @ encoding: [0x16,0x01,0xc9,0xf2]
+@ CHECK: vsra.s16	d26, d18, #15   @ encoding: [0x32,0xa1,0xd1,0xf2]
+@ CHECK: vsra.s32	d11, d10, #31   @ encoding: [0x1a,0xb1,0xa1,0xf2]
+@ CHECK: vsra.s64	d12, d19, #63   @ encoding: [0xb3,0xc1,0x81,0xf2]
+@ CHECK: vsra.s8	q1, q8, #7      @ encoding: [0x70,0x21,0x89,0xf2]
+@ CHECK: vsra.s16	q2, q7, #15     @ encoding: [0x5e,0x41,0x91,0xf2]
+@ CHECK: vsra.s32	q3, q6, #31     @ encoding: [0x5c,0x61,0xa1,0xf2]
+@ CHECK: vsra.s64	q4, q5, #63     @ encoding: [0xda,0x81,0x81,0xf2]
+@ CHECK: vsra.s8	d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf2]
+@ CHECK: vsra.s16	d15, d15, #15   @ encoding: [0x1f,0xf1,0x91,0xf2]
+@ CHECK: vsra.s32	d14, d14, #31   @ encoding: [0x1e,0xe1,0xa1,0xf2]
+@ CHECK: vsra.s64	d13, d13, #63   @ encoding: [0x9d,0xd1,0x81,0xf2]
+@ CHECK: vsra.s8	q4, q4, #7      @ encoding: [0x58,0x81,0x89,0xf2]
+@ CHECK: vsra.s16	q5, q5, #15     @ encoding: [0x5a,0xa1,0x91,0xf2]
+@ CHECK: vsra.s32	q6, q6, #31     @ encoding: [0x5c,0xc1,0xa1,0xf2]
+@ CHECK: vsra.s64	q7, q7, #63     @ encoding: [0xde,0xe1,0x81,0xf2]
+
+
+	vsra.u8		d16, d6, #7
+	vsra.u16	d26, d18, #15
+	vsra.u32	d11, d10, #31
+	vsra.u64	d12, d19, #63
+	vsra.u8		q1, q8, #7
+	vsra.u16	q2, q7, #15
+	vsra.u32	q3, q6, #31
+	vsra.u64	q4, q5, #63
+
+	vsra.u8		d16, #7
+	vsra.u16	d15, #15
+	vsra.u32	d14, #31
+	vsra.u64	d13, #63
+	vsra.u8		q4, #7
+	vsra.u16	q5, #15
+	vsra.u32	q6, #31
+	vsra.u64	q7, #63
+
+@ CHECK: vsra.u8	d16, d6, #7     @ encoding: [0x16,0x01,0xc9,0xf3]
+@ CHECK: vsra.u16	d26, d18, #15   @ encoding: [0x32,0xa1,0xd1,0xf3]
+@ CHECK: vsra.u32	d11, d10, #31   @ encoding: [0x1a,0xb1,0xa1,0xf3]
+@ CHECK: vsra.u64	d12, d19, #63   @ encoding: [0xb3,0xc1,0x81,0xf3]
+@ CHECK: vsra.u8	q1, q8, #7      @ encoding: [0x70,0x21,0x89,0xf3]
+@ CHECK: vsra.u16	q2, q7, #15     @ encoding: [0x5e,0x41,0x91,0xf3]
+@ CHECK: vsra.u32	q3, q6, #31     @ encoding: [0x5c,0x61,0xa1,0xf3]
+@ CHECK: vsra.u64	q4, q5, #63     @ encoding: [0xda,0x81,0x81,0xf3]
+@ CHECK: vsra.u8	d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf3]
+@ CHECK: vsra.u16	d15, d15, #15   @ encoding: [0x1f,0xf1,0x91,0xf3]
+@ CHECK: vsra.u32	d14, d14, #31   @ encoding: [0x1e,0xe1,0xa1,0xf3]
+@ CHECK: vsra.u64	d13, d13, #63   @ encoding: [0x9d,0xd1,0x81,0xf3]
+@ CHECK: vsra.u8	q4, q4, #7      @ encoding: [0x58,0x81,0x89,0xf3]
+@ CHECK: vsra.u16	q5, q5, #15     @ encoding: [0x5a,0xa1,0x91,0xf3]
+@ CHECK: vsra.u32	q6, q6, #31     @ encoding: [0x5c,0xc1,0xa1,0xf3]
+@ CHECK: vsra.u64	q7, q7, #63     @ encoding: [0xde,0xe1,0x81,0xf3]
 
 	vsri.8	d16, d6, #7
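Note: the encodings above can be checked by hand with llvm-mc. The invocation below is a sketch (the test's actual RUN line, not shown in this hunk, may use different triple or CPU flags); the expected output is the CHECK line for vsra.s8 d16, d16, #7 from the hunk above:

    $ echo "vsra.s8 d16, #7" | llvm-mc -triple armv7 -mattr=+neon -show-encoding
            vsra.s8 d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf2]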