Diffstat (limited to 'test/CodeGen/ARM64/vaddv.ll')
-rw-r--r-- | test/CodeGen/ARM64/vaddv.ll | 233
1 file changed, 233 insertions, 0 deletions
diff --git a/test/CodeGen/ARM64/vaddv.ll b/test/CodeGen/ARM64/vaddv.ll
new file mode 100644
index 0000000..44bfa84
--- /dev/null
+++ b/test/CodeGen/ARM64/vaddv.ll
@@ -0,0 +1,233 @@
+; RUN: llc -march=arm64 -arm64-neon-syntax=apple < %s | FileCheck %s
+
+define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_s8:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8> %a1)
+  %0 = trunc i32 %vaddv.i to i8
+  ret i8 %0
+}
+
+define signext i16 @test_vaddv_s16(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_s16:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16> %a1)
+  %0 = trunc i32 %vaddv.i to i16
+  ret i16 %0
+}
+
+define i32 @test_vaddv_s32(<2 x i32> %a1) {
+; CHECK-LABEL: test_vaddv_s32:
+; 2 x i32 is not supported by the ISA, so this is a special case
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32> %a1)
+  ret i32 %vaddv.i
+}
+
+define i64 @test_vaddv_s64(<2 x i64> %a1) {
+; CHECK-LABEL: test_vaddv_s64:
+; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
+; CHECK-NEXT: fmov x0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i64 @llvm.arm64.neon.saddv.i64.v2i64(<2 x i64> %a1)
+  ret i64 %vaddv.i
+}
+
+define zeroext i8 @test_vaddv_u8(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_u8:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
+  %0 = trunc i32 %vaddv.i to i8
+  ret i8 %0
+}
+
+define i32 @test_vaddv_u8_masked(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_u8_masked:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
+  %0 = and i32 %vaddv.i, 511 ; 0x1ff
+  ret i32 %0
+}
+
+define zeroext i16 @test_vaddv_u16(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_u16:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
+  %0 = trunc i32 %vaddv.i to i16
+  ret i16 %0
+}
+
+define i32 @test_vaddv_u16_masked(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_u16_masked:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
+  %0 = and i32 %vaddv.i, 3276799 ; 0x31ffff
+  ret i32 %0
+}
+
+define i32 @test_vaddv_u32(<2 x i32> %a1) {
+; CHECK-LABEL: test_vaddv_u32:
+; 2 x i32 is not supported by the ISA, so this is a special case
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v2i32(<2 x i32> %a1)
+  ret i32 %vaddv.i
+}
+
+define float @test_vaddv_f32(<2 x float> %a1) {
+; CHECK-LABEL: test_vaddv_f32:
+; CHECK: faddp.2s s0, v0
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call float @llvm.arm64.neon.faddv.f32.v2f32(<2 x float> %a1)
+  ret float %vaddv.i
+}
+
+define float @test_vaddv_v4f32(<4 x float> %a1) {
+; CHECK-LABEL: test_vaddv_v4f32:
+; CHECK: faddp.4s [[REGNUM:v[0-9]+]], v0, v0
+; CHECK: faddp.2s s0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call float @llvm.arm64.neon.faddv.f32.v4f32(<4 x float> %a1)
+  ret float %vaddv.i
+}
+
+define double @test_vaddv_f64(<2 x double> %a1) {
+; CHECK-LABEL: test_vaddv_f64:
+; CHECK: faddp.2d d0, v0
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call double @llvm.arm64.neon.faddv.f64.v2f64(<2 x double> %a1)
+  ret double %vaddv.i
+}
+
+define i64 @test_vaddv_u64(<2 x i64> %a1) {
+; CHECK-LABEL: test_vaddv_u64:
+; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
+; CHECK-NEXT: fmov x0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i64 @llvm.arm64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
+  ret i64 %vaddv.i
+}
+
+define signext i8 @test_vaddvq_s8(<16 x i8> %a1) {
+; CHECK-LABEL: test_vaddvq_s8:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8> %a1)
+  %0 = trunc i32 %vaddv.i to i8
+  ret i8 %0
+}
+
+define signext i16 @test_vaddvq_s16(<8 x i16> %a1) {
+; CHECK-LABEL: test_vaddvq_s16:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16> %a1)
+  %0 = trunc i32 %vaddv.i to i16
+  ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a1) {
+; CHECK-LABEL: test_vaddvq_s32:
+; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov w0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32> %a1)
+  ret i32 %vaddv.i
+}
+
+define zeroext i8 @test_vaddvq_u8(<16 x i8> %a1) {
+; CHECK-LABEL: test_vaddvq_u8:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v16i8(<16 x i8> %a1)
+  %0 = trunc i32 %vaddv.i to i8
+  ret i8 %0
+}
+
+define zeroext i16 @test_vaddvq_u16(<8 x i16> %a1) {
+; CHECK-LABEL: test_vaddvq_u16:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v8i16(<8 x i16> %a1)
+  %0 = trunc i32 %vaddv.i to i16
+  ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a1) {
+; CHECK-LABEL: test_vaddvq_u32:
+; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov [[FMOVRES:w[0-9]+]], [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+  %vaddv.i = tail call i32 @llvm.arm64.neon.uaddv.i32.v4i32(<4 x i32> %a1)
+  ret i32 %vaddv.i
+}
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8>)
+
+declare i64 @llvm.arm64.neon.uaddv.i64.v2i64(<2 x i64>)
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v2i32(<2 x i32>)
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.arm64.neon.uaddv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32>)
+
+declare i64 @llvm.arm64.neon.saddv.i64.v2i64(<2 x i64>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8>)
+
+declare float @llvm.arm64.neon.faddv.f32.v2f32(<2 x float> %a1)
+declare float @llvm.arm64.neon.faddv.f32.v4f32(<4 x float> %a1)
+declare double @llvm.arm64.neon.faddv.f64.v2f64(<2 x double> %a1)
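
Note (not part of the commit): the test functions mirror the ACLE across-vector
add intrinsics from <arm_neon.h>, which clang lowers to the
llvm.arm64.neon.[su]addv and faddv calls checked above. A minimal C sketch of
those source-level counterparts, assuming the usual ACLE mapping; the reduce_*
wrapper names are hypothetical:

    /* Each wrapper should compile to the instruction sequence noted in
       the trailing comment, matching the CHECK lines in the test. */
    #include <arm_neon.h>

    int8_t   reduce_s8(int8x8_t v)     { return vaddv_s8(v);   } /* addv.8b + smov.b */
    uint16_t reduce_u16(uint16x4_t v)  { return vaddv_u16(v);  } /* addv.4h + fmov   */
    int32_t  reduce_s32(int32x4_t v)   { return vaddvq_s32(v); } /* addv.4s + fmov   */
    float    reduce_f32(float32x2_t v) { return vaddv_f32(v);  } /* faddp.2s         */

To reproduce the checks by hand, run the RUN line at the top of the new file
with %s replaced by its path, which is exactly what lit does:
llc -march=arm64 -arm64-neon-syntax=apple < test/CodeGen/ARM64/vaddv.ll | FileCheck test/CodeGen/ARM64/vaddv.ll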