author    Stephen Hines <srhines@google.com>  2015-03-23 12:10:34 -0700
committer Stephen Hines <srhines@google.com>  2015-03-23 12:10:34 -0700
commit    ebe69fe11e48d322045d5949c83283927a0d790b (patch)
tree      c92f1907a6b8006628a4b01615f38264d29834ea /test/CodeGen/Hexagon
parent    b7d2e72b02a4cb8034f32f8247a2558d2434e121 (diff)
Update aosp/master LLVM for rebase to r230699.
Change-Id: I2b5be30509658cb8266be782de0ab24f9099f9b9
Diffstat (limited to 'test/CodeGen/Hexagon')
-rw-r--r--  test/CodeGen/Hexagon/BranchPredict.ll               |    4
-rw-r--r--  test/CodeGen/Hexagon/always-ext.ll                  |    3
-rw-r--r--  test/CodeGen/Hexagon/block-addr.ll                  |    2
-rw-r--r--  test/CodeGen/Hexagon/cext-check.ll                  |    8
-rw-r--r--  test/CodeGen/Hexagon/cmp-not.ll                     |   50
-rw-r--r--  test/CodeGen/Hexagon/cmp-to-predreg.ll              |    4
-rw-r--r--  test/CodeGen/Hexagon/dadd.ll                        |    2
-rw-r--r--  test/CodeGen/Hexagon/dmul.ll                        |    2
-rw-r--r--  test/CodeGen/Hexagon/dsub.ll                        |    2
-rw-r--r--  test/CodeGen/Hexagon/dualstore.ll                   |   17
-rw-r--r--  test/CodeGen/Hexagon/hwloop-dbg.ll                  |   60
-rw-r--r--  test/CodeGen/Hexagon/idxload-with-zero-offset.ll    |   40
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_alu.ll        |  202
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_perm.ll       |  104
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/cr.ll               |  132
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_alu.ll        | 1020
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_bit.ll        |  329
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_complex.ll    |  349
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_fp.ll         |  388
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll        | 1525
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_perm.ll       |  252
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_pred.ll       |  351
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_shift.ll      |  723
-rw-r--r--  test/CodeGen/Hexagon/newvaluestore.ll               |    2
-rw-r--r--  test/CodeGen/Hexagon/pred-absolute-store.ll         |    2
-rw-r--r--  test/CodeGen/Hexagon/struct_args_large.ll           |    2
26 files changed, 5448 insertions, 127 deletions
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll
index 4ab1966..5d56449 100644
--- a/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/test/CodeGen/Hexagon/BranchPredict.ll
@@ -72,5 +72,5 @@ return: ; preds = %if.else, %if.then
ret i32 %retval.0
}
-!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
+!0 = !{!"branch_weights", i32 64, i32 4}
+!1 = !{!"branch_weights", i32 4, i32 64}
diff --git a/test/CodeGen/Hexagon/always-ext.ll b/test/CodeGen/Hexagon/always-ext.ll
index 9c8d708..93f4240 100644
--- a/test/CodeGen/Hexagon/always-ext.ll
+++ b/test/CodeGen/Hexagon/always-ext.ll
@@ -1,3 +1,4 @@
+; XFAIL:
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we don't generate an invalid packet with too many instructions
@@ -7,7 +8,7 @@
; CHECK: {
; CHECK-NOT: call abort
; CHECK: memw(##0)
-; CHECK: memw(r{{[0-9+]}}<<#2+##4)
+; CHECK: memw(r{{[0-9+]}}<<#2 + ##4)
; CHECK: }
%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* }
diff --git a/test/CodeGen/Hexagon/block-addr.ll b/test/CodeGen/Hexagon/block-addr.ll
index 54a12bf..dc0d6e6 100644
--- a/test/CodeGen/Hexagon/block-addr.ll
+++ b/test/CodeGen/Hexagon/block-addr.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK: r{{[0-9]+}} = CONST32(#.LJTI{{[0-9]+_[0-9]+}})
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+<<#[0-9]+}})
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}} + r{{[0-9]+<<#[0-9]+}})
; CHECK: jumpr r{{[0-9]+}}
define void @main() #0 {
diff --git a/test/CodeGen/Hexagon/cext-check.ll b/test/CodeGen/Hexagon/cext-check.ll
index 7c4b19e..b7181d8 100644
--- a/test/CodeGen/Hexagon/cext-check.ll
+++ b/test/CodeGen/Hexagon/cext-check.ll
@@ -2,9 +2,9 @@
; Check that we constant-extend instructions only when necessary.
define i32 @cext_test1(i32* %a) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+##8000)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##8000)
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300000)
-; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+##4092)
+; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092)
; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300)
entry:
%0 = load i32* %a, align 4
@@ -29,9 +29,9 @@ return:
}
define i32 @cext_test2(i8* %a) nounwind {
-; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+##1023)
+; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+{{ *}}##1023)
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300000)
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+##1024)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}##1024)
; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##6000)
entry:
%tobool = icmp ne i8* %a, null
diff --git a/test/CodeGen/Hexagon/cmp-not.ll b/test/CodeGen/Hexagon/cmp-not.ll
deleted file mode 100644
index abcddc38..0000000
--- a/test/CodeGen/Hexagon/cmp-not.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; Check that we generate matching compare insn.
-
-; Function Attrs: nounwind
-define i32 @neqi(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmpneqi(i32 %argc, i32 512)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.eq(r{{[0-9]+}}, ##512)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmpneqi(i32, i32) #1
-
-; Function Attrs: nounwind
-define i32 @ngti(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmpltei(i32 %argc, i32 4)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.gt(r{{[0-9]+}}, #4)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmpltei(i32, i32) #1
-
-; Function Attrs: nounwind
-define i32 @ngtui(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmplteui(i32 %argc, i32 4)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.gtu(r{{[0-9]+}}, #4)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmplteui(i32, i32) #1
diff --git a/test/CodeGen/Hexagon/cmp-to-predreg.ll b/test/CodeGen/Hexagon/cmp-to-predreg.ll
index d430b90..2b65343 100644
--- a/test/CodeGen/Hexagon/cmp-to-predreg.ll
+++ b/test/CodeGen/Hexagon/cmp-to-predreg.ll
@@ -2,7 +2,7 @@
; Check that we generate compare to predicate register.
define i32 @compare1(i32 %a, i32 %b) nounwind {
-; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}})
+; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}})
entry:
%cmp = icmp ne i32 %a, %b
%add = add nsw i32 %a, %b
@@ -12,7 +12,7 @@ entry:
}
define i32 @compare2(i32 %a) nounwind {
-; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}#10)
+; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}#10)
entry:
%cmp = icmp ne i32 %a, 10
%add = add nsw i32 %a, 10
diff --git a/test/CodeGen/Hexagon/dadd.ll b/test/CodeGen/Hexagon/dadd.ll
index 602978a..a86a90c 100644
--- a/test/CodeGen/Hexagon/dadd.ll
+++ b/test/CodeGen/Hexagon/dadd.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point add in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfadd(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_adddf3
define i32 @main() nounwind {
diff --git a/test/CodeGen/Hexagon/dmul.ll b/test/CodeGen/Hexagon/dmul.ll
index d743773..cbe0d7f 100644
--- a/test/CodeGen/Hexagon/dmul.ll
+++ b/test/CodeGen/Hexagon/dmul.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point multiply in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfmpy(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_muldf3
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/dsub.ll b/test/CodeGen/Hexagon/dsub.ll
index 4f9d39e..f271492 100644
--- a/test/CodeGen/Hexagon/dsub.ll
+++ b/test/CodeGen/Hexagon/dsub.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point subtract in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfsub(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_subdf3
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/dualstore.ll b/test/CodeGen/Hexagon/dualstore.ll
index f7d7e8b..33d9ce9 100644
--- a/test/CodeGen/Hexagon/dualstore.ll
+++ b/test/CodeGen/Hexagon/dualstore.ll
@@ -1,17 +1,12 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-hexagon-misched < %s | FileCheck %s
+; RUN: llc -march=hexagon -disable-hexagon-misched < %s | FileCheck %s
; Check that we generate dual stores in one packet in V4
-; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##500000
-; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##100000
-; CHECK-NEXT: }
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
+; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
-@Reg = global i32 0, align 4
-define i32 @main() nounwind {
+define i32 @main(i32 %v, i32* %p1, i32* %p2) nounwind {
entry:
- %number= alloca i32, align 4
- store i32 500000, i32* %number, align 4
- %number1= alloca i32, align 4
- store i32 100000, i32* %number1, align 4
+ store i32 %v, i32* %p1, align 4
+ store i32 %v, i32* %p2, align 4
ret i32 0
}
-
diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll
index f093dae..3c05884 100644
--- a/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -5,9 +5,9 @@ target triple = "hexagon"
define void @foo(i32* nocapture %a, i32* nocapture %b) nounwind {
entry:
- tail call void @llvm.dbg.value(metadata !{i32* %a}, i64 0, metadata !13, metadata !{metadata !"0x102"}), !dbg !17
- tail call void @llvm.dbg.value(metadata !{i32* %b}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !18
- tail call void @llvm.dbg.value(metadata !30, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32* %a, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !17
+ tail call void @llvm.dbg.value(metadata i32* %b, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !18
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !19
br label %for.body, !dbg !19
for.body: ; preds = %for.body, %entry
@@ -18,11 +18,11 @@ for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32* %b.addr.01, i32 1, !dbg !21
- tail call void @llvm.dbg.value(metadata !{i32* %incdec.ptr}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !21
+ tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21
%0 = load i32* %b.addr.01, align 4, !dbg !21
store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21
%inc = add nsw i32 %i.02, 1, !dbg !26
- tail call void @llvm.dbg.value(metadata !{i32 %inc}, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26
%exitcond = icmp eq i32 %inc, 10, !dbg !19
%arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end, label %for.body, !dbg !19
@@ -37,28 +37,28 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!29}
-!0 = metadata !{metadata !"0x11\0012\00QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)\001\00\000\00\001", metadata !28, metadata !2, metadata !2, metadata !3, metadata !2, null} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
-!2 = metadata !{}
-!3 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x2e\00foo\00foo\00\001\000\001\000\006\00256\001\001", metadata !28, null, metadata !7, null, void (i32*, i32*)* @foo, null, null, metadata !11} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
-!6 = metadata !{metadata !"0x29", metadata !28} ; [ DW_TAG_file_type ]
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{null, metadata !9, metadata !9}
-!9 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, null, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
-!10 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!11 = metadata !{metadata !13, metadata !14, metadata !15}
-!13 = metadata !{metadata !"0x101\00a\0016777217\000", metadata !5, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [a] [line 1]
-!14 = metadata !{metadata !"0x101\00b\0033554433\000", metadata !5, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [b] [line 1]
-!15 = metadata !{metadata !"0x100\00i\002\000", metadata !16, metadata !6, metadata !10} ; [ DW_TAG_auto_variable ] [i] [line 2]
-!16 = metadata !{metadata !"0xb\001\0026\000", metadata !28, metadata !5} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!17 = metadata !{i32 1, i32 15, metadata !5, null}
-!18 = metadata !{i32 1, i32 23, metadata !5, null}
-!19 = metadata !{i32 3, i32 8, metadata !20, null}
-!20 = metadata !{metadata !"0xb\003\003\001", metadata !28, metadata !16} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!21 = metadata !{i32 4, i32 5, metadata !22, null}
-!22 = metadata !{metadata !"0xb\003\0028\002", metadata !28, metadata !20} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!26 = metadata !{i32 3, i32 23, metadata !20, null}
-!27 = metadata !{i32 6, i32 1, metadata !16, null}
-!28 = metadata !{metadata !"hwloop-dbg.c", metadata !"/usr2/kparzysz/s.hex/t"}
-!29 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!30 = metadata !{i32 0}
+!0 = !{!"0x11\0012\00QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)\001\00\000\00\001", !28, !2, !2, !3, !2, null} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
+!2 = !{}
+!3 = !{!5}
+!5 = !{!"0x2e\00foo\00foo\00\001\000\001\000\006\00256\001\001", !28, null, !7, null, void (i32*, i32*)* @foo, null, null, !11} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!6 = !{!"0x29", !28} ; [ DW_TAG_file_type ]
+!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{null, !9, !9}
+!9 = !{!"0xf\00\000\0032\0032\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
+!10 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!11 = !{!13, !14, !15}
+!13 = !{!"0x101\00a\0016777217\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [a] [line 1]
+!14 = !{!"0x101\00b\0033554433\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [b] [line 1]
+!15 = !{!"0x100\00i\002\000", !16, !6, !10} ; [ DW_TAG_auto_variable ] [i] [line 2]
+!16 = !{!"0xb\001\0026\000", !28, !5} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!17 = !MDLocation(line: 1, column: 15, scope: !5)
+!18 = !MDLocation(line: 1, column: 23, scope: !5)
+!19 = !MDLocation(line: 3, column: 8, scope: !20)
+!20 = !{!"0xb\003\003\001", !28, !16} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!21 = !MDLocation(line: 4, column: 5, scope: !22)
+!22 = !{!"0xb\003\0028\002", !28, !20} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!26 = !MDLocation(line: 3, column: 23, scope: !20)
+!27 = !MDLocation(line: 6, column: 1, scope: !16)
+!28 = !{!"hwloop-dbg.c", !"/usr2/kparzysz/s.hex/t"}
+!29 = !{i32 1, !"Debug Info Version", i32 2}
+!30 = !{i32 0}
diff --git a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
index ca6df88..fbf1a3a 100644
--- a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
+++ b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; Check that we generate load instruction with (base + register offset << 0)
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check that we generate a load instruction with (base + register offset << x)
; load word
-define i32 @load_w(i32* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i32* %a, i32 %tmp
%val = load i32* %scevgep9, align 4
ret i32 %val
@@ -14,10 +14,10 @@ entry:
; load unsigned half word
-define i16 @load_uh(i16* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
ret i16 %val
@@ -25,10 +25,10 @@ entry:
; load signed half word
-define i32 @load_h(i16* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
%conv = sext i16 %val to i32
@@ -37,10 +37,10 @@ entry:
; load unsigned byte
-define i8 @load_ub(i8* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#0)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
ret i8 %val
@@ -48,10 +48,10 @@ entry:
; load signed byte
-define i32 @foo_2(i8* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#0)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
%conv = sext i8 %val to i32
@@ -60,10 +60,10 @@ entry:
; load doubleword
-define i64 @load_d(i64* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#3)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i64* %a, i32 %tmp
%val = load i64* %scevgep9, align 8
ret i64 %val
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
new file mode 100644
index 0000000..37f9f40
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -0,0 +1,202 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
+
+; Add
+declare i32 @llvm.hexagon.A2.addi(i32, i32)
+define i32 @A2_addi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, #0)
+
+declare i32 @llvm.hexagon.A2.add(i32, i32)
+define i32 @A2_add(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1)
+
+declare i32 @llvm.hexagon.A2.addsat(i32, i32)
+define i32 @A2_addsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1):sat
+
+; Logical operations
+declare i32 @llvm.hexagon.A2.and(i32, i32)
+define i32 @A2_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, r1)
+
+declare i32 @llvm.hexagon.A2.or(i32, i32)
+define i32 @A2_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, r1)
+
+declare i32 @llvm.hexagon.A2.xor(i32, i32)
+define i32 @A2_xor(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = xor(r0, r1)
+
+declare i32 @llvm.hexagon.A4.andn(i32, i32)
+define i32 @A4_andn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, ~r1)
+
+declare i32 @llvm.hexagon.A4.orn(i32, i32)
+define i32 @A4_orn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, ~r1)
+
+; Nop
+declare void @llvm.hexagon.A2.nop()
+define void @A2_nop(i32 %a, i32 %b) {
+ call void @llvm.hexagon.A2.nop()
+ ret void
+}
+; CHECK: nop
+
+; Subtract
+declare i32 @llvm.hexagon.A2.sub(i32, i32)
+define i32 @A2_sub(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1)
+
+declare i32 @llvm.hexagon.A2.subsat(i32, i32)
+define i32 @A2_subsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1):sat
+
+; Sign extend
+declare i32 @llvm.hexagon.A2.sxtb(i32)
+define i32 @A2_sxtb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxtb(r0)
+
+declare i32 @llvm.hexagon.A2.sxth(i32)
+define i32 @A2_sxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxth(r0)
+
+; Transfer immediate
+declare i32 @llvm.hexagon.A2.tfril(i32, i32)
+define i32 @A2_tfril(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.l = #0
+
+declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
+define i32 @A2_tfrih(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.h = #0
+
+declare i32 @llvm.hexagon.A2.tfrsi(i32)
+define i32 @A2_tfrsi() {
+ %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = #0
+
+; Transfer register
+declare i32 @llvm.hexagon.A2.tfr(i32)
+define i32 @A2_tfr(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfr(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = r0
+
+; Vector add halfwords
+declare i32 @llvm.hexagon.A2.svaddh(i32, i32)
+define i32 @A2_svaddh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
+define i32 @A2_svaddhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddh(r0, r1):sat
+
+declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
+define i32 @A2_svadduhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vadduh(r0, r1):sat
+
+; Vector average halfwords
+declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
+define i32 @A2_svavgh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vavgh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
+define i32 @A2_svavghs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vavgh(r0, r1):rnd
+
+declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
+define i32 @A2_svnavgh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vnavgh(r0, r1)
+
+; Vector subtract halfwords
+declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
+define i32 @A2_svsubh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
+define i32 @A2_svsubhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubh(r0, r1):sat
+
+declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
+define i32 @A2_svsubuhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubuh(r0, r1):sat
+
+; Zero extend
+declare i32 @llvm.hexagon.A2.zxth(i32)
+define i32 @A2_zxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.zxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = zxth(r0)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
new file mode 100644
index 0000000..a9cc01c
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM
+
+; Combine words into doubleword
+declare i64 @llvm.hexagon.A4.combineri(i32, i32)
+define i64 @A4_combineri(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: = combine(r0, #0)
+
+declare i64 @llvm.hexagon.A4.combineir(i32, i32)
+define i64 @A4_combineir(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
+ ret i64 %z
+}
+; CHECK: = combine(#0, r0)
+
+declare i64 @llvm.hexagon.A2.combineii(i32, i32)
+define i64 @A2_combineii() {
+ %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(#0, #0)
+
+declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
+define i32 @A2_combine_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
+define i32 @A2_combine_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
+define i32 @A2_combine_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
+define i32 @A2_combine_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.A2.combinew(i32, i32)
+define i64 @A2_combinew(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(r0, r1)
+
+; Mux
+declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
+define i32 @C2_muxri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, #0, r1)
+
+declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
+define i32 @C2_muxir(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, r1, #0)
+
+declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
+define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, r1, r2)
+
+; Shift word by 16
+declare i32 @llvm.hexagon.A2.aslh(i32)
+define i32 @A2_aslh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.aslh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = aslh(r0)
+
+declare i32 @llvm.hexagon.A2.asrh(i32)
+define i32 @A2_asrh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.asrh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = asrh(r0)
+
+; Pack high and low halfwords
+declare i64 @llvm.hexagon.S2.packhl(i32, i32)
+define i64 @S2_packhl(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = packhl(r0, r1)
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
new file mode 100644
index 0000000..9bdcb25
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -0,0 +1,132 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.2 CR
+
+; Corner detection acceleration
+declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32)
+define i32 @C4_fastcorner9(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = fastcorner9(p0, p1)
+
+declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
+define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !fastcorner9(p0, p1)
+
+; Logical reductions on predicates
+declare i32 @llvm.hexagon.C2.any8(i32)
+define i32 @C2_any8(i32 %a) {
+ %z = call i32@llvm.hexagon.C2.any8(i32 %a)
+ ret i32 %z
+}
+; CHECK: p0 = any8(p0)
+
+declare i32 @llvm.hexagon.C2.all8(i32)
+define i32 @C2_all8(i32 %a) {
+ %z = call i32@llvm.hexagon.C2.all8(i32 %a)
+ ret i32 %z
+}
+
+; CHECK: p0 = all8(p0)
+
+; Logical operations on predicates
+declare i32 @llvm.hexagon.C2.and(i32, i32)
+define i32 @C2_and(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, p1)
+
+declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
+define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, and(p1, p2))
+
+declare i32 @llvm.hexagon.C2.or(i32, i32)
+define i32 @C2_or(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, p1)
+
+declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
+define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, or(p1, p2))
+
+declare i32 @llvm.hexagon.C2.xor(i32, i32)
+define i32 @C2_xor(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = xor(p0, p1)
+
+declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
+define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, and(p1, p2))
+
+declare i32 @llvm.hexagon.C2.andn(i32, i32)
+define i32 @C2_andn(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, !p1)
+
+declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
+define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, or(p1, p2))
+
+declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
+define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, and(p1, !p2))
+
+declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
+define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, or(p1, !p2))
+
+declare i32 @llvm.hexagon.C2.not(i32)
+define i32 @C2_not(i32 %a) {
+ %z = call i32@llvm.hexagon.C2.not(i32 %a)
+ ret i32 %z
+}
+; CHECK: p0 = not(p0)
+
+declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32)
+define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, and(p1, !p2))
+
+declare i32 @llvm.hexagon.C2.orn(i32, i32)
+define i32 @C2_orn(i32 %a, i32 %b) {
+ %z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, !p1)
+
+declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
+define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, or(p1, !p2))
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
new file mode 100644
index 0000000..4a11112
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
@@ -0,0 +1,1020 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU
+
+; Absolute value doubleword
+declare i64 @llvm.hexagon.A2.absp(i64)
+define i64 @A2_absp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.absp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = abs(r1:0)
+
+; Absolute value word
+declare i32 @llvm.hexagon.A2.abs(i32)
+define i32 @A2_abs(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.abs(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = abs(r0)
+
+declare i32 @llvm.hexagon.A2.abssat(i32)
+define i32 @A2_abssat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.abssat(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = abs(r0):sat
+
+; Add and accumulate
+declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32)
+define i32 @S4_addaddi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, add(r1, #0))
+
+declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
+define i32 @S4_subaddi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, sub(#0, r1))
+
+declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
+define i32 @M2_accii(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += add(r1, #0)
+
+declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
+define i32 @M2_naccii(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= add(r1, #0)
+
+declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
+define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += add(r1, r2)
+
+declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
+define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= add(r1, r2)
+
+; Add doublewords
+declare i64 @llvm.hexagon.A2.addp(i64, i64)
+define i64 @A2_addp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = add(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
+define i64 @A2_addpsat(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = add(r1:0, r3:2):sat
+
+; Add halfword
+declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
+define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
+define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
+define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
+define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
+define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
+define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
+define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
+define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
+define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
+define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
+define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
+define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.h):sat:<<16
+
+; Logical doublewords
+declare i64 @llvm.hexagon.A2.notp(i64)
+define i64 @A2_notp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.notp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = not(r1:0)
+
+declare i64 @llvm.hexagon.A2.andp(i64, i64)
+define i64 @A2_andp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = and(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A4.andnp(i64, i64)
+define i64 @A2_andnp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = and(r1:0, ~r3:2)
+
+declare i64 @llvm.hexagon.A2.orp(i64, i64)
+define i64 @A2_orp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = or(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A4.ornp(i64, i64)
+define i64 @A2_ornp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = or(r1:0, ~r3:2)
+
+declare i64 @llvm.hexagon.A2.xorp(i64, i64)
+define i64 @A2_xorp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = xor(r1:0, r3:2)
+
+; Logical-logical doublewords
+declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
+define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= xor(r3:2, r5:4)
+
+; Logical-logical words
+declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
+define i32 @S4_or_andi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, #0)
+
+declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
+define i32 @S4_or_andix(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r1 = or(r0, and(r1, #0))
+
+declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
+define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
+define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
+define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
+define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
+define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= or(r1, r2)
+
+declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
+define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= xor(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
+define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
+define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= or(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
+define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= xor(r1, r2)
+
+declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
+define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
+define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= or(r1, r2)
+
+; Maximum words
+declare i32 @llvm.hexagon.A2.max(i32, i32)
+define i32 @A2_max(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = max(r0, r1)
+
+declare i32 @llvm.hexagon.A2.maxu(i32, i32)
+define i32 @A2_maxu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = maxu(r0, r1)
+
+; Maximum doublewords
+declare i64 @llvm.hexagon.A2.maxp(i64, i64)
+define i64 @A2_maxp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = max(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.maxup(i64, i64)
+define i64 @A2_maxup(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = maxu(r1:0, r3:2)
+
+; Minimum words
+declare i32 @llvm.hexagon.A2.min(i32, i32)
+define i32 @A2_min(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = min(r0, r1)
+
+declare i32 @llvm.hexagon.A2.minu(i32, i32)
+define i32 @A2_minu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = minu(r0, r1)
+
+; Minimum doublewords
+declare i64 @llvm.hexagon.A2.minp(i64, i64)
+define i64 @A2_minp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = min(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.minup(i64, i64)
+define i64 @A2_minup(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = minu(r1:0, r3:2)
+
+; Module wrap
+declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
+define i32 @A4_modwrapu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = modwrap(r0, r1)
+
+; Negate
+declare i64 @llvm.hexagon.A2.negp(i64)
+define i64 @A2_negp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.negp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = neg(r1:0)
+
+declare i32 @llvm.hexagon.A2.negsat(i32)
+define i32 @A2_negsat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.negsat(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = neg(r0):sat
+
+; Round
+declare i32 @llvm.hexagon.A2.roundsat(i64)
+define i32 @A2_roundsat(i64 %a) {
+ %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = round(r1:0):sat
+
+declare i32 @llvm.hexagon.A4.cround.ri(i32, i32)
+define i32 @A4_cround_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = cround(r0, #0)
+
+declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
+define i32 @A4_round_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, #0)
+
+declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
+define i32 @A4_round_ri_sat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, #0):sat
+
+declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
+define i32 @A4_cround_rr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cround(r0, r1)
+
+declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
+define i32 @A4_round_rr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, r1)
+
+declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
+define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, r1):sat
+
+; Subtract doublewords
+declare i64 @llvm.hexagon.A2.subp(i64, i64)
+define i64 @A2_subp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = sub(r1:0, r3:2)
+
+; Subtract and accumulate
+declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
+define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += sub(r1, r2)
+
+; Subtract halfwords
+declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
+define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
+define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
+define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
+define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
+define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
+define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
+define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
+define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
+define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
+define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
+define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
+define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.h):sat:<<16
+
+; Sign extend word to doubleword
+declare i64 @llvm.hexagon.A2.sxtw(i32)
+define i64 @A2_sxtw(i32 %a) {
+ %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = sxtw(r0)
+
+; Vector absolute value halfwords
+declare i64 @llvm.hexagon.A2.vabsh(i64)
+define i64 @A2_vabsh(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsh(r1:0)
+
+declare i64 @llvm.hexagon.A2.vabshsat(i64)
+define i64 @A2_vabshsat(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsh(r1:0):sat
+
+; Vector absolute value words
+declare i64 @llvm.hexagon.A2.vabsw(i64)
+define i64 @A2_vabsw(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsw(r1:0)
+
+declare i64 @llvm.hexagon.A2.vabswsat(i64)
+define i64 @A2_vabswsat(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsw(r1:0):sat
+
+; Vector absolute difference halfwords
+declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64)
+define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsdiffh(r1:0, r3:2)
+
+; Vector absolute difference words
+declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
+define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsdiffw(r1:0, r3:2)
+
+; Vector add halfwords
+declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
+define i64 @A2_vaddh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
+define i64 @A2_vaddhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
+define i64 @A2_vadduhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vadduh(r1:0, r3:2):sat
+
+; Vector add halfwords with saturate and pack to unsigned bytes
+declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
+define i32 @A5_vaddhubs(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddhub(r1:0, r3:2):sat
+
+; Vector reduce add unsigned bytes
+declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
+define i64 @A2_vraddub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vraddub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
+define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vraddub(r3:2, r5:4)
+
+; Vector reduce add halfwords
+declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
+define i32 @M2_vradduh(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vradduh(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
+define i32 @M2_vraddh(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vraddh(r1:0, r3:2)
+
+; Vector add bytes
+declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
+define i64 @A2_vaddub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
+define i64 @A2_vaddubs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddub(r1:0, r3:2):sat
+
+; Vector add words
+declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
+define i64 @A2_vaddw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
+define i64 @A2_vaddws(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddw(r1:0, r3:2):sat
+
+; Vector average halfwords
+declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
+define i64 @A2_vavgh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
+define i64 @A2_vavghr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
+define i64 @A2_vavghcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd
+
+declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
+define i64 @A2_vavguh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
+define i64 @A2_vavguhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
+define i64 @A2_vnavgh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
+define i64 @A2_vnavghr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
+define i64 @A2_vnavghcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd
+
+; Vector average unsigned bytes
+declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
+define i64 @A2_vavgub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
+define i64 @A2_vavgubr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd
+
+; Vector average words
+declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
+define i64 @A2_vavgw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
+define i64 @A2_vavgwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
+define i64 @A2_vavgwcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd
+
+declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
+define i64 @A2_vavguw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
+define i64 @A2_vavguwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
+define i64 @A2_vnavgw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
+define i64 @A2_vnavgwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
+define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd
+
+; Vector conditional negate
+declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
+define i64 @S2_vcnegh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcnegh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
+define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcnegh(r3:2, r4)
+
+; Vector maximum bytes
+declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
+define i64 @A2_vmaxub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
+define i64 @A2_vmaxb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxb(r1:0, r3:2)
+
+; Vector maximum halfwords
+declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
+define i64 @A2_vmaxh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
+define i64 @A2_vmaxuh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxuh(r1:0, r3:2)
+
+; Vector reduce maximum halfwords
+declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
+define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxh(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
+define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxuh(r3:2, r4)
+
+; Vector reduce maximum words
+declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
+define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxw(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
+define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxuw(r3:2, r4)
+
+; Vector minimum bytes
+declare i64 @llvm.hexagon.A2.vminub(i64, i64)
+define i64 @A2_vminub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vminb(i64, i64)
+define i64 @A2_vminb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminb(r1:0, r3:2)
+
+; Vector minimum halfwords
+declare i64 @llvm.hexagon.A2.vminh(i64, i64)
+define i64 @A2_vminh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
+define i64 @A2_vminuh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminuh(r1:0, r3:2)
+
+; Vector reduce minimum halfwords
+declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
+define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminh(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
+define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminuh(r3:2, r4)
+
+; Vector reduce minimum words
+declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
+define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminw(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
+define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminuw(r3:2, r4)
+
+; Vector sum of absolute differences unsigned bytes
+declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
+define i64 @A2_vrsadub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrsadub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
+define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrsadub(r3:2, r5:4)
+
+; Vector subtract halfwords
+declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
+define i64 @A2_vsubh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
+define i64 @A2_vsubhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
+define i64 @A2_vsubuhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat
+
+; Vector subtract bytes
+declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
+define i64 @A2_vsubub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
+define i64 @A2_vsububs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubub(r1:0, r3:2):sat
+
+; Vector subtract words
+declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
+define i64 @A2_vsubw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
+define i64 @A2_vsubws(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubw(r1:0, r3:2):sat
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
new file mode 100644
index 0000000..8531b2f
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
@@ -0,0 +1,329 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT
+
+; Count leading
+declare i32 @llvm.hexagon.S2.clbp(i64)
+define i32 @S2_clbp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = clb(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl0p(i64)
+define i32 @S2_cl0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl1p(i64)
+define i32 @S2_cl1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpnorm(i64)
+define i32 @S4_clbpnorm(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32)
+define i32 @S4_clbpaddi(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r1:0), #0)
+
+declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
+define i32 @S4_clbaddi(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r0), #0)
+
+declare i32 @llvm.hexagon.S2.cl0(i32)
+define i32 @S2_cl0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r0)
+
+declare i32 @llvm.hexagon.S2.cl1(i32)
+define i32 @S2_cl1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r0)
+
+declare i32 @llvm.hexagon.S2.clbnorm(i32)
+define i32 @S2_clbnorm(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r0)
+
+; Count population
+declare i32 @llvm.hexagon.S5.popcountp(i64)
+define i32 @S5_popcountp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = popcount(r1:0)
+
+; Count trailing
+declare i32 @llvm.hexagon.S2.ct0p(i64)
+define i32 @S2_ct0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct1p(i64)
+define i32 @S2_ct1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct0(i32)
+define i32 @S2_ct0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r0)
+
+declare i32 @llvm.hexagon.S2.ct1(i32)
+define i32 @S2_ct1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r0)
+
+; Extract bitfield
+declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32)
+define i64 @S2_extractup(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, #0, #0)
+
+declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
+define i64 @S4_extractp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, #0, #0)
+
+declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
+define i32 @S2_extractu(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, #0, #0)
+
+declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
+define i32 @S4_extract(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, #0, #0)
+
+declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
+define i64 @S2_extractup_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
+define i64 @S4_extractp_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
+define i32 @S2_extractu_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, r3:2)
+
+declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
+define i32 @S4_extract_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, r3:2)
+
+; Insert bitfield
+declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
+define i64 @S2_insertp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
+define i32 @S2_insert(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
+define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
+ %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, r3:2)
+
+declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
+define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, r5:4)
+
+; Interleave/deinterleave
+declare i64 @llvm.hexagon.S2.deinterleave(i64)
+define i64 @S2_deinterleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = deinterleave(r1:0)
+
+declare i64 @llvm.hexagon.S2.interleave(i64)
+define i64 @S2_interleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.interleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = interleave(r1:0)
+
+; Linear feedback-shift operation
+declare i64 @llvm.hexagon.S2.lfsp(i64, i64)
+define i64 @S2_lfsp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lfs(r1:0, r3:2)
+
+; Masked parity
+declare i32 @llvm.hexagon.S2.parityp(i64, i64)
+define i32 @S2_parityp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S4.parity(i32, i32)
+define i32 @S4_parity(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r0, r1)
+
+; Bit reverse
+declare i64 @llvm.hexagon.S2.brevp(i64)
+define i64 @S2_brevp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.brevp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = brev(r1:0)
+
+declare i32 @llvm.hexagon.S2.brev(i32)
+define i32 @S2_brev(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.brev(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = brev(r0)
+
+; Set/clear/toggle bit
+declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
+define i32 @S2_setbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
+define i32 @S2_clrbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
+define i32 @S2_togglebit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
+define i32 @S2_setbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
+define i32 @S2_clrbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
+define i32 @S2_togglebit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, r1)
+
+; Split bitfield
+declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
+define i64 @A4_bitspliti(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: = bitsplit(r0, #0)
+
+declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
+define i64 @A4_bitsplit(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = bitsplit(r0, r1)
+
+; Table index
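+; In the .goodsyntax forms the final immediate is encoded minus log2 of the
+; element size, so a requested #0 prints as #0, #-1, #-2 and #-3 for b, h, w
+; and d respectively.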
+declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxb(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxh(r1, #0, #-1)
+
+declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxw(r1, #0, #-2)
+
+declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxd(r1, #0, #-3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
new file mode 100644
index 0000000..57b0c5b
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
@@ -0,0 +1,349 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX
+
+; Complex add/sub halfwords
+declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64)
+define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
+define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
+define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
+define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat
+
+; Complex add/sub words
+declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
+define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
+define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat
+
+; Complex multiply
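+; A trailing * on a source operand (e.g. r1*) denotes the complex conjugate
+; of that operand.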
+declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
+define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
+define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
+define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
+define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
+define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
+define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
+define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
+define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
+define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
+define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
+define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
+define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat
+
+; Complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
+define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpyi(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
+define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpyr(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
+define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpyi(r2, r3)
+
+declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
+define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpyr(r2, r3)
+
+; Complex multiply with round and pack
+declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
+define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
+define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
+define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
+define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat
+
+; Complex multiply 32x16
+declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
+define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
+define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
+define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
+define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat
+
+; Vector complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
+define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
+define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
+define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
+define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat
+
+; Vector complex conjugate
+declare i64 @llvm.hexagon.A2.vconj(i64)
+define i64 @A2_vconj(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vconj(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vconj(r1:0):sat
+
+; Vector complex rotate
+declare i64 @llvm.hexagon.S2.vcrotate(i64, i32)
+define i64 @S2_vcrotate(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcrotate(r1:0, r2)
+
+; Vector reduce complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
+define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
+define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
+define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
+define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
+define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
+define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
+define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
+define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*)
+
+; Vector reduce complex rotate
+declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
+define i64 @S4_vrcrotate(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcrotate(r1:0, r2, #0)
+
+declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
+define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcrotate(r3:2, r4, #0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
new file mode 100644
index 0000000..aef8127
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -0,0 +1,388 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP
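+; Hardware floating point arrived with the V5 architecture, hence the
+; -mcpu=hexagonv5 in the RUN line above.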
+
+; Floating point addition
+declare float @llvm.hexagon.F2.sfadd(float, float)
+define float @F2_sfadd(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfadd(r0, r1)
+
+; Classify floating-point value
+declare i32 @llvm.hexagon.F2.sfclass(float, i32)
+define i32 @F2_sfclass(float %a) {
+ %z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = sfclass(r0, #0)
+
+declare i32 @llvm.hexagon.F2.dfclass(double, i32)
+define i32 @F2_dfclass(double %a) {
+ %z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = dfclass(r1:0, #0)
+
+; Compare floating-point value
+declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
+define i32 @F2_sfcmpge(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.ge(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
+define i32 @F2_sfcmpuo(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.uo(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
+define i32 @F2_sfcmpeq(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.eq(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
+define i32 @F2_sfcmpgt(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.gt(r0, r1)
+
+declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
+define i32 @F2_dfcmpge(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.ge(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
+define i32 @F2_dfcmpuo(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.uo(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
+define i32 @F2_dfcmpeq(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
+define i32 @F2_dfcmpgt(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.gt(r1:0, r3:2)
+
+; Convert floating-point value to other format
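+; Where a check below omits the destination (e.g. "= convert_sf2df(r0)"),
+; the allocated register pair may vary, so only the operation and sources
+; are matched.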
+declare double @llvm.hexagon.F2.conv.sf2df(float)
+define double @F2_conv_sf2df(float %a) {
+ %z = call double @llvm.hexagon.F2.conv.sf2df(float %a)
+ ret double %z
+}
+; CHECK: = convert_sf2df(r0)
+
+declare float @llvm.hexagon.F2.conv.df2sf(double)
+define float @F2_conv_df2sf(double %a) {
+ %z = call float @llvm.hexagon.F2.conv.df2sf(double %a)
+ ret float %z
+}
+; CHECK: r0 = convert_df2sf(r1:0)
+
+; Convert integer to floating-point value
+declare double @llvm.hexagon.F2.conv.ud2df(i64)
+define double @F2_conv_ud2df(i64 %a) {
+ %z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a)
+ ret double %z
+}
+; CHECK: r1:0 = convert_ud2df(r1:0)
+
+declare double @llvm.hexagon.F2.conv.d2df(i64)
+define double @F2_conv_d2df(i64 %a) {
+ %z = call double @llvm.hexagon.F2.conv.d2df(i64 %a)
+ ret double %z
+}
+; CHECK: r1:0 = convert_d2df(r1:0)
+
+declare double @llvm.hexagon.F2.conv.uw2df(i32)
+define double @F2_conv_uw2df(i32 %a) {
+ %z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a)
+ ret double %z
+}
+; CHECK: = convert_uw2df(r0)
+
+declare double @llvm.hexagon.F2.conv.w2df(i32)
+define double @F2_conv_w2df(i32 %a) {
+ %z = call double @llvm.hexagon.F2.conv.w2df(i32 %a)
+ ret double %z
+}
+; CHECK: = convert_w2df(r0)
+
+declare float @llvm.hexagon.F2.conv.ud2sf(i64)
+define float @F2_conv_ud2sf(i64 %a) {
+ %z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_ud2sf(r1:0)
+
+declare float @llvm.hexagon.F2.conv.d2sf(i64)
+define float @F2_conv_d2sf(i64 %a) {
+ %z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_d2sf(r1:0)
+
+declare float @llvm.hexagon.F2.conv.uw2sf(i32)
+define float @F2_conv_uw2sf(i32 %a) {
+ %z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_uw2sf(r0)
+
+declare float @llvm.hexagon.F2.conv.w2sf(i32)
+define float @F2_conv_w2sf(i32 %a) {
+ %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_w2sf(r0)
+
+; Convert floating-point value to integer
+declare i64 @llvm.hexagon.F2.conv.df2d(double)
+define i64 @F2_conv_df2d(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2d(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2d(r1:0)
+
+declare i64 @llvm.hexagon.F2.conv.df2ud(double)
+define i64 @F2_conv_df2ud(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2ud(r1:0)
+
+declare i64 @llvm.hexagon.F2.conv.df2d.chop(double)
+define i64 @F2_conv_df2d_chop(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2d(r1:0):chop
+
+declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double)
+define i64 @F2_conv_df2ud_chop(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2ud(r1:0):chop
+
+declare i64 @llvm.hexagon.F2.conv.sf2ud(float)
+define i64 @F2_conv_sf2ud(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2ud(r0)
+
+declare i64 @llvm.hexagon.F2.conv.sf2d(float)
+define i64 @F2_conv_sf2d(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2d(r0)
+
+declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float)
+define i64 @F2_conv_sf2d_chop(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2d(r0):chop
+
+declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float)
+define i64 @F2_conv_sf2ud_chop(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2ud(r0):chop
+
+declare i32 @llvm.hexagon.F2.conv.df2uw(double)
+define i32 @F2_conv_df2uw(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2uw(r1:0)
+
+declare i32 @llvm.hexagon.F2.conv.df2w(double)
+define i32 @F2_conv_df2w(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2w(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2w(r1:0)
+
+declare i32 @llvm.hexagon.F2.conv.df2w.chop(double)
+define i32 @F2_conv_df2w_chop(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2w(r1:0):chop
+
+declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double)
+define i32 @F2_conv_df2uw_chop(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2uw(r1:0):chop
+
+declare i32 @llvm.hexagon.F2.conv.sf2uw(float)
+define i32 @F2_conv_sf2uw(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2uw(r0)
+
+declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float)
+define i32 @F2_conv_sf2uw_chop(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2uw(r0):chop
+
+declare i32 @llvm.hexagon.F2.conv.sf2w(float)
+define i32 @F2_conv_sf2w(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2w(r0)
+
+declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float)
+define i32 @F2_conv_sf2w_chop(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2w(r0):chop
+
+; Floating point extreme value assistance
+declare float @llvm.hexagon.F2.sffixupr(float)
+define float @F2_sffixupr(float %a) {
+ %z = call float @llvm.hexagon.F2.sffixupr(float %a)
+ ret float %z
+}
+; CHECK: r0 = sffixupr(r0)
+
+declare float @llvm.hexagon.F2.sffixupn(float, float)
+define float @F2_sffixupn(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sffixupn(r0, r1)
+
+declare float @llvm.hexagon.F2.sffixupd(float, float)
+define float @F2_sffixupd(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sffixupd(r0, r1)
+
+; Floating point fused multiply-add
+declare float @llvm.hexagon.F2.sffma(float, float, float)
+define float @F2_sffma(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2)
+
+declare float @llvm.hexagon.F2.sffms(float, float, float)
+define float @F2_sffms(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 -= sfmpy(r1, r2)
+
+; Floating point fused multiply-add with scaling
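+; The extra predicate operand (p0 below) supplies the scale; the product is
+; scaled by a power of two taken from it before accumulation (see the PRM
+; for the exact semantics).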
+declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
+define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
+ %z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2, p0):scale
+
+; Floating point fused multiply-add for library routines
+declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
+define float @F2_sffma_lib(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2):lib
+
+declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
+define float @F2_sffms_lib(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 -= sfmpy(r1, r2):lib
+
+; Create floating-point constant
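+; sfmake/dfmake materialize a constant from a small immediate; :pos and
+; :neg select the sign of the generated value.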
+declare float @llvm.hexagon.F2.sfimm.p(i32)
+define float @F2_sfimm_p() {
+ %z = call float @llvm.hexagon.F2.sfimm.p(i32 0)
+ ret float %z
+}
+; CHECK: r0 = sfmake(#0):pos
+
+declare float @llvm.hexagon.F2.sfimm.n(i32)
+define float @F2_sfimm_n() {
+ %z = call float @llvm.hexagon.F2.sfimm.n(i32 0)
+ ret float %z
+}
+; CHECK: r0 = sfmake(#0):neg
+
+declare double @llvm.hexagon.F2.dfimm.p(i32)
+define double @F2_dfimm_p() {
+ %z = call double @llvm.hexagon.F2.dfimm.p(i32 0)
+ ret double %z
+}
+; CHECK: r1:0 = dfmake(#0):pos
+
+declare double @llvm.hexagon.F2.dfimm.n(i32)
+define double @F2_dfimm_n() {
+ %z = call double @llvm.hexagon.F2.dfimm.n(i32 0)
+ ret double %z
+}
+; CHECK: r1:0 = dfmake(#0):neg
+
+; Floating point maximum
+declare float @llvm.hexagon.F2.sfmax(float, float)
+define float @F2_sfmax(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmax(r0, r1)
+
+; Floating point minimum
+declare float @llvm.hexagon.F2.sfmin(float, float)
+define float @F2_sfmin(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmin(r0, r1)
+
+; Floating point multiply
+declare float @llvm.hexagon.F2.sfmpy(float, float)
+define float @F2_sfmpy(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmpy(r0, r1)
+
+; Floating point subtraction
+declare float @llvm.hexagon.F2.sfsub(float, float)
+define float @F2_sfsub(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfsub(r0, r1)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
new file mode 100644
index 0000000..6409e4e
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -0,0 +1,1525 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.5 XTYPE/MPY
+
+; Multiply and use lower result
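+; These forms fuse a 32x32 multiply (lower 32 bits of the result) with an
+; add of a register or immediate, as the add(..., mpyi(...)) patterns below
+; show.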
+declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32)
+define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, mpyi(r0, r1))
+
+declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
+define i32 @M4_mpyri_addi(i32 %a) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, mpyi(r0, #0))
+
+declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
+define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, mpyi(#0, r1))
+
+declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
+define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, mpyi(r1, #0))
+
+declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
+define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r1 = add(r0, mpyi(r1, r2))
+
+; Vector multiply word by signed half (32x16)
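+; mmpyl/mmpyh select the even (low) or odd (high) halfword of each word
+; lane, printed as vmpyweh/vmpywoh respectively.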
+declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
+define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
+define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
+define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
+define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
+define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
+define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
+define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
+define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:rnd:sat
+
+; Vector multiply word by unsigned half (32x16)
+declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
+define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
+define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
+define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
+define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
+define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
+define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
+define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
+define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:rnd:sat
+
+; Multiply signed halfwords
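+; The .l/.h suffixes select the low/high halfword of a source register, and
+; :<<1 shifts the product left by one bit.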
+declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
+define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
+define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
+define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
+define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
+define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
+define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
+define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
+define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
+define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
+define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
+define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
+define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
+define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
+define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
+define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
+define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
+define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
+define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
+define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
+define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
+define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
+define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
+define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
+define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
+define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
+define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
+define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
+define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
+define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
+define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
+define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
+define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):<<1:sat
+
+; Multiply unsigned halfwords
+declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
+define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
+define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
+define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
+define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
+define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
+define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
+define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
+define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
+define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
+define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
+define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
+define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
+define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
+define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
+define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
+define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.h):<<1
+
+; Polynomial multiply words
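+; pmpyw is a carryless multiply over GF(2); the accumulating form XORs into
+; the destination pair, hence the "^=" in its check line.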
+declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
+define i64 @M4_pmpyw(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = pmpyw(r0, r1)
+
+declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
+define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= pmpyw(r2, r3)
+
+; Vector reduce multiply word by signed half
+declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
+define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpywoh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
+define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpywoh(r1:0, r3:2):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
+define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyweh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
+define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyweh(r1:0, r3:2):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
+define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpywoh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
+define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpywoh(r3:2, r5:4):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
+define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyweh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
+define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyweh(r3:2, r5:4):<<1
+
+; Multiply and use upper result
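+; These forms keep only the upper 32 bits of the 64-bit product, optionally
+; rounded (:rnd) and/or saturated (:sat).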
+declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
+define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):rnd
+
+declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
+define i32 @M2_mpyu_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0, r1)
+
+declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
+define i32 @M2_mpysu_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpysu(r0, r1)
+
+declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
+define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
+define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
+define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
+define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
+define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
+define i32 @M2_mpy_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1)
+
+declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
+define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):<<1
+
+declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
+define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1, r2):<<1:sat
+
+declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
+define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1, r2):<<1:sat
+
+; Multiply and use full result
+declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
+define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0, r1)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
+define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0, r1)
+
+declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
+define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
+define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
+define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
+define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2, r3)
+
+; Vector dual multiply
+declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
+define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpy(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
+define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpy(r1:0, r3:2):<<1:sat
+
+; Vector reduce multiply bytes
+declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
+define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpybu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
+define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpybsu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
+define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpybu(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
+define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpybsu(r3:2, r5:4)
+
+; Vector dual multiply signed by unsigned bytes
+declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
+define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpybsu(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
+define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vdmpybsu(r3:2, r5:4):sat
+
+; Vector multiply even halfwords
+declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
+define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyeh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
+define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyeh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
+define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
+define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4):sat
+
+declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
+define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4):<<1:sat
+
+; Vector multiply halfwords
+declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
+define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyh(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
+define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyh(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
+define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3)
+
+declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
+define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
+define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3):<<1:sat
+
+; Vector multiply halfwords signed by unsigned
+declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
+define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyhsu(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
+define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyhsu(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
+define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyhsu(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
+define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyhsu(r2, r3):<<1:sat
+
+; Vector reduce multiply halfwords
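+; vrmpyh multiplies the four corresponding halfword lanes and sums the
+; products into the 64-bit destination (accumulating in the vrmac form).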
+declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
+define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
+define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyh(r3:2, r5:4)
+
+; Vector multiply bytes
+declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
+define i64 @M5_vmpybsu(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpybsu(r0, r1)
+
+declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
+define i64 @M5_vmpybuu(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpybu(r0, r1)
+
+declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
+define i64 @M5_vmacbuu(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpybu(r2, r3)
+
+declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
+define i64 @M5_vmacbsu(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpybsu(r2, r3)
+
+; Vector polynomial multiply halfwords
+declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
+define i64 @M4_vpmpyh(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vpmpyh(r0, r1)
+
+declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
+define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= vpmpyh(r2, r3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
new file mode 100644
index 0000000..0b76132
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -0,0 +1,252 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM
+
+; Saturate
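+; sat clamps a 64-bit value to the signed 32-bit range; sath, satuh, satub
+; and satb clamp a word to the signed-halfword, unsigned-halfword,
+; unsigned-byte and signed-byte ranges respectively.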
+declare i32 @llvm.hexagon.A2.sat(i64)
+define i32 @A2_sat(i64 %a) {
+ %z = call i32 @llvm.hexagon.A2.sat(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sat(r1:0)
+
+declare i32 @llvm.hexagon.A2.sath(i32)
+define i32 @A2_sath(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sath(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sath(r0)
+
+declare i32 @llvm.hexagon.A2.satuh(i32)
+define i32 @A2_satuh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satuh(r0)
+
+declare i32 @llvm.hexagon.A2.satub(i32)
+define i32 @A2_satub(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satub(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satub(r0)
+
+declare i32 @llvm.hexagon.A2.satb(i32)
+define i32 @A2_satb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satb(r0)
+
+; Swizzle bytes
+declare i32 @llvm.hexagon.A2.swiz(i32)
+define i32 @A2_swiz(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = swiz(r0)
+
+; Vector round and pack
+declare i32 @llvm.hexagon.S2.vrndpackwh(i64)
+define i32 @S2_vrndpackwh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vrndwh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vrndpackwhs(i64)
+define i32 @S2_vrndpackwhs(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vrndwh(r1:0):sat
+
+; Vector saturate and pack
+declare i32 @llvm.hexagon.S2.vsathub(i64)
+define i32 @S2_vsathub(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsathub(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathub(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsatwh(i64)
+define i32 @S2_vsatwh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsatwh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsatwuh(i64)
+define i32 @S2_vsatwuh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsatwuh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsathb(i64)
+define i32 @S2_vsathb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsathb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathb(r1:0)
+
+declare i32 @llvm.hexagon.S2.svsathb(i32)
+define i32 @S2_svsathb(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.svsathb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathb(r0)
+
+declare i32 @llvm.hexagon.S2.svsathub(i32)
+define i32 @S2_svsathub(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.svsathub(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathub(r0)
+
+; Vector saturate without pack
+declare i64 @llvm.hexagon.S2.vsathub.nopack(i64)
+define i64 @S2_vsathub_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsathub(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64)
+define i64 @S2_vsatwuh_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsatwuh(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64)
+define i64 @S2_vsatwh_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsatwh(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsathb.nopack(i64)
+define i64 @S2_vsathb_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsathb(r1:0)
+
+; Vector shuffle
+declare i64 @llvm.hexagon.S2.shuffeb(i64, i64)
+define i64 @S2_shuffeb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffeb(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
+define i64 @S2_shuffob(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffob(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
+define i64 @S2_shuffeh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffeh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
+define i64 @S2_shuffoh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffoh(r1:0, r3:2)
+
+; Vector splat bytes
+declare i32 @llvm.hexagon.S2.vsplatrb(i32)
+define i32 @S2_vsplatrb(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsplatb(r0)
+
+; Vector splat halfwords
+declare i64 @llvm.hexagon.S2.vsplatrh(i32)
+define i64 @S2_vsplatrh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsplath(r0)
+
+; Vector splice
+declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32)
+define i64 @S2_vspliceib(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vspliceb(r1:0, r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
+define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vspliceb(r1:0, r3:2, p0)
+
+; Vector sign extend
+declare i64 @llvm.hexagon.S2.vsxtbh(i32)
+define i64 @S2_vsxtbh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsxtbh(r0)
+
+declare i64 @llvm.hexagon.S2.vsxthw(i32)
+define i64 @S2_vsxthw(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsxthw(r0)
+
+; Vector truncate
+declare i32 @llvm.hexagon.S2.vtrunohb(i64)
+define i32 @S2_vtrunohb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vtrunohb(r1:0)
+
+declare i32 @llvm.hexagon.S2.vtrunehb(i64)
+define i32 @S2_vtrunehb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vtrunehb(r1:0)
+
+declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64)
+define i64 @S2_vtrunowh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vtrunowh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
+define i64 @S2_vtrunewh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vtrunewh(r1:0, r3:2)
+
+; Vector zero extend
+declare i64 @llvm.hexagon.S2.vzxtbh(i32)
+define i64 @S2_vzxtbh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vzxtbh(r0)
+
+declare i64 @llvm.hexagon.S2.vzxthw(i32)
+define i64 @S2_vzxthw(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vzxthw(r0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
new file mode 100644
index 0000000..96e63d8
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -0,0 +1,351 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.7 XTYPE/PRED
+
+; Compare byte
+declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32)
+define i32 @A4_cmpbgt(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gt(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
+define i32 @A4_cmpbeq(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.eq(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
+define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gtu(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
+define i32 @A4_cmpbgti(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gt(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
+define i32 @A4_cmpbeqi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.eq(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
+define i32 @A4_cmpbgtui(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gtu(r0, #0)
+
+; Compare half
+declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
+define i32 @A4_cmphgt(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gt(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
+define i32 @A4_cmpheq(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.eq(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
+define i32 @A4_cmphgtu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gtu(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
+define i32 @A4_cmphgti(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gt(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
+define i32 @A4_cmpheqi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.eq(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
+define i32 @A4_cmphgtui(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gtu(r0, #0)
+
+; Compare doublewords
+declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
+define i32 @C2_cmpgtp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
+define i32 @C2_cmpeqp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
+define i32 @C2_cmpgtup(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.gtu(r1:0, r3:2)
+
+; Compare bitmask
+declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
+define i32 @C2_bitsclri(i32 %a) {
+ %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = bitsclr(r0, #0)
+
+declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
+define i32 @C4_nbitsclri(i32 %a) {
+ %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsclr(r0, #0)
+
+declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
+define i32 @C2_bitsset(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = bitsset(r0, r1)
+
+declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
+define i32 @C4_nbitsset(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsset(r0, r1)
+
+declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
+define i32 @C2_bitsclr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = bitsclr(r0, r1)
+
+declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
+define i32 @C4_nbitsclr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsclr(r0, r1)
+
+; Mask generate from predicate
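+; Each of the eight predicate bits expands to a 0xff or 0x00 byte of the
+; 64-bit result.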
+declare i64 @llvm.hexagon.C2.mask(i32)
+define i64 @C2_mask(i32 %a) {
+ %z = call i64 @llvm.hexagon.C2.mask(i32 %a)
+ ret i64 %z
+}
+; CHECK: = mask(p0)
+
+; Check for TLB match
+declare i32 @llvm.hexagon.A4.tlbmatch(i64, i32)
+define i32 @A4_tlbmatch(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = tlbmatch(r1:0, r2)
+
+; Test bit
+declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
+define i32 @S2_tstbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = tstbit(r0, #0)
+
+declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
+define i32 @S4_ntstbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = !tstbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
+define i32 @S2_tstbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = tstbit(r0, r1)
+
+declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
+define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !tstbit(r0, r1)
+
+; Vector compare halfwords
+declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
+define i32 @A2_vcmpheq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
+define i32 @A2_vcmphgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
+define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
+define i32 @A4_vcmpheqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
+define i32 @A4_vcmphgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
+define i32 @A4_vcmphgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gtu(r1:0, #0)
+
+; Vector compare bytes for any match
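+; any8 reduces the eight per-byte compare bits, setting the predicate if any
+; byte pair compared equal.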
+declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
+define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2))
+
+; Vector compare bytes
+declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
+define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
+define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
+define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
+define i32 @A4_vcmpbeqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
+define i32 @A4_vcmpbgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
+define i32 @A4_vcmpbgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gtu(r1:0, #0)
+
+; Vector compare words
+declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
+define i32 @A2_vcmpweq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
+define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
+define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
+define i32 @A4_vcmpweqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
+define i32 @A4_vcmpwgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
+define i32 @A4_vcmpwgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gtu(r1:0, #0)
+
+; Viterbi pack even and odd predicate bits
+declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
+define i32 @C2_vitpack(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vitpack(p1, p0)
+
+; Vector mux
+declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
+define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: = vmux(p0, r3:2, r5:4)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
new file mode 100644
index 0000000..c84999b
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -0,0 +1,723 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT
+
+; Shift by immediate
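+; asr is an arithmetic (sign-extending) right shift, lsr a logical
+; (zero-filling) right shift, and asl an arithmetic left shift.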
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
+define i64 @S2_asr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
+define i64 @S2_lsr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
+define i64 @S2_asl_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
+define i32 @S2_asr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
+define i32 @S2_lsr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
+define i32 @S2_asl_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0)
+
+; Shift by immediate and accumulate
+declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
+define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
+define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
+define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
+define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
+define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
+define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
+define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
+define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
+define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
+define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
+define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
+define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, #0)
+
+; Shift by immediate and add
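+; These fold an immediate add/sub with a shifted operand into one
+; instruction; addasl computes Rt + (Rs << #u3).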
+declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
+define i32 @S4_addi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
+define i32 @S4_subi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
+define i32 @S4_addi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
+define i32 @S4_subi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
+define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = addasl(r0, r1, #0)
+
+; Shift by immediate and logical
+declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
+define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
+define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
+define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
+define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
+define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
+define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
+define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
+define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
+define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
+define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
+define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
+define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
+define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
+define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
+define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
+define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
+define i32 @S4_andi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
+define i32 @S4_ori_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
+define i32 @S4_andi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
+define i32 @S4_ori_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, lsr(r0, #0))
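+; The S4 immediate-logical forms above fold a constant into the logical op:
+; "r0 = and(#u8, asl(r0, #U5))" presumably computes (%a << U5) & u8, so with
+; both immediates at 0 the andi variants necessarily return 0, while the ori
+; variants return %a unchanged.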
+
+; Shift right by immediate with rounding
+declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
+define i64 @S2_asr_i_p_rnd(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0):rnd
+
+declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
+define i32 @S2_asr_i_r_rnd(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0):rnd
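+; :rnd rounds toward nearest instead of truncating. Under the commonly cited
+; reading, asr(Rs, #u):rnd equals (asr(Rs, #u-1) + 1) >> 1 for u > 0, e.g.
+; asr(7, #2):rnd = ((7 >> 1) + 1) >> 1 = 2, where plain asr(7, #2) = 1.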
+
+; Shift left by immediate with saturation
+declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
+define i32 @S2_asl_i_r_sat(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0):sat
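+; :sat clamps on signed overflow rather than wrapping: asl(0x40000000, #1)
+; wraps to 0x80000000, but the :sat form would presumably pin the result at
+; 0x7fffffff (and at 0x80000000 for negative overflow).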
+
+; Shift by register
+declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
+define i64 @S2_asr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
+define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
+define i64 @S2_asl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
+define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsl(r1:0, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
+define i32 @S2_asr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
+define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
+define i32 @S2_asl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
+define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(r0, r1)
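+; For the register-amount shifts above, the shift count in the second source
+; is described by the Hexagon manual as a signed quantity whose negative
+; values reverse the shift direction; that behavior is assumed, not
+; exercised, by these tests.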
+
+declare i32 @llvm.hexagon.S4.lsli(i32, i32)
+define i32 @S4_lsli(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(#0, r0)
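+; S4_lsli shifts an immediate value by a register amount, hence the reversed
+; operand order in the CHECK line: "r0 = lsl(#0, r0)" computes 0 << %a,
+; which is always 0 for this particular encoding.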
+
+; Shift by register and accumulate
+declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
+define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, r4)
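+; The .nac ("negative accumulate") intrinsics subtract the shifted value
+; from the destination, and the .acc forms further below add it; under that
+; reading S2_asr_r_p_nac yields %a - ashr(%b, %c) across the full 64-bit
+; pair.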
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
+define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
+define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
+define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
+define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
+define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
+define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
+define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
+define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
+define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
+define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
+define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
+define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
+define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
+define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
+define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsl(r1, r2)
+
+; Shift by register and logical
+declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
+define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
+define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
+define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
+define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
+define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
+define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
+define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
+define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
+define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
+define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
+define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
+define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
+define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
+define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
+define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
+define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsl(r1, r2)
+
+; Shift by register with saturation
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
+define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1):sat
+
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
+define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1):sat
+
+; Vector shift halfwords by immediate
+declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
+define i64 @S2_asr_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrh(r1:0, #0)
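+; The vector halfword shifts operate on each 16-bit lane of the 64-bit pair
+; independently, so no bits cross lane boundaries: vaslh on
+; 0x0001000100010001 by #1 would presumably give 0x0002000200020002.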
+
+declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
+define i64 @S2_lsr_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrh(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
+define i64 @S2_asl_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslh(r1:0, #0)
+
+; Vector shift halfwords by register
+declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
+define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
+define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
+define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
+define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlslh(r1:0, r2)
+
+; Vector shift words by immediate
+declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
+define i64 @S2_asr_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrw(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
+define i64 @S2_lsr_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrw(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
+define i64 @S2_asl_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslw(r1:0, #0)
+
+; Vector shift words with truncate and pack
+declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
+define i32 @S2_asr_i_svw_trun(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = vasrw(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
+define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vasrw(r1:0, r2)
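+; The svw.trun ("shift vector words, truncate") forms are assumed to shift
+; each 32-bit word of r1:0, truncate each result to its low 16 bits, and
+; pack the two halfwords into the single 32-bit destination shown above.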
diff --git a/test/CodeGen/Hexagon/newvaluestore.ll b/test/CodeGen/Hexagon/newvaluestore.ll
index 186e393..93cf347 100644
--- a/test/CodeGen/Hexagon/newvaluestore.ll
+++ b/test/CodeGen/Hexagon/newvaluestore.ll
@@ -7,7 +7,7 @@
define i32 @main() nounwind {
entry:
-; CHECK: memw(r{{[0-9]+}} + #{{[0-9]+}}) = r{{[0-9]+}}.new
+; CHECK: memw(r{{[0-9]+}}+#{{[0-9]+}}) = r{{[0-9]+}}.new
%number1 = alloca i32, align 4
%number2 = alloca i32, align 4
%number3 = alloca i32, align 4
diff --git a/test/CodeGen/Hexagon/pred-absolute-store.ll b/test/CodeGen/Hexagon/pred-absolute-store.ll
index b1b09f4..64635b1 100644
--- a/test/CodeGen/Hexagon/pred-absolute-store.ll
+++ b/test/CodeGen/Hexagon/pred-absolute-store.ll
@@ -2,7 +2,7 @@
; Check that we are able to predicate instructions with absolute
; addressing mode.
-; CHECK: if{{ *}}(p{{[0-3]+}}){{ *}}memw(##gvar){{ *}}={{ *}}r{{[0-9]+}}
+; CHECK: if{{ *}}(p{{[0-3]+}}.new){{ *}}memw(##gvar){{ *}}={{ *}}r{{[0-9]+}}
@gvar = external global i32
define i32 @test2(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/Hexagon/struct_args_large.ll b/test/CodeGen/Hexagon/struct_args_large.ll
index f09fd10..db87d9e 100644
--- a/test/CodeGen/Hexagon/struct_args_large.ll
+++ b/test/CodeGen/Hexagon/struct_args_large.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; CHECK: r[[T0:[0-9]+]] = CONST32(#s2)
-; CHECK: memw(r29 + #0) = r{{.}}
+; CHECK: memw(r29+#0) = r{{.}}
; CHECK: memw(r29+#8) = r{{.}}
%struct.large = type { i64, i64 }