Diffstat (limited to 'test/CodeGen/Mips')
305 files changed, 5510 insertions, 4314 deletions
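Note: most of the hunks below are mechanical rewrites for two toolchain migrations rather than functional test changes. First, the IR syntax for load and getelementptr now spells out the pointee type explicitly before the pointer operand. A minimal before/after sketch follows, using a hypothetical global @g and string @str for illustration (these names are not taken from the diff):

    ; old form (hypothetical @g and @str, illustration only)
    %v = load i32* @g, align 4
    %p = getelementptr [6 x i8]* @str, i32 0, i32 0
    ; new form: the pointee type is written first, then the pointer operand
    %v = load i32, i32* @g, align 4
    %p = getelementptr [6 x i8], [6 x i8]* @str, i32 0, i32 0

Second, llc's -fast-isel-abort flag became a numeric option, so the RUN lines change -fast-isel-abort to -fast-isel-abort=1, which preserves the old behavior of aborting when FastISel fails to select an instruction.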
diff --git a/test/CodeGen/Mips/2008-07-03-SRet.ll b/test/CodeGen/Mips/2008-07-03-SRet.ll index afec7f6..6313ec4 100644 --- a/test/CodeGen/Mips/2008-07-03-SRet.ll +++ b/test/CodeGen/Mips/2008-07-03-SRet.ll @@ -7,11 +7,11 @@ entry: ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4) ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4) ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4) - getelementptr %struct.sret0* %agg.result, i32 0, i32 0 ; <i32*>:0 [#uses=1] + getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 0 ; <i32*>:0 [#uses=1] store i32 %dummy, i32* %0, align 4 - getelementptr %struct.sret0* %agg.result, i32 0, i32 1 ; <i32*>:1 [#uses=1] + getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 1 ; <i32*>:1 [#uses=1] store i32 %dummy, i32* %1, align 4 - getelementptr %struct.sret0* %agg.result, i32 0, i32 2 ; <i32*>:2 [#uses=1] + getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 2 ; <i32*>:2 [#uses=1] store i32 %dummy, i32* %2, align 4 ret void } diff --git a/test/CodeGen/Mips/2008-07-15-InternalConstant.ll b/test/CodeGen/Mips/2008-07-15-InternalConstant.ll index 29a7b5c..d7e8f5c 100644 --- a/test/CodeGen/Mips/2008-07-15-InternalConstant.ll +++ b/test/CodeGen/Mips/2008-07-15-InternalConstant.ll @@ -8,7 +8,7 @@ entry: ; CHECK: foo ; CHECK: %hi(.str) ; CHECK: %lo(.str) - ret i8* getelementptr ([10 x i8]* @.str, i32 0, i32 0) + ret i8* getelementptr ([10 x i8], [10 x i8]* @.str, i32 0, i32 0) } define i32* @bar() nounwind { @@ -16,7 +16,7 @@ entry: ; CHECK: bar ; CHECK: %hi(i0) ; CHECK: %lo(i0) - ret i32* getelementptr ([5 x i32]* @i0, i32 0, i32 0) + ret i32* getelementptr ([5 x i32], [5 x i32]* @i0, i32 0, i32 0) } ; CHECK: rodata.str1.4,"aMS",@progbits diff --git a/test/CodeGen/Mips/2008-07-15-SmallSection.ll b/test/CodeGen/Mips/2008-07-15-SmallSection.ll index cbc3ecf..08d99d8 100644 --- a/test/CodeGen/Mips/2008-07-15-SmallSection.ll +++ b/test/CodeGen/Mips/2008-07-15-SmallSection.ll @@ -22,13 +22,13 @@ target triple = "mipsallegrexel-unknown-psp-elf" define i8* @A0() nounwind { entry: - ret i8* getelementptr ([8 x i8]* @s0, i32 0, i32 0) + ret i8* getelementptr ([8 x i8], [8 x i8]* @s0, i32 0, i32 0) } define i32 @A1() nounwind { entry: - load i32* getelementptr (%struct.anon* @foo, i32 0, i32 0), align 8 - load i32* getelementptr (%struct.anon* @foo, i32 0, i32 1), align 4 + load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 0), align 8 + load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 1), align 4 add i32 %1, %0 ret i32 %2 } diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll index ae06ffe..5edba02 100644 --- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll +++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll @@ -26,8 +26,8 @@ entry: define void @foo0() nounwind { entry: ; CHECK: addu - %0 = load i32* @gi1, align 4 - %1 = load i32* @gi0, align 4 + %0 = load i32, i32* @gi1, align 4 + %1 = load i32, i32* @gi0, align 4 %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind store i32 %2, i32* @gi2, align 4 ret void @@ -36,7 +36,7 @@ entry: define void @foo2() nounwind { entry: ; CHECK: neg.s - %0 = load float* @gf1, align 4 + %0 = load float, float* @gf1, align 4 %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind store float %1, float* @gf0, align 4 ret void @@ -45,7 +45,7 @@ entry: define void @foo3() nounwind { entry: ; CHECK: neg.d - %0 = load double* @gd1, align 8 + %0 = load double, double* @gd1, align 8 %1 = tail call double asm "neg.d $0, $1", 
"=f,f"(double %0) nounwind store double %1, double* @gd0, align 8 ret void @@ -64,7 +64,7 @@ define void @foo4() { entry: %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"() store i32 %0, i32* @gi2, align 4 - %1 = load float* @gf0, align 4 + %1 = load float, float* @gf0, align 4 %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1) store double %2, double* @gd0, align 8 ret void diff --git a/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll b/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll index c41d521..592e574 100644 --- a/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll +++ b/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll @@ -7,12 +7,12 @@ entry: %retval = alloca double ; <double*> [#uses=3] store double 0.000000e+00, double* %retval %r = alloca double ; <double*> [#uses=1] - load double* %r ; <double>:0 [#uses=1] + load double, double* %r ; <double>:0 [#uses=1] store double %0, double* %retval br label %return return: ; preds = %entry - load double* %retval ; <double>:1 [#uses=1] + load double, double* %retval ; <double>:1 [#uses=1] ret double %1 } diff --git a/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll b/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll index 18f5b3d..eaf6ddc 100644 --- a/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll +++ b/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll @@ -7,8 +7,8 @@ entry: continue.outer: ; preds = %case4, %entry %p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ] ; <i32> [#uses=2] - %p.0.ph = getelementptr i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1] - %1 = load i8* %p.0.ph ; <i8> [#uses=1] + %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1] + %1 = load i8, i8* %p.0.ph ; <i8> [#uses=1] switch i8 %1, label %infloop [ i8 0, label %return.split i8 76, label %case4 diff --git a/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll b/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll index 9c4838a..9cebfcd 100644 --- a/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll +++ b/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll @@ -13,16 +13,16 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f define double @_erand48_r(%struct._reent* %r, i16* %xseed) nounwind { entry: tail call void @__dorand48( %struct._reent* %r, i16* %xseed ) nounwind - load i16* %xseed, align 2 ; <i16>:0 [#uses=1] + load i16, i16* %xseed, align 2 ; <i16>:0 [#uses=1] uitofp i16 %0 to double ; <double>:1 [#uses=1] tail call double @ldexp( double %1, i32 -48 ) nounwind ; <double>:2 [#uses=1] - getelementptr i16* %xseed, i32 1 ; <i16*>:3 [#uses=1] - load i16* %3, align 2 ; <i16>:4 [#uses=1] + getelementptr i16, i16* %xseed, i32 1 ; <i16*>:3 [#uses=1] + load i16, i16* %3, align 2 ; <i16>:4 [#uses=1] uitofp i16 %4 to double ; <double>:5 [#uses=1] tail call double @ldexp( double %5, i32 -32 ) nounwind ; <double>:6 [#uses=1] fadd double %2, %6 ; <double>:7 [#uses=1] - getelementptr i16* %xseed, i32 2 ; <i16*>:8 [#uses=1] - load i16* %8, align 2 ; <i16>:9 [#uses=1] + getelementptr i16, i16* %xseed, i32 2 ; <i16*>:8 [#uses=1] + load i16, i16* %8, align 2 ; <i16>:9 [#uses=1] uitofp i16 %9 to double ; <double>:10 [#uses=1] tail call double @ldexp( double %10, i32 -16 ) nounwind ; <double>:11 [#uses=1] fadd double %7, %11 ; <double>:12 [#uses=1] @@ -35,18 +35,18 @@ declare double @ldexp(double, i32) define double @erand48(i16* %xseed) nounwind { entry: - load %struct._reent** @_impure_ptr, align 4 ; <%struct._reent*>:0 [#uses=1] + load %struct._reent*, %struct._reent** @_impure_ptr, align 4 ; <%struct._reent*>:0 [#uses=1] tail call void 
@__dorand48( %struct._reent* %0, i16* %xseed ) nounwind - load i16* %xseed, align 2 ; <i16>:1 [#uses=1] + load i16, i16* %xseed, align 2 ; <i16>:1 [#uses=1] uitofp i16 %1 to double ; <double>:2 [#uses=1] tail call double @ldexp( double %2, i32 -48 ) nounwind ; <double>:3 [#uses=1] - getelementptr i16* %xseed, i32 1 ; <i16*>:4 [#uses=1] - load i16* %4, align 2 ; <i16>:5 [#uses=1] + getelementptr i16, i16* %xseed, i32 1 ; <i16*>:4 [#uses=1] + load i16, i16* %4, align 2 ; <i16>:5 [#uses=1] uitofp i16 %5 to double ; <double>:6 [#uses=1] tail call double @ldexp( double %6, i32 -32 ) nounwind ; <double>:7 [#uses=1] fadd double %3, %7 ; <double>:8 [#uses=1] - getelementptr i16* %xseed, i32 2 ; <i16*>:9 [#uses=1] - load i16* %9, align 2 ; <i16>:10 [#uses=1] + getelementptr i16, i16* %xseed, i32 2 ; <i16*>:9 [#uses=1] + load i16, i16* %9, align 2 ; <i16>:10 [#uses=1] uitofp i16 %10 to double ; <double>:11 [#uses=1] tail call double @ldexp( double %11, i32 -16 ) nounwind ; <double>:12 [#uses=1] fadd double %8, %12 ; <double>:13 [#uses=1] diff --git a/test/CodeGen/Mips/2010-07-20-Switch.ll b/test/CodeGen/Mips/2010-07-20-Switch.ll index 5c84077..fd0254e 100644 --- a/test/CodeGen/Mips/2010-07-20-Switch.ll +++ b/test/CodeGen/Mips/2010-07-20-Switch.ll @@ -15,7 +15,7 @@ define i32 @main() nounwind readnone { entry: %x = alloca i32, align 4 ; <i32*> [#uses=2] store volatile i32 2, i32* %x, align 4 - %0 = load volatile i32* %x, align 4 ; <i32> [#uses=1] + %0 = load volatile i32, i32* %x, align 4 ; <i32> [#uses=1] ; STATIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2 ; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0) ; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]] diff --git a/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll b/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll index 9d4daee..24bcfae 100644 --- a/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll +++ b/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll @@ -4,7 +4,7 @@ define void @t(i8* %ptr) { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8]* @.str, i64 0, i64 0), i64 7, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 7, i32 1, i1 false) ret void } diff --git a/test/CodeGen/Mips/Fast-ISel/br1.ll b/test/CodeGen/Mips/Fast-ISel/br1.ll index 579a77f..11842dd 100644 --- a/test/CodeGen/Mips/Fast-ISel/br1.ll +++ b/test/CodeGen/Mips/Fast-ISel/br1.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @b = global i32 1, align 4 @@ -10,7 +10,7 @@ ; Function Attrs: nounwind define void @br() #0 { entry: - %0 = load i32* @b, align 4 + %0 = load i32, i32* @b, align 4 %tobool = icmp eq i32 %0, 0 br i1 %tobool, label %if.end, label %if.then diff --git a/test/CodeGen/Mips/Fast-ISel/callabi.ll b/test/CodeGen/Mips/Fast-ISel/callabi.ll index e76d7a7..f80cb82 100644 --- a/test/CodeGen/Mips/Fast-ISel/callabi.ll +++ b/test/CodeGen/Mips/Fast-ISel/callabi.ll @@ -1,14 +1,14 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 
-mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -check-prefix=mips32r2 -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s -check-prefix=mips32 -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -check-prefix=CHECK2 -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s -check-prefix=CHECK2 @@ -84,13 +84,13 @@ entry: ; CHECK-LABEL: cxiiiiconv ; mips32r2-LABEL: cxiiiiconv ; mips32-LABEL: cxiiiiconv - %0 = load i8* @c1, align 1 + %0 = load i8, i8* @c1, align 1 %conv = sext i8 %0 to i32 - %1 = load i8* @uc1, align 1 + %1 = load i8, i8* @uc1, align 1 %conv1 = zext i8 %1 to i32 - %2 = load i16* @s1, align 2 + %2 = load i16, i16* @s1, align 2 %conv2 = sext i16 %2 to i32 - %3 = load i16* @us1, align 2 + %3 = load i16, i16* @us1, align 2 %conv3 = zext i16 %3 to i32 call void @xiiii(i32 %conv, i32 %conv1, i32 %conv2, i32 %conv3) ; CHECK: addu $[[REG_GP:[0-9]+]], ${{[0-9]+}}, ${{[0-9+]}} diff --git a/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll b/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll index c72b1e7..72de888 100644 --- a/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll +++ b/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @f1 = common global float 0.000000e+00, align 4 @@ -12,8 +12,8 @@ ; Function Attrs: nounwind define void @feq1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp oeq float %0, %1 ; CHECK-LABEL: feq1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -33,8 +33,8 @@ entry: ; Function Attrs: nounwind define void @fne1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp une float %0, %1 ; CHECK-LABEL: fne1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -53,8 +53,8 @@ entry: ; Function Attrs: nounwind define void @flt1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp olt 
float %0, %1 ; CHECK-LABEL: flt1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -74,8 +74,8 @@ entry: ; Function Attrs: nounwind define void @fgt1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp ogt float %0, %1 ; CHECK-LABEL: fgt1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -94,8 +94,8 @@ entry: ; Function Attrs: nounwind define void @fle1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp ole float %0, %1 ; CHECK-LABEL: fle1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -114,8 +114,8 @@ entry: ; Function Attrs: nounwind define void @fge1() { entry: - %0 = load float* @f1, align 4 - %1 = load float* @f2, align 4 + %0 = load float, float* @f1, align 4 + %1 = load float, float* @f2, align 4 %cmp = fcmp oge float %0, %1 ; CHECK-LABEL: fge1: ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}}) @@ -134,8 +134,8 @@ entry: ; Function Attrs: nounwind define void @deq1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp oeq double %0, %1 ; CHECK-LABEL: deq1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) @@ -154,8 +154,8 @@ entry: ; Function Attrs: nounwind define void @dne1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp une double %0, %1 ; CHECK-LABEL: dne1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) @@ -174,8 +174,8 @@ entry: ; Function Attrs: nounwind define void @dlt1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp olt double %0, %1 ; CHECK-LABEL: dlt1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) @@ -194,8 +194,8 @@ entry: ; Function Attrs: nounwind define void @dgt1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp ogt double %0, %1 ; CHECK-LABEL: dgt1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) @@ -214,8 +214,8 @@ entry: ; Function Attrs: nounwind define void @dle1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp ole double %0, %1 ; CHECK-LABEL: dle1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) @@ -234,8 +234,8 @@ entry: ; Function Attrs: nounwind define void @dge1() { entry: - %0 = load double* @d1, align 8 - %1 = load double* @d2, align 8 + %0 = load double, double* @d1, align 8 + %1 = load double, double* @d2, align 8 %cmp = fcmp oge double %0, %1 ; CHECK-LABEL: dge1: ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/Fast-ISel/fpext.ll b/test/CodeGen/Mips/Fast-ISel/fpext.ll index 98aca75..5ac2249 100644 --- a/test/CodeGen/Mips/Fast-ISel/fpext.ll +++ b/test/CodeGen/Mips/Fast-ISel/fpext.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 
-mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @f = global float 0x40147E6B80000000, align 4 @@ -10,7 +10,7 @@ ; Function Attrs: nounwind define void @dv() #0 { entry: - %0 = load float* @f, align 4 + %0 = load float, float* @f, align 4 %conv = fpext float %0 to double ; CHECK: cvt.d.s $f{{[0-9]+}}, $f{{[0-9]+}} store double %conv, double* @d_f, align 8 diff --git a/test/CodeGen/Mips/Fast-ISel/fpintconv.ll b/test/CodeGen/Mips/Fast-ISel/fpintconv.ll index 846726a..a94ef50 100644 --- a/test/CodeGen/Mips/Fast-ISel/fpintconv.ll +++ b/test/CodeGen/Mips/Fast-ISel/fpintconv.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @@ -14,7 +14,7 @@ define void @ifv() { entry: ; CHECK-LABEL: .ent ifv - %0 = load float* @f, align 4 + %0 = load float, float* @f, align 4 %conv = fptosi float %0 to i32 ; CHECK: trunc.w.s $f[[REG:[0-9]+]], $f{{[0-9]+}} ; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]] @@ -26,7 +26,7 @@ entry: define void @idv() { entry: ; CHECK-LABEL: .ent idv - %0 = load double* @d, align 8 + %0 = load double, double* @d, align 8 %conv = fptosi double %0 to i32 ; CHECK: trunc.w.d $f[[REG:[0-9]+]], $f{{[0-9]+}} ; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]] diff --git a/test/CodeGen/Mips/Fast-ISel/fptrunc.ll b/test/CodeGen/Mips/Fast-ISel/fptrunc.ll index d843dee..2eec4c3 100644 --- a/test/CodeGen/Mips/Fast-ISel/fptrunc.ll +++ b/test/CodeGen/Mips/Fast-ISel/fptrunc.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @d = global double 0x40147E6B74DF0446, align 8 @@ -10,7 +10,7 @@ ; Function Attrs: nounwind define void @fv() #0 { entry: - %0 = load double* @d, align 8 + %0 = load double, double* @d, align 8 %conv = fptrunc double %0 to float ; CHECK: cvt.s.d $f{{[0-9]+}}, $f{{[0-9]+}} store float %conv, float* @f, align 4 diff --git a/test/CodeGen/Mips/Fast-ISel/icmpa.ll b/test/CodeGen/Mips/Fast-ISel/icmpa.ll index bd41a29..670a8d5 100644 --- a/test/CodeGen/Mips/Fast-ISel/icmpa.ll +++ b/test/CodeGen/Mips/Fast-ISel/icmpa.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @c = global i32 4, align 4 
@@ -14,8 +14,8 @@ define void @eq() { entry: ; CHECK-LABEL: .ent eq - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp eq i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}}) @@ -35,8 +35,8 @@ entry: define void @ne() { entry: ; CHECK-LABEL: .ent ne - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp ne i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}}) @@ -56,8 +56,8 @@ entry: define void @ugt() { entry: ; CHECK-LABEL: .ent ugt - %0 = load i32* @uc, align 4 - %1 = load i32* @ud, align 4 + %0 = load i32, i32* @uc, align 4 + %1 = load i32, i32* @ud, align 4 %cmp = icmp ugt i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}}) @@ -76,8 +76,8 @@ entry: define void @ult() { entry: ; CHECK-LABEL: .ent ult - %0 = load i32* @uc, align 4 - %1 = load i32* @ud, align 4 + %0 = load i32, i32* @uc, align 4 + %1 = load i32, i32* @ud, align 4 %cmp = icmp ult i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}}) @@ -95,8 +95,8 @@ entry: define void @uge() { entry: ; CHECK-LABEL: .ent uge - %0 = load i32* @uc, align 4 - %1 = load i32* @ud, align 4 + %0 = load i32, i32* @uc, align 4 + %1 = load i32, i32* @ud, align 4 %cmp = icmp uge i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}}) @@ -115,8 +115,8 @@ entry: define void @ule() { entry: ; CHECK-LABEL: .ent ule - %0 = load i32* @uc, align 4 - %1 = load i32* @ud, align 4 + %0 = load i32, i32* @uc, align 4 + %1 = load i32, i32* @ud, align 4 %cmp = icmp ule i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}}) @@ -135,8 +135,8 @@ entry: define void @sgt() { entry: ; CHECK-LABEL: .ent sgt - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp sgt i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}}) @@ -154,8 +154,8 @@ entry: define void @slt() { entry: ; CHECK-LABEL: .ent slt - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp slt i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}}) @@ -173,8 +173,8 @@ entry: define void @sge() { entry: ; CHECK-LABEL: .ent sge - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp sge i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @b1, align 4 @@ -193,8 +193,8 @@ entry: define void @sle() { entry: ; CHECK-LABEL: .ent sle - %0 = load i32* @c, align 4 - %1 = load i32* @d, align 4 + %0 = load i32, i32* @c, align 4 + %1 = load i32, i32* @d, align 4 %cmp = icmp sle i32 %0, %1 %conv = zext i1 %cmp to i32 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/Fast-ISel/loadstore2.ll b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll index d84478b..3daf03d 100644 --- a/test/CodeGen/Mips/Fast-ISel/loadstore2.ll +++ b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll @@ -4,9 +4,9 @@ target triple = "mips--linux-gnu" @c2 = common global i8 0, align 1 @c1 = common global i8 0, align 1 -; RUN: llc -march=mipsel 
-relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @s2 = common global i16 0, align 2 @@ -21,7 +21,7 @@ target triple = "mips--linux-gnu" ; Function Attrs: nounwind define void @cfoo() #0 { entry: - %0 = load i8* @c2, align 1 + %0 = load i8, i8* @c2, align 1 store i8 %0, i8* @c1, align 1 ; CHECK-LABEL: cfoo: ; CHECK: lbu $[[REGc:[0-9]+]], 0(${{[0-9]+}}) @@ -34,7 +34,7 @@ entry: ; Function Attrs: nounwind define void @sfoo() #0 { entry: - %0 = load i16* @s2, align 2 + %0 = load i16, i16* @s2, align 2 store i16 %0, i16* @s1, align 2 ; CHECK-LABEL: sfoo: ; CHECK: lhu $[[REGs:[0-9]+]], 0(${{[0-9]+}}) @@ -46,7 +46,7 @@ entry: ; Function Attrs: nounwind define void @ifoo() #0 { entry: - %0 = load i32* @i2, align 4 + %0 = load i32, i32* @i2, align 4 store i32 %0, i32* @i1, align 4 ; CHECK-LABEL: ifoo: ; CHECK: lw $[[REGi:[0-9]+]], 0(${{[0-9]+}}) @@ -58,7 +58,7 @@ entry: ; Function Attrs: nounwind define void @ffoo() #0 { entry: - %0 = load float* @f2, align 4 + %0 = load float, float* @f2, align 4 store float %0, float* @f1, align 4 ; CHECK-LABEL: ffoo: ; CHECK: lwc1 $f[[REGf:[0-9]+]], 0(${{[0-9]+}}) @@ -71,7 +71,7 @@ entry: ; Function Attrs: nounwind define void @dfoo() #0 { entry: - %0 = load double* @d2, align 8 + %0 = load double, double* @d2, align 8 store double %0, double* @d1, align 8 ; CHECK-LABEL: dfoo: ; CHECK: ldc1 $f[[REGd:[0-9]+]], 0(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll b/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll index f7f2c64..acba132 100644 --- a/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll +++ b/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll @@ -1,10 +1,10 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -check-prefix=mips32r2 -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s -check-prefix=mips32 @b2 = global i8 0, align 1 @@ -28,11 +28,11 @@ define void @_Z3b_iv() { entry: ; CHECK-LABEL: .ent _Z3b_iv - %0 = load i8* @b1, align 1 + %0 = load i8, i8* @b1, align 1 %tobool = trunc i8 %0 to i1 %frombool = zext i1 %tobool to i8 store i8 %frombool, i8* @b2, align 1 - %1 = load i8* @b2, align 1 + %1 = load i8, i8* @b2, align 1 %tobool1 = trunc i8 %1 to i1 %conv = zext i1 %tobool1 to i32 store i32 %conv, i32* @i, align 4 @@ -51,10 +51,10 @@ define void @_Z4uc_iv() { entry: ; CHECK-LABEL: .ent _Z4uc_iv - %0 = load i8* @uc1, align 1 + %0 = load i8, i8* @uc1, align 1 
%conv = zext i8 %0 to i32 store i32 %conv, i32* @i, align 4 - %1 = load i8* @uc2, align 1 + %1 = load i8, i8* @uc2, align 1 %conv1 = zext i8 %1 to i32 ; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}}) ; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255 @@ -71,10 +71,10 @@ entry: ; mips32r2-LABEL: .ent _Z4sc_iv ; mips32-LABEL: .ent _Z4sc_iv - %0 = load i8* @sc1, align 1 + %0 = load i8, i8* @sc1, align 1 %conv = sext i8 %0 to i32 store i32 %conv, i32* @i, align 4 - %1 = load i8* @sc2, align 1 + %1 = load i8, i8* @sc2, align 1 %conv1 = sext i8 %1 to i32 store i32 %conv1, i32* @j, align 4 ; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}}) @@ -91,10 +91,10 @@ entry: define void @_Z4us_iv() { entry: ; CHECK-LABEL: .ent _Z4us_iv - %0 = load i16* @us1, align 2 + %0 = load i16, i16* @us1, align 2 %conv = zext i16 %0 to i32 store i32 %conv, i32* @i, align 4 - %1 = load i16* @us2, align 2 + %1 = load i16, i16* @us2, align 2 %conv1 = zext i16 %1 to i32 store i32 %conv1, i32* @j, align 4 ret void @@ -109,10 +109,10 @@ entry: ; mips32r2-LABEL: .ent _Z4ss_iv ; mips32=LABEL: .ent _Z4ss_iv - %0 = load i16* @ss1, align 2 + %0 = load i16, i16* @ss1, align 2 %conv = sext i16 %0 to i32 store i32 %conv, i32* @i, align 4 - %1 = load i16* @ss2, align 2 + %1 = load i16, i16* @ss2, align 2 %conv1 = sext i16 %1 to i32 store i32 %conv1, i32* @j, align 4 ; mips32r2: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}}) @@ -129,7 +129,7 @@ entry: define void @_Z4b_ssv() { entry: ; CHECK-LABEL: .ent _Z4b_ssv - %0 = load i8* @b2, align 1 + %0 = load i8, i8* @b2, align 1 %tobool = trunc i8 %0 to i1 %conv = zext i1 %tobool to i16 store i16 %conv, i16* @ssi, align 2 @@ -143,10 +143,10 @@ entry: define void @_Z5uc_ssv() { entry: ; CHECK-LABEL: .ent _Z5uc_ssv - %0 = load i8* @uc1, align 1 + %0 = load i8, i8* @uc1, align 1 %conv = zext i8 %0 to i16 store i16 %conv, i16* @ssi, align 2 - %1 = load i8* @uc2, align 1 + %1 = load i8, i8* @uc2, align 1 %conv1 = zext i8 %1 to i16 ; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}}) ; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255 @@ -161,10 +161,10 @@ define void @_Z5sc_ssv() { entry: ; mips32r2-LABEL: .ent _Z5sc_ssv ; mips32-LABEL: .ent _Z5sc_ssv - %0 = load i8* @sc1, align 1 + %0 = load i8, i8* @sc1, align 1 %conv = sext i8 %0 to i16 store i16 %conv, i16* @ssi, align 2 - %1 = load i8* @sc2, align 1 + %1 = load i8, i8* @sc2, align 1 %conv1 = sext i8 %1 to i16 store i16 %conv1, i16* @ssj, align 2 ; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll b/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll index 93cf4c1..9f644ec 100644 --- a/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll +++ b/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \ ; RUN: < %s | FileCheck %s -; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \ ; RUN: < %s | FileCheck %s @.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1 @@ -9,7 +9,7 @@ ; Function Attrs: nounwind define void @foo() #0 { entry: - store i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i8** @s, align 4 + store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** @s, align 4 ret void ; CHECK: .ent foo ; CHECK: lw $[[REG1:[0-9]+]], 
%got($.str)(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/Fast-ISel/logopm.ll b/test/CodeGen/Mips/Fast-ISel/logopm.ll new file mode 100644 index 0000000..cfb751f --- /dev/null +++ b/test/CodeGen/Mips/Fast-ISel/logopm.ll @@ -0,0 +1,605 @@ +; RUN: llc -march=mipsel -relocation-model=pic -O0 -fast-isel -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 < %s | FileCheck %s +; RUN: llc -march=mipsel -relocation-model=pic -O0 -fast-isel -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 < %s | FileCheck %s + +@ub1 = common global i8 0, align 1 +@ub2 = common global i8 0, align 1 +@ub3 = common global i8 0, align 1 +@uc1 = common global i8 0, align 1 +@uc2 = common global i8 0, align 1 +@uc3 = common global i8 0, align 1 +@us1 = common global i16 0, align 2 +@us2 = common global i16 0, align 2 +@us3 = common global i16 0, align 2 +@ub = common global i8 0, align 1 +@uc = common global i8 0, align 1 +@us = common global i16 0, align 2 +@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1 +@ui = common global i32 0, align 4 +@ui1 = common global i32 0, align 4 +@ui2 = common global i32 0, align 4 +@ui3 = common global i32 0, align 4 + +; Function Attrs: noinline nounwind +define void @andUb() #0 { +entry: + %0 = load i8, i8* @ub1, align 1 + %1 = load i8, i8* @ub2, align 1 + %conv0 = trunc i8 %0 to i1 + %conv1 = trunc i8 %1 to i1 + %and0 = and i1 %conv1, %conv0 + %conv3 = zext i1 %and0 to i8 + store i8 %conv3, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUb +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB2_ADDR:[0-9]+]], %got(ub2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: lbu $[[UB2:[0-9]+]], 0($[[UB2_ADDR]]) +; CHECK-DAG: and $[[RES1:[0-9]+]], $[[UB2]], $[[UB1]] +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) + ret void +} + +; Function Attrs: noinline nounwind +define void @andUb0() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %and = and i1 %conv, 0 + %conv1 = zext i1 %and to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUb0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: and $[[RES1:[0-9]+]], $[[UB1]], $zero +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end andUb0 + ret void +} + +; Function Attrs: noinline nounwind +define void @andUb1() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %and = and i1 %conv, 1 + %conv1 = zext i1 %and to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUb1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: addiu $[[CONST:[0-9]+]], $zero, 1 +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; 
CHECK-DAG: and $[[RES1:[0-9]+]], $[[UB1]], $[[CONST]] +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end andUb1 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUb() #0 { +entry: + %0 = load i8, i8* @ub1, align 1 + %1 = load i8, i8* @ub2, align 1 + %conv0 = trunc i8 %0 to i1 + %conv1 = trunc i8 %1 to i1 + %or0 = or i1 %conv1, %conv0 + %conv3 = zext i1 %or0 to i8 + store i8 %conv3, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUb +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB2_ADDR:[0-9]+]], %got(ub2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: lbu $[[UB2:[0-9]+]], 0($[[UB2_ADDR]]) +; CHECK-DAG: or $[[RES1:[0-9]+]], $[[UB2]], $[[UB1]] +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) + ret void +} + +; Function Attrs: noinline nounwind +define void @orUb0() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %or = or i1 %conv, 0 + %conv1 = zext i1 %or to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUb0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK: andi $[[RES:[0-9]+]], $[[UB1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end orUb0 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUb1() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %or = or i1 %conv, 1 + %conv1 = zext i1 %or to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUb1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: addiu $[[CONST:[0-9]+]], $zero, 1 +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: or $[[RES1:[0-9]+]], $[[UB1]], $[[CONST]] +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end orUb1 + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUb() #0 { +entry: + %0 = load i8, i8* @ub1, align 1 + %1 = load i8, i8* @ub2, align 1 + %conv0 = trunc i8 %0 to i1 + %conv1 = trunc i8 %1 to i1 + %xor0 = xor i1 %conv1, %conv0 + %conv3 = zext i1 %xor0 to i8 + store i8 %conv3, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUb +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB2_ADDR:[0-9]+]], %got(ub2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: lbu $[[UB2:[0-9]+]], 0($[[UB2_ADDR]]) +; CHECK-DAG: xor $[[RES1:[0-9]+]], $[[UB2]], $[[UB1]] +; CHECK: andi $[[RES:[0-9]+]], 
$[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUb0() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %xor = xor i1 %conv, 0 + %conv1 = zext i1 %xor to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUb0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: xor $[[RES1:[0-9]+]], $[[UB1]], $zero +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end xorUb0 + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUb1() #0 { +entry: + %0 = load i8, i8* @ub1, align 1, !tbaa !2 + %conv = trunc i8 %0 to i1 + %xor = xor i1 %conv, 1 + %conv1 = zext i1 %xor to i8 + store i8 %conv1, i8* @ub, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUb1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UB_ADDR:[0-9]+]], %got(ub)($[[REG_GP]]) +; CHECK-DAG: addiu $[[CONST:[0-9]+]], $zero, 1 +; CHECK-DAG: lw $[[UB1_ADDR:[0-9]+]], %got(ub1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UB1:[0-9]+]], 0($[[UB1_ADDR]]) +; CHECK-DAG: xor $[[RES1:[0-9]+]], $[[UB1]], $[[CONST]] +; CHECK: andi $[[RES:[0-9]+]], $[[RES1]], 1 +; CHECK: sb $[[RES]], 0($[[UB_ADDR]]) +; CHECK: .end xorUb1 + ret void +} + +; Function Attrs: noinline nounwind +define void @andUc() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %1 = load i8, i8* @uc2, align 1, !tbaa !2 + %and3 = and i8 %1, %0 + store i8 %and3, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUc +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC2_ADDR:[0-9]+]], %got(uc2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: lbu $[[UC2:[0-9]+]], 0($[[UC2_ADDR]]) +; CHECK-DAG: and $[[RES:[0-9]+]], $[[UC2]], $[[UB1]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) + ret void +} + +; Function Attrs: noinline nounwind +define void @andUc0() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %and = and i8 %0, 67 + store i8 %and, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUc0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_67:[0-9]+]], $zero, 67 +; CHECK-DAG: and $[[RES:[0-9]+]], $[[UC1]], $[[CONST_67]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end andUc0 + ret void +} + +; Function Attrs: noinline nounwind +define void @andUc1() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %and = and i8 %0, 167 + store i8 %and, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent andUc1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu 
$[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_Neg89:[0-9]+]], $zero, -89 +; CHECK-DAG: and $[[RES:[0-9]+]], $[[UC1]], $[[CONST_Neg89]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end andUc1 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUc() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %1 = load i8, i8* @uc2, align 1, !tbaa !2 + %or3 = or i8 %1, %0 + store i8 %or3, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUc +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC2_ADDR:[0-9]+]], %got(uc2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: lbu $[[UC2:[0-9]+]], 0($[[UC2_ADDR]]) +; CHECK-DAG: or $[[RES:[0-9]+]], $[[UC2]], $[[UC1]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end orUc + ret void +} + +; Function Attrs: noinline nounwind +define void @orUc0() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %or = or i8 %0, 69 + store i8 %or, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUc0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_69:[0-9]+]], $zero, 69 +; CHECK-DAG: or $[[RES:[0-9]+]], $[[UC1]], $[[CONST_69]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end orUc0 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUc1() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %or = or i8 %0, 238 + store i8 %or, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent orUc1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_neg18:[0-9]+]], $zero, -18 +; CHECK-DAG: or $[[RES:[0-9]+]], $[[UC1]], $[[CONST_neg18]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end orUc1 + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUc() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %1 = load i8, i8* @uc2, align 1, !tbaa !2 + %xor3 = xor i8 %1, %0 + store i8 %xor3, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUc +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC2_ADDR:[0-9]+]], %got(uc2)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: lbu $[[UC2:[0-9]+]], 0($[[UC2_ADDR]]) +; CHECK-DAG: xor $[[RES:[0-9]+]], $[[UC2]], $[[UC1]] +; 
CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end xorUc + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUc0() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %xor = xor i8 %0, 23 + store i8 %xor, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUc0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_23:[0-9]+]], $zero, 23 +; CHECK-DAG: xor $[[RES:[0-9]+]], $[[UC1]], $[[CONST_23]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end xorUc0 + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUc1() #0 { +entry: + %0 = load i8, i8* @uc1, align 1, !tbaa !2 + %xor = xor i8 %0, 120 + store i8 %xor, i8* @uc, align 1, !tbaa !2 +; CHECK-LABEL: .ent xorUc1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[UC_ADDR:[0-9]+]], %got(uc)($[[REG_GP]]) +; CHECK-DAG: lw $[[UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) +; CHECK-DAG: lbu $[[UC1:[0-9]+]], 0($[[UC1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_120:[0-9]+]], $zero, 120 +; CHECK-DAG: xor $[[RES:[0-9]+]], $[[UC1]], $[[CONST_120]] +; CHECK: sb $[[RES]], 0($[[UC_ADDR]]) +; CHECK: .end xorUc1 + ret void +} + +; Function Attrs: noinline nounwind +define void @andUs() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %1 = load i16, i16* @us2, align 2, !tbaa !5 + %and3 = and i16 %1, %0 + store i16 %and3, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent andUs +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US2_ADDR:[0-9]+]], %got(us2)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: lhu $[[US2:[0-9]+]], 0($[[US2_ADDR]]) +; CHECK-DAG: and $[[RES:[0-9]+]], $[[US2]], $[[UB1]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end andUs + ret void +} + +; Function Attrs: noinline nounwind +define void @andUs0() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %and = and i16 %0, 4660 + store i16 %and, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent andUs0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_4660:[0-9]+]], $zero, 4660 +; CHECK-DAG: and $[[RES:[0-9]+]], $[[US1]], $[[CONST_4660]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end andUs0 + ret void +} + +; Function Attrs: noinline nounwind +define void @andUs1() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %and = and i16 %0, 61351 + store i16 %and, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent andUs1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; 
CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_Neg4185:[0-9]+]], $zero, -4185 +; CHECK-DAG: and $[[RES:[0-9]+]], $[[US1]], $[[CONST_Neg4185]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end andUs1 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUs() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %1 = load i16, i16* @us2, align 2, !tbaa !5 + %or3 = or i16 %1, %0 + store i16 %or3, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent orUs +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US2_ADDR:[0-9]+]], %got(us2)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: lhu $[[US2:[0-9]+]], 0($[[US2_ADDR]]) +; CHECK-DAG: or $[[RES:[0-9]+]], $[[US2]], $[[US1]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end orUs + ret void +} + +; Function Attrs: noinline nounwind +define void @orUs0() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %or = or i16 %0, 17666 + store i16 %or, i16* @us, align 2, !tbaa !5 + ret void +} + +; Function Attrs: noinline nounwind +define void @orUs1() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %or = or i16 %0, 60945 + store i16 %or, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent orUs1 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: addiu $[[CONST_neg4591:[0-9]+]], $zero, -4591 +; CHECK-DAG: or $[[RES:[0-9]+]], $[[US1]], $[[CONST_neg4591]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end orUs1 + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUs() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %1 = load i16, i16* @us2, align 2, !tbaa !5 + %xor3 = xor i16 %1, %0 + store i16 %xor3, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent xorUs +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US2_ADDR:[0-9]+]], %got(us2)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]]) +; CHECK-DAG: lhu $[[US2:[0-9]+]], 0($[[US2_ADDR]]) +; CHECK-DAG: xor $[[RES:[0-9]+]], $[[US2]], $[[US1]] +; CHECK: sh $[[RES]], 0($[[US_ADDR]]) +; CHECK: .end xorUs + ret void +} + +; Function Attrs: noinline nounwind +define void @xorUs0() #0 { +entry: + %0 = load i16, i16* @us1, align 2, !tbaa !5 + %xor = xor i16 %0, 6062 + store i16 %xor, i16* @us, align 2, !tbaa !5 +; CHECK-LABEL: .ent xorUs0 +; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp) +; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp) +; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25 +; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]]) +; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]]) +; CHECK-DAG: lhu 
$[[US1:[0-9]+]], 0($[[US1_ADDR]])
+; CHECK-DAG: addiu $[[CONST_6062:[0-9]+]], $zero, 6062
+; CHECK-DAG: xor $[[RES:[0-9]+]], $[[US1]], $[[CONST_6062]]
+; CHECK: sh $[[RES]], 0($[[US_ADDR]])
+; CHECK: .end xorUs0
+
+  ret void
+}
+
+; Function Attrs: noinline nounwind
+define void @xorUs1() #0 {
+entry:
+  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %xor = xor i16 %0, 60024
+  store i16 %xor, i16* @us, align 2, !tbaa !5
+; CHECK-LABEL: .ent xorUs1
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK-DAG: lw $[[US_ADDR:[0-9]+]], %got(us)($[[REG_GP]])
+; CHECK-DAG: lw $[[US1_ADDR:[0-9]+]], %got(us1)($[[REG_GP]])
+; CHECK-DAG: lhu $[[US1:[0-9]+]], 0($[[US1_ADDR]])
+; CHECK-DAG: addiu $[[CONST_Neg5512:[0-9]+]], $zero, -5512
+; CHECK-DAG: xor $[[RES:[0-9]+]], $[[US1]], $[[CONST_Neg5512]]
+; CHECK: sh $[[RES]], 0($[[US_ADDR]])
+; CHECK: .end xorUs1
+  ret void
+}
+
+attributes #0 = { noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 3.7.0 (trunk)"}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"short", !3, i64 0}
diff --git a/test/CodeGen/Mips/Fast-ISel/nullvoid.ll b/test/CodeGen/Mips/Fast-ISel/nullvoid.ll
index c847561..5fa3f13 100644
--- a/test/CodeGen/Mips/Fast-ISel/nullvoid.ll
+++ b/test/CodeGen/Mips/Fast-ISel/nullvoid.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s
 
 ; Function Attrs: nounwind
diff --git a/test/CodeGen/Mips/Fast-ISel/overflt.ll b/test/CodeGen/Mips/Fast-ISel/overflt.ll
index 94abd2d..57f991e 100644
--- a/test/CodeGen/Mips/Fast-ISel/overflt.ll
+++ b/test/CodeGen/Mips/Fast-ISel/overflt.ll
@@ -1,10 +1,10 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s
 
 @x = common global [128000 x float] zeroinitializer, align 4
-@y = global float* getelementptr inbounds ([128000 x float]* @x, i32 0, i32 0), align 4
+@y = global float* getelementptr inbounds ([128000 x float], [128000 x float]* @x, i32 0, i32 0), align 4
 @result = common global float 0.000000e+00, align 4
 @.str = private unnamed_addr constant [5 x i8] c"%f \0A\00", align 1
 
@@ -12,8 +12,8 @@ define void @foo() {
 entry:
 ; CHECK-LABEL: .ent foo
-  %0 = load float** @y, align 4
-  %arrayidx = getelementptr inbounds float* %0, i32 64000
+  %0 = load float*, float** @y, align 4
+  %arrayidx = getelementptr inbounds float, float* %0, i32 64000
   store float 5.500000e+00, float* %arrayidx, align 4
 ; CHECK: lui $[[REG_FPCONST_INT:[0-9]+]], 16560
 ; CHECK: mtc1 $[[REG_FPCONST_INT]], $f[[REG_FPCONST:[0-9]+]]
@@ -31,9 +31,9 @@ entry:
 define void @goo() {
 entry:
 ; CHECK-LABEL: .ent goo
-  %0 = load float** @y, align 4
-  %arrayidx = getelementptr inbounds float* %0, i32 64000
-  %1 = load float* %arrayidx, align 4
+  %0 = load float*, float** @y, align 4
+  %arrayidx = getelementptr inbounds float, float* %0, i32 64000
+  %1 = load float, float* %arrayidx, align 4
   store float %1, float* @result, align 4
 ; CHECK-DAG: lw $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
 ; CHECK-DAG: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/Fast-ISel/retabi.ll b/test/CodeGen/Mips/Fast-ISel/retabi.ll
index d271aef..ce0ca34 100644
--- a/test/CodeGen/Mips/Fast-ISel/retabi.ll
+++ b/test/CodeGen/Mips/Fast-ISel/retabi.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
 
 @i = global i32 75, align 4
@@ -11,7 +11,7 @@
 define i32 @reti() {
 entry:
 ; CHECK-LABEL: reti:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   ret i32 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -25,7 +25,7 @@ entry:
 define signext i16 @rets() {
 entry:
 ; CHECK-LABEL: rets:
-  %0 = load i16* @s, align 2
+  %0 = load i16, i16* @s, align 2
   ret i16 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -40,7 +40,7 @@ entry:
 define signext i8 @retc() {
 entry:
 ; CHECK-LABEL: retc:
-  %0 = load i8* @c, align 1
+  %0 = load i8, i8* @c, align 1
   ret i8 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -55,7 +55,7 @@ entry:
 define float @retf() {
 entry:
 ; CHECK-LABEL: retf:
-  %0 = load float* @f, align 4
+  %0 = load float, float* @f, align 4
   ret float %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -69,7 +69,7 @@ entry:
 define double @retd() {
 entry:
 ; CHECK-LABEL: retd:
-  %0 = load double* @d, align 8
+  %0 = load double, double* @d, align 8
   ret double %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
diff --git a/test/CodeGen/Mips/Fast-ISel/shift.ll b/test/CodeGen/Mips/Fast-ISel/shift.ll
index 18fd5ac..df1c827 100644
--- a/test/CodeGen/Mips/Fast-ISel/shift.ll
+++ b/test/CodeGen/Mips/Fast-ISel/shift.ll
@@ -9,7 +9,7 @@ define i32 @main() nounwind uwtable {
 entry:
   %foo = alloca %struct.s, align 4
   %0 = bitcast %struct.s* %foo to i32*
-  %bf.load = load i32* %0, align 4
+  %bf.load = load i32, i32* %0, align 4
   %bf.lshr = lshr i32 %bf.load, 2
   %cmp = icmp ne i32 %bf.lshr, 2
   br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestore.ll b/test/CodeGen/Mips/Fast-ISel/simplestore.ll
index 83e3f3f..bcb198b 100644
--- a/test/CodeGen/Mips/Fast-ISel/simplestore.ll
+++ b/test/CodeGen/Mips/Fast-ISel/simplestore.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s
 
 @abcd = external global i32
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
index 74723ae..f4b91d8 100644
--- a/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
+++ b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -1,10 +1,10 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s -check-prefix=mips32r2
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s -check-prefix=mips32
 
 @f = common global float 0.000000e+00, align 4
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestorei.ll b/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
index 128e1de..83ccae0 100644
--- a/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
+++ b/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32r2 \
 ; RUN: < %s | FileCheck %s
-; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort=1 -mcpu=mips32 \
 ; RUN: < %s | FileCheck %s
 
 @ijk = external global i32
diff --git a/test/CodeGen/Mips/addi.ll b/test/CodeGen/Mips/addi.ll
index 01d409e..b6af2ee 100644
--- a/test/CodeGen/Mips/addi.ll
+++ b/test/CodeGen/Mips/addi.ll
@@ -8,16 +8,16 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %add = add nsw i32 %0, 5
   store i32 %add, i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %1 = load i32, i32* @j, align 4
   %sub = sub nsw i32 %1, 5
   store i32 %sub, i32* @j, align 4
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %add1 = add nsw i32 %2, 10000
   store i32 %add1, i32* @k, align 4
-  %3 = load i32* @l, align 4
+  %3 = load i32, i32* @l, align 4
   %sub2 = sub nsw i32 %3, 10000
   store i32 %sub2, i32* @l, align 4
 ; 16: addiu ${{[0-9]+}}, 5 # 16 bit inst
diff --git a/test/CodeGen/Mips/addressing-mode.ll b/test/CodeGen/Mips/addressing-mode.ll
index ea76dde..81e0620 100644
--- a/test/CodeGen/Mips/addressing-mode.ll
+++ b/test/CodeGen/Mips/addressing-mode.ll
@@ -20,10 +20,10 @@ for.cond1.preheader:
 for.body3:
   %s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
   %j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
-  %arrayidx4 = getelementptr inbounds [256 x i32]* %a, i32 %i.021, i32 %j.019
-  %0 = load i32* %arrayidx4, align 4
-  %arrayidx6 = getelementptr inbounds [256 x i32]* %b, i32 %i.021, i32 %j.019
-  %1 = load i32* %arrayidx6, align 4
+  %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
+  %0 = load i32, i32* %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
+  %1 = load i32, i32* %arrayidx6, align 4
   %add = add i32 %0, %s.120
   %add7 = add i32 %add, %1
   %add8 = add nsw i32 %j.019, %m
diff --git a/test/CodeGen/Mips/align16.ll b/test/CodeGen/Mips/align16.ll
index 689ae83..f385adf 100644
--- a/test/CodeGen/Mips/align16.ll
+++ b/test/CodeGen/Mips/align16.ll
@@ -15,13 +15,13 @@ entry:
   %x = alloca i32, align 8
   %zz = alloca i32, align 4
   %z = alloca i32, align 4
-  %0 = load i32* @i, align 4
-  %arrayidx = getelementptr inbounds [512 x i32]* %y, i32 0, i32 10
+  %0 = load i32, i32* @i, align 4
+  %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
   store i32 %0, i32* %arrayidx, align 4
-  %1 = load i32* @i, align 4
+  %1 = load i32, i32* @i, align 4
   store i32 %1, i32* %x, align 8
   call void @p(i32* %x)
-  %arrayidx1 = getelementptr inbounds [512 x i32]* %y, i32 0, i32 10
+  %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
   call void @p(i32* %arrayidx1)
   ret void
 }
diff --git a/test/CodeGen/Mips/alloca.ll b/test/CodeGen/Mips/alloca.ll
index fc7ef86..8967d57 100644
--- a/test/CodeGen/Mips/alloca.ll
+++ b/test/CodeGen/Mips/alloca.ll
@@ -9,7 +9,7 @@ entry:
 ; CHECK: move $4, $[[T0]]
 ; CHECK: move $4, $[[T2]]
   %tmp1 = alloca i8, i32 %size, align 4
-  %add.ptr = getelementptr inbounds i8* %tmp1, i32 5
+  %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 5
   store i8 97, i8* %add.ptr, align 1
   %tmp4 = alloca i8, i32 %size, align 4
   call void @foo2(double 1.000000e+00, double 2.000000e+00, i32 3) nounwind
@@ -39,17 +39,17 @@ entry:
 
 if.then: ; preds = %entry
 ; CHECK: addiu $4, $[[T0]], 40
-  %add.ptr = getelementptr inbounds i8* %tmp1, i32 40
+  %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 40
   %1 = bitcast i8* %add.ptr to i32*
   call void @foo3(i32* %1) nounwind
-  %arrayidx15.pre = getelementptr inbounds i8* %tmp1, i32 12
+  %arrayidx15.pre = getelementptr inbounds i8, i8* %tmp1, i32 12
   %.pre = bitcast i8* %arrayidx15.pre to i32*
   br label %if.end
 
 if.else: ; preds = %entry
 ; CHECK: addiu $4, $[[T0]], 12
-  %add.ptr5 = getelementptr inbounds i8* %tmp1, i32 12
+  %add.ptr5 = getelementptr inbounds i8, i8* %tmp1, i32 12
   %2 = bitcast i8* %add.ptr5 to i32*
   call void @foo3(i32* %2) nounwind
   br label %if.end
@@ -59,24 +59,24 @@ if.end: ; preds = %if.else, %if.then
 ; CHECK: lw $25, %call16(printf)
   %.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
-  %tmp7 = load i32* %0, align 4
-  %arrayidx9 = getelementptr inbounds i8* %tmp1, i32 4
+  %tmp7 = load i32, i32* %0, align 4
+  %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
   %3 = bitcast i8* %arrayidx9 to i32*
-  %tmp10 = load i32* %3, align 4
-  %arrayidx12 = getelementptr inbounds i8* %tmp1, i32 8
+  %tmp10 = load i32, i32* %3, align 4
+  %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
   %4 = bitcast i8* %arrayidx12 to i32*
-  %tmp13 = load i32* %4, align 4
-  %tmp16 = load i32* %.pre-phi, align 4
-  %arrayidx18 = getelementptr inbounds i8* %tmp1, i32 16
+  %tmp13 = load i32, i32* %4, align 4
+  %tmp16 = load i32, i32* %.pre-phi, align 4
+  %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
   %5 = bitcast i8* %arrayidx18 to i32*
-  %tmp19 = load i32* %5, align 4
-  %arrayidx21 = getelementptr inbounds i8* %tmp1, i32 20
+  %tmp19 = load i32, i32* %5, align 4
+  %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
   %6 = bitcast i8* %arrayidx21 to i32*
-  %tmp22 = load i32* %6, align 4
-  %arrayidx24 = getelementptr inbounds i8* %tmp1, i32 24
+  %tmp22 = load i32, i32* %6, align 4
+  %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
   %7 = bitcast i8* %arrayidx24 to i32*
-  %tmp25 = load i32* %7, align 4
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
+  %tmp25 = load i32, i32* %7, align 4
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/alloca16.ll b/test/CodeGen/Mips/alloca16.ll
index 4f60598..be8cc74 100644
--- a/test/CodeGen/Mips/alloca16.ll
+++ b/test/CodeGen/Mips/alloca16.ll
@@ -12,7 +12,7 @@ define void @temp(i32 %foo) nounwind {
 entry:
   %foo.addr = alloca i32, align 4
   store i32 %foo, i32* %foo.addr, align 4
-  %0 = load i32* %foo.addr, align 4
+  %0 = load i32, i32* %foo.addr, align 4
   store i32 %0, i32* @t, align 4
   ret void
 }
@@ -28,46 +28,46 @@ entry:
   %sssi = alloca i32, align 4
   %ip = alloca i32*, align 4
   %sssj = alloca i32, align 4
-  %0 = load i32* @iiii, align 4
+  %0 = load i32, i32* @iiii, align 4
   store i32 %0, i32* %sssi, align 4
-  %1 = load i32* @kkkk, align 4
+  %1 = load i32, i32* @kkkk, align 4
   %mul = mul nsw i32 %1, 100
   %2 = alloca i8, i32 %mul
   %3 = bitcast i8* %2 to i32*
   store i32* %3, i32** %ip, align 4
-  %4 = load i32* @jjjj, align 4
+  %4 = load i32, i32* @jjjj, align 4
   store i32 %4, i32* %sssj, align 4
-  %5 = load i32* @jjjj, align 4
-  %6 = load i32* @iiii, align 4
-  %7 = load i32** %ip, align 4
-  %arrayidx = getelementptr inbounds i32* %7, i32 %6
+  %5 = load i32, i32* @jjjj, align 4
+  %6 = load i32, i32* @iiii, align 4
+  %7 = load i32*, i32** %ip, align 4
+  %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
   store i32 %5, i32* %arrayidx, align 4
-  %8 = load i32* @kkkk, align 4
-  %9 = load i32* @jjjj, align 4
-  %10 = load i32** %ip, align 4
-  %arrayidx1 = getelementptr inbounds i32* %10, i32 %9
+  %8 = load i32, i32* @kkkk, align 4
+  %9 = load i32, i32* @jjjj, align 4
+  %10 = load i32*, i32** %ip, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
   store i32 %8, i32* %arrayidx1, align 4
-  %11 = load i32* @iiii, align 4
-  %12 = load i32* @kkkk, align 4
-  %13 = load i32** %ip, align 4
-  %arrayidx2 = getelementptr inbounds i32* %13, i32 %12
+  %11 = load i32, i32* @iiii, align 4
+  %12 = load i32, i32* @kkkk, align 4
+  %13 = load i32*, i32** %ip, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
   store i32 %11, i32* %arrayidx2, align 4
-  %14 = load i32** %ip, align 4
-  %arrayidx3 = getelementptr inbounds i32* %14, i32 25
-  %15 = load i32* %arrayidx3, align 4
+  %14 = load i32*, i32** %ip, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
+  %15 = load i32, i32* %arrayidx3, align 4
   store i32 %15, i32* @riii, align 4
-  %16 = load i32** %ip, align 4
-  %arrayidx4 = getelementptr inbounds i32* %16, i32 35
-  %17 = load i32* %arrayidx4, align 4
+  %16 = load i32*, i32** %ip, align 4
+  %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
+  %17 = load i32, i32* %arrayidx4, align 4
   store i32 %17, i32* @rjjj, align 4
-  %18 = load i32** %ip, align 4
-  %arrayidx5 = getelementptr inbounds i32* %18, i32 100
-  %19 = load i32* %arrayidx5, align 4
+  %18 = load i32*, i32** %ip, align 4
+  %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
+  %19 = load i32, i32* %arrayidx5, align 4
   store i32 %19, i32* @rkkk, align 4
-  %20 = load i32* @t, align 4
-  %21 = load i32** %ip, align 4
-  %arrayidx6 = getelementptr inbounds i32* %21, i32 %20
-  %22 = load i32* %arrayidx6, align 4
+  %20 = load i32, i32* @t, align 4
+  %21 = load i32*, i32** %ip, align 4
+  %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
+  %22 = load i32, i32* %arrayidx6, align 4
 ; 16: addiu $sp, -16
   call void @temp(i32 %22)
 ; 16: addiu $sp, 16
diff --git a/test/CodeGen/Mips/and1.ll b/test/CodeGen/Mips/and1.ll
index 4ff1204..be9ba3e 100644
--- a/test/CodeGen/Mips/and1.ll
+++ b/test/CodeGen/Mips/and1.ll
@@ -6,11 +6,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @x, align 4
-  %1 = load i32* @y, align 4
+  %0 = load i32, i32* @x, align 4
+  %1 = load i32, i32* @y, align 4
   %and = and i32 %0, %1
 ; 16: and ${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %and)
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and)
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index ccfeb00..031cce0 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -54,7 +54,7 @@ define i32 @AtomicSwap32(i32 signext %newval) nounwind {
 entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32* %newval.addr, align 4
+  %tmp = load i32, i32* %newval.addr, align 4
   %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
   ret i32 %0
 
@@ -74,7 +74,7 @@ define i32 @AtomicCmpSwap32(i32 signext %oldval, i32 signext %newval) nounwind {
 entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32* %newval.addr, align 4
+  %tmp = load i32, i32* %newval.addr, align 4
   %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
   %1 = extractvalue { i32, i1 } %0, 0
   ret i32 %1
@@ -429,7 +429,7 @@ entry:
 ; FIXME: At the moment, we don't seem to do addr+offset for any atomic load/store.
 define i32 @AtomicLoadAdd32_OffGt9Bit(i32 signext %incr) nounwind {
 entry:
-  %0 = atomicrmw add i32* getelementptr(i32* @x, i32 256), i32 %incr monotonic
+  %0 = atomicrmw add i32* getelementptr(i32, i32* @x, i32 256), i32 %incr monotonic
   ret i32 %0
 
; ALL-LABEL: AtomicLoadAdd32_OffGt9Bit:
diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll
index c264152..920357d 100644
--- a/test/CodeGen/Mips/atomicops.ll
+++ b/test/CodeGen/Mips/atomicops.ll
@@ -18,15 +18,15 @@ entry:
   store volatile i32 0, i32* %x, align 4
   %0 = atomicrmw add i32* %x, i32 1 seq_cst
   %add.i = add nsw i32 %0, 2
-  %1 = load volatile i32* %x, align 4
-  %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
+  %1 = load volatile i32, i32* %x, align 4
+  %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
   %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
   %2 = extractvalue { i32, i1 } %pair, 0
-  %3 = load volatile i32* %x, align 4
-  %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
+  %3 = load volatile i32, i32* %x, align 4
+  %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
   %4 = atomicrmw xchg i32* %x, i32 1 seq_cst
-  %5 = load volatile i32* %x, align 4
-  %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
+  %5 = load volatile i32, i32* %x, align 4
+  %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
 ; 16-LABEL: main:
 ; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
 ; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/beqzc.ll b/test/CodeGen/Mips/beqzc.ll
index 4a294c2..afb66a9 100644
--- a/test/CodeGen/Mips/beqzc.ll
+++ b/test/CodeGen/Mips/beqzc.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   %. = select i1 %cmp, i32 10, i32 55
   store i32 %., i32* @j, align 4
diff --git a/test/CodeGen/Mips/beqzc1.ll b/test/CodeGen/Mips/beqzc1.ll
index 8f929a8..fe0dd2a 100644
--- a/test/CodeGen/Mips/beqzc1.ll
+++ b/test/CodeGen/Mips/beqzc1.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/biggot.ll b/test/CodeGen/Mips/biggot.ll
index da287ee..b56ce6b 100644
--- a/test/CodeGen/Mips/biggot.ll
+++ b/test/CodeGen/Mips/biggot.ll
@@ -20,7 +20,7 @@ entry:
 ; N64: daddu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
 ; N64: ld ${{[0-9]+}}, %call_lo(foo0)($[[R3]])
 
-  %0 = load i32* @v0, align 4
+  %0 = load i32, i32* @v0, align 4
   tail call void @foo0(i32 %0) nounwind
   ret void
 }
diff --git a/test/CodeGen/Mips/brconeq.ll b/test/CodeGen/Mips/brconeq.ll
index 6133915..f555528 100644
--- a/test/CodeGen/Mips/brconeq.ll
+++ b/test/CodeGen/Mips/brconeq.ll
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, %1
 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
diff --git a/test/CodeGen/Mips/brconeqk.ll b/test/CodeGen/Mips/brconeqk.ll
index 2c0e72d..59edae8 100644
--- a/test/CodeGen/Mips/brconeqk.ll
+++ b/test/CodeGen/Mips/brconeqk.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 10
   br i1 %cmp, label %if.end, label %if.then
 ; 16: cmpi ${{[0-9]+}}, {{[0-9]+}}
diff --git a/test/CodeGen/Mips/brconeqz.ll b/test/CodeGen/Mips/brconeqz.ll
index 5586e7b..22c5664 100644
--- a/test/CodeGen/Mips/brconeqz.ll
+++ b/test/CodeGen/Mips/brconeqz.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.end, label %if.then
 ; 16: beqz ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
diff --git a/test/CodeGen/Mips/brconge.ll b/test/CodeGen/Mips/brconge.ll
index 02f0a63..46d1984 100644
--- a/test/CodeGen/Mips/brconge.ll
+++ b/test/CodeGen/Mips/brconge.ll
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -22,7 +22,7 @@ if.then: ; preds = %entry
   br label %if.end
 
 if.end: ; preds = %if.then, %entry
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %cmp1 = icmp slt i32 %0, %2
   br i1 %cmp1, label %if.then2, label %if.end3
diff --git a/test/CodeGen/Mips/brcongt.ll b/test/CodeGen/Mips/brcongt.ll
index 767b51b..cefacb8 100644
--- a/test/CodeGen/Mips/brcongt.ll
+++ b/test/CodeGen/Mips/brcongt.ll
@@ -7,8 +7,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/brconle.ll b/test/CodeGen/Mips/brconle.ll
index 854b248..e1f15ec 100644
--- a/test/CodeGen/Mips/brconle.ll
+++ b/test/CodeGen/Mips/brconle.ll
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -22,7 +22,7 @@ if.then: ; preds = %entry
   br label %if.end
 
 if.end: ; preds = %if.then, %entry
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %cmp1 = icmp sgt i32 %1, %2
   br i1 %cmp1, label %if.then2, label %if.end3
diff --git a/test/CodeGen/Mips/brconlt.ll b/test/CodeGen/Mips/brconlt.ll
index 931a3e8..049f35c 100644
--- a/test/CodeGen/Mips/brconlt.ll
+++ b/test/CodeGen/Mips/brconlt.ll
@@ -7,8 +7,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
diff --git a/test/CodeGen/Mips/brconne.ll b/test/CodeGen/Mips/brconne.ll
index 5d5bde3..b260320 100644
--- a/test/CodeGen/Mips/brconne.ll
+++ b/test/CodeGen/Mips/brconne.ll
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/brconnek.ll b/test/CodeGen/Mips/brconnek.ll
index 6208d7c..778a5cc 100644
--- a/test/CodeGen/Mips/brconnek.ll
+++ b/test/CodeGen/Mips/brconnek.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
+  %0 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, 5
   br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/brconnez.ll b/test/CodeGen/Mips/brconnez.ll
index 47db790..754714b 100644
--- a/test/CodeGen/Mips/brconnez.ll
+++ b/test/CodeGen/Mips/brconnez.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
+  %0 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/brdelayslot.ll b/test/CodeGen/Mips/brdelayslot.ll
index 68341c1..0f46619 100644
--- a/test/CodeGen/Mips/brdelayslot.ll
+++ b/test/CodeGen/Mips/brdelayslot.ll
@@ -54,18 +54,18 @@ declare void @foo4(double)
 
 define void @foo5(i32 %a) nounwind {
 entry:
-  %0 = load i32* @g2, align 4
+  %0 = load i32, i32* @g2, align 4
   %tobool = icmp eq i32 %a, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   %add = add nsw i32 %1, %0
   store i32 %add, i32* @g1, align 4
   br label %if.end
 
 if.else:
-  %2 = load i32* @g3, align 4
+  %2 = load i32, i32* @g3, align 4
   %sub = sub nsw i32 %2, %0
   store i32 %sub, i32* @g3, align 4
   br label %if.end
@@ -99,9 +99,9 @@ declare void @foo7(double, float)
 define i32 @foo8(i32 %a) nounwind {
 entry:
   store i32 %a, i32* @g1, align 4
-  %0 = load void ()** @foo9, align 4
+  %0 = load void ()*, void ()** @foo9, align 4
   tail call void %0() nounwind
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   %add = add nsw i32 %1, %a
   ret i32 %add
 }
@@ -144,8 +144,8 @@ entry:
 for.body: ; preds = %entry, %for.body
   %s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i32* %a, i32 %i.05
-  %0 = load i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
+  %0 = load i32, i32* %arrayidx, align 4
   %add = add nsw i32 %0, %s.06
   %inc = add nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, %n
diff --git a/test/CodeGen/Mips/brind.ll b/test/CodeGen/Mips/brind.ll
index 4c591fa..a3e9b80 100644
--- a/test/CodeGen/Mips/brind.ll
+++ b/test/CodeGen/Mips/brind.ll
@@ -9,29 +9,29 @@
 
 define i32 @main() nounwind {
 entry:
-  %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0))
+  %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
   br label %L1
 
 L1: ; preds = %entry, %L3
   %i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ]
-  %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str5, i32 0, i32 0))
+  %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str5, i32 0, i32 0))
   br label %L2
 
 L2: ; preds = %L1, %L3
   %i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ]
-  %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str6, i32 0, i32 0))
+  %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str6, i32 0, i32 0))
   br label %L3
 
 L3: ; preds = %L2, %L3
   %i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
-  %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str7, i32 0, i32 0))
+  %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str7, i32 0, i32 0))
   %inc = add i32 %i.2, 1
-  %arrayidx = getelementptr inbounds [5 x i8*]* @main.L, i32 0, i32 %i.2
-  %0 = load i8** %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
+  %0 = load i8*, i8** %arrayidx, align 4
   indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
 ; 16: jrc ${{[0-9]+}}
 
 L4: ; preds = %L3
-  %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str8, i32 0, i32 0))
+  %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str8, i32 0, i32 0))
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/cache-intrinsic.ll b/test/CodeGen/Mips/cache-intrinsic.ll
index 2fa4115..461c181 100644
--- a/test/CodeGen/Mips/cache-intrinsic.ll
+++ b/test/CodeGen/Mips/cache-intrinsic.ll
@@ -10,10 +10,10 @@ define i32 @main() {
 entry:
   %retval = alloca i32, align 4
   store i32 0, i32* %retval
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
-  %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8]* @.str1, i32 0, i32 0)) #3
-  call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
-  %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
+  %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str1, i32 0, i32 0)) #3
+  call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
+  %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-float.ll b/test/CodeGen/Mips/cconv/arguments-float.ll
index ee40d7f..156d6f1 100644
--- a/test/CodeGen/Mips/cconv/arguments-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-float.ll
@@ -24,23 +24,23 @@ define void @double_args(double %a, double %b, double %c, double %d, double %e,
                          double %f, double %g, double %h, double %i) nounwind {
 entry:
-  %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+  %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
   store volatile double %a, double* %0
-  %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+  %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
   store volatile double %b, double* %1
-  %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+  %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
   store volatile double %c, double* %2
-  %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+  %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
   store volatile double %d, double* %3
-  %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+  %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
   store volatile double %e, double* %4
-  %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+  %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
   store volatile double %f, double* %5
-  %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+  %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
   store volatile double %g, double* %6
-  %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+  %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
   store volatile double %h, double* %7
-  %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+  %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
   store volatile double %i, double* %8
   ret void
 }
@@ -105,25 +105,25 @@ define void @float_args(float %a, float %b, float %c, float %d, float %e,
                         float %f, float %g, float %h, float %i, float %j) nounwind {
 entry:
-  %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+  %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
   store volatile float %a, float* %0
-  %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+  %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
   store volatile float %b, float* %1
-  %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+  %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
   store volatile float %c, float* %2
-  %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+  %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
   store volatile float %d, float* %3
-  %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+  %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
   store volatile float %e, float* %4
-  %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+  %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
   store volatile float %f, float* %5
-  %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+  %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
   store volatile float %g, float* %6
-  %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+  %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
   store volatile float %h, float* %7
-  %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+  %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
   store volatile float %i, float* %8
-  %9 = getelementptr [11 x float]* @floats, i32 0, i32 10
+  %9 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 10
   store volatile float %j, float* %9
   ret void
 }
@@ -170,9 +170,9 @@ entry:
 define void @double_arg2(i8 %a, double %b) nounwind {
 entry:
-  %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+  %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
   store volatile i8 %a, i8* %0
-  %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+  %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
   store volatile double %b, double* %1
   ret void
 }
@@ -197,9 +197,9 @@ entry:
 define void @float_arg2(i8 signext %a, float %b) nounwind {
 entry:
-  %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+  %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
   store volatile i8 %a, i8* %0
-  %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+  %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
   store volatile float %b, float* %1
   ret void
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-fp128.ll b/test/CodeGen/Mips/cconv/arguments-fp128.ll
index 1666974..fabc107 100644
--- a/test/CodeGen/Mips/cconv/arguments-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-fp128.ll
@@ -13,15 +13,15 @@
 define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
 entry:
-  %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+  %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
   store volatile fp128 %a, fp128* %0
-  %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+  %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
   store volatile fp128 %b, fp128* %1
-  %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+  %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
   store volatile fp128 %c, fp128* %2
-  %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+  %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
   store volatile fp128 %d, fp128* %3
-  %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+  %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
   store volatile fp128 %e, fp128* %4
   ret void
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
index 380bd5c..9f1fe91 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
@@ -25,14 +25,14 @@
 define void @double_args(double %a, ...) nounwind {
 entry:
-  %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+  %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
   store volatile double %a, double* %0
 
   %ap = alloca i8*
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
   %b = va_arg i8** %ap, double
-  %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+  %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
   store volatile double %b, double* %1
   call void @llvm.va_end(i8* %ap2)
   ret void
@@ -90,14 +90,14 @@ entry:
 define void @float_args(float %a, ...) nounwind {
 entry:
-  %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+  %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
   store volatile float %a, float* %0
 
   %ap = alloca i8*
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
   %b = va_arg i8** %ap, float
-  %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+  %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
   store volatile float %b, float* %1
   call void @llvm.va_end(i8* %ap2)
   ret void
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float.ll b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
index 3221e23..24148ed 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
@@ -24,23 +24,23 @@ define void @double_args(double %a, double %b, double %c, double %d, double %e,
                          double %f, double %g, double %h, double %i) nounwind {
 entry:
-  %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+  %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
   store volatile double %a, double* %0
-  %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+  %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
   store volatile double %b, double* %1
-  %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+  %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
   store volatile double %c, double* %2
-  %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+  %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
   store volatile double %d, double* %3
-  %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+  %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
   store volatile double %e, double* %4
-  %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+  %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
   store volatile double %f, double* %5
-  %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+  %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
   store volatile double %g, double* %6
-  %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+  %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
   store volatile double %h, double* %7
-  %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+  %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
   store volatile double %i, double* %8
   ret void
 }
@@ -87,23 +87,23 @@ entry:
 define void @float_args(float %a, float %b, float %c, float %d, float %e,
                         float %f, float %g, float %h, float %i) nounwind {
 entry:
-  %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+  %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
   store volatile float %a, float* %0
-  %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+  %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
   store volatile float %b, float* %1
-  %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+  %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
   store volatile float %c, float* %2
-  %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+  %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
   store volatile float %d, float* %3
-  %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+  %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
   store volatile float %e, float* %4
-  %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+  %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
   store volatile float %f, float* %5
-  %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+  %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
   store volatile float %g, float* %6
-  %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+  %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
   store volatile float %h, float* %7
-  %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+  %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
   store volatile float %i, float* %8
   ret void
 }
@@ -153,9 +153,9 @@ entry:
 define void @double_arg2(i8 %a, double %b) nounwind {
 entry:
-  %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+  %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
   store volatile i8 %a, i8* %0
-  %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+  %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
   store volatile double %b, double* %1
   ret void
 }
@@ -184,9 +184,9 @@ entry:
 define void @float_arg2(i8 %a, float %b) nounwind {
 entry:
-  %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+  %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
   store volatile i8 %a, i8* %0
-  %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+  %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
   store volatile float %b, float* %1
   ret void
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
index 583759a..26eb569 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
@@ -13,15 +13,15 @@
 define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
 entry:
-  %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+  %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
   store volatile fp128 %a, fp128* %0
-  %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+  %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
   store volatile fp128 %b, fp128* %1
-  %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+  %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
   store volatile fp128 %c, fp128* %2
-  %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+  %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
   store volatile fp128 %d, fp128* %3
-  %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+  %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
   store volatile fp128 %e, fp128* %4
   ret void
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll b/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
new file mode 100644
index 0000000..087a051
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
@@ -0,0 +1,80 @@
+; RUN: llc < %s -march=mips64 -target-abi n64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=MIPSEB
+; RUN: llc < %s -march=mips64el -target-abi n64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=MIPSEL
+; RUN: llc < %s -march=mips64 -target-abi n32 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=MIPSEB
+; RUN: llc < %s -march=mips64el -target-abi n32 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=MIPSEL
+
+; #include <stdio.h>
+;
+; struct S1 {
+;   char x1;
+;   short x2;
+;   char x3;
+; };
+;
+; struct S2 {
+;   char x1;
+;   char x2;
+;   char x3;
+;   char x4;
+;   char x5;
+; };
+;
+; void fS1(struct S1 s);
+; void fS2(struct S2 s);
+;
+; void f1() {
+;   struct S1 s1_1;
+;   fS1(s1_1);
+; }
+;
+; void f2() {
+;   struct S2 s2_1;
+;   fS2(s2_1);
+; }
+;
+; int main() {
+;   f1();
+;   f2();
+; }
+
+%struct.S1 = type { i8, i16, i8 }
+%struct.S2 = type { i8, i8, i8, i8, i8 }
+
+declare void @fS1(i48 inreg) #1
+declare void @fS2(i40 inreg) #1
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2
+
+define void @f1() #0 {
+entry:
+  %s1_1 = alloca %struct.S1, align 2
+  %s1_1.coerce = alloca { i48 }
+  %0 = bitcast { i48 }* %s1_1.coerce to i8*
+  %1 = bitcast %struct.S1* %s1_1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 6, i32 0, i1 false)
+  %2 = getelementptr { i48 }, { i48 }* %s1_1.coerce, i32 0, i32 0
+  %3 = load i48, i48* %2, align 1
+  call void @fS1(i48 inreg %3)
+  ret void
+  ; ALL-LABEL: f1:
+
+  ; MIPSEB: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
+  ; MIPSEL-NOT: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
+}
+
+define void @f2() #0 {
+entry:
+  %s2_1 = alloca %struct.S2, align 1
+  %s2_1.coerce = alloca { i40 }
+  %0 = bitcast { i40 }* %s2_1.coerce to i8*
+  %1 = bitcast %struct.S2* %s2_1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 5, i32 0, i1 false)
+  %2 = getelementptr { i40 }, { i40 }* %s2_1.coerce, i32 0, i32 0
+  %3 = load i40, i40* %2, align 1
+  call void @fS2(i40 inreg %3)
+  ret void
+  ; ALL-LABEL: f2:
+
+  ; MIPSEB: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
+  ; MIPSEL-NOT: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
+}
diff --git a/test/CodeGen/Mips/cconv/arguments-struct.ll b/test/CodeGen/Mips/cconv/arguments-struct.ll
index 7ff894f..ee6bfae 100644
--- a/test/CodeGen/Mips/cconv/arguments-struct.ll
+++ b/test/CodeGen/Mips/cconv/arguments-struct.ll
@@ -19,7 +19,7 @@
 
 define void @s_i8(i8 inreg %a) nounwind {
 entry:
-  store i8 %a, i8* getelementptr inbounds ([2 x i8]* @bytes, i32 0, i32 1)
+  store i8 %a, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @bytes, i32 0, i32 1)
   ret void
 }
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
index 458b124..abb3601 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
@@ -140,11 +140,11 @@ define void @smallStruct_1b(%struct.SmallStruct_1b* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b*, align 8
   store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
-  %2 = getelementptr { i8 }* %1, i32 0, i32 0
-  %3 = load i8* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
+  %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
+  %3 = load i8, i8* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_1b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
@@ -154,11 +154,11 @@ define void @smallStruct_2b(%struct.SmallStruct_2b* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_2b*, align 8
   store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_2b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_2b*, %struct.SmallStruct_2b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
-  %2 = getelementptr { i16 }* %1, i32 0, i32 0
-  %3 = load i16* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
+  %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
+  %3 = load i16, i16* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_2b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 48
@@ -169,13 +169,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_3b*, align 8
   %.coerce = alloca { i24 }
   store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8
   %1 = bitcast { i24 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false)
-  %3 = getelementptr { i24 }* %.coerce, i32 0, i32 0
-  %4 = load i24* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
+  %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
+  %4 = load i24, i24* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_3b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 40
@@ -187,11 +187,11 @@ define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_4b*, align 8
   store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_4b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_4b*, %struct.SmallStruct_4b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
-  %2 = getelementptr { i32 }* %1, i32 0, i32 0
-  %3 = load i32* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+  %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
+  %3 = load i32, i32* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_4b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
@@ -202,13 +202,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_5b*, align 8
   %.coerce = alloca { i40 }
   store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_5b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8
   %1 = bitcast { i40 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_5b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false)
-  %3 = getelementptr { i40 }* %.coerce, i32 0, i32 0
-  %4 = load i40* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
+  %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0
+  %4 = load i40, i40* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_5b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
@@ -219,13 +219,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_6b*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_6b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_6b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
-  %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
+  %4 = load i48, i48* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_6b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
@@ -236,13 +236,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_7b*, align 8
   %.coerce = alloca { i56 }
   store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_7b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8
   %1 = bitcast { i56 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_7b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false)
-  %3 = getelementptr { i56 }* %.coerce, i32 0, i32 0
-  %4 = load i56* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
+  %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0
+  %4 = load i56, i56* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_7b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 8
@@ -252,11 +252,11 @@ define void @smallStruct_8b(%struct.SmallStruct_8b* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_8b*, align 8
   store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_8b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_8b*, %struct.SmallStruct_8b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
-  %2 = getelementptr { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
+  %3 = load i64, i64* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_8b:
 ; CHECK-NOT: dsll
@@ -267,15 +267,15 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_9b*, align 8
   %.coerce = alloca { i64, i8 }
   store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_9b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8
   %1 = bitcast { i64, i8 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_9b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false)
-  %3 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 0
-  %4 = load i64* %3, align 1
-  %5 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 1
-  %6 = load i8* %5, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
+  %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0
+  %4 = load i64, i64* %3, align 1
+  %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1
+  %6 = load i8, i8* %5, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
   ret void
 ; CHECK-LABEL: smallStruct_9b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
index 899a3e8..7da6ab1 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
@@ -74,11 +74,11 @@ define void @smallStruct_1b1s(%struct.SmallStruct_1b1s* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8
   store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1s*, %struct.SmallStruct_1b1s** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
-  %2 = getelementptr { i32 }* %1, i32 0, i32 0
-  %3 = load i32* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+  %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
+  %3 = load i32, i32* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_1b1s:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
@@ -88,11 +88,11 @@ define void @smallStruct_1b1i(%struct.SmallStruct_1b1i* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8
   store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1i** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1i*, %struct.SmallStruct_1b1i** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
-  %2 = getelementptr { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
+  %3 = load i64, i64* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_1b1i:
 ; CHECK-NOT: dsll
@@ -103,13 +103,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s1b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
-  %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
+  %4 = load i48, i48* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_1b1s1b:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
@@ -121,11 +121,11 @@ define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 {
 entry:
   %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8
   store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1s1i** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1s1i*, %struct.SmallStruct_1s1i** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
-  %2 = getelementptr { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
+  %3 = load i64, i64* %2, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
 ; CHECK-LABEL: smallStruct_1s1i:
 ; CHECK-NOT: dsll
@@ -136,13 +136,13 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b1s** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
-  %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
+  %4 = load i48, i48* %3, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
 ; CHECK-LABEL: smallStruct_3b1s:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
index 1f73625..f70b75f 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
@@ -110,43 +110,43 @@ entry:
   store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8
   store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8
   store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8
-  %0 = load %struct.SmallStruct_1b** %ss1.addr, align 8
-  %1 = load %struct.SmallStruct_1b** %ss2.addr, align 8
-  %2 = load %struct.SmallStruct_1b** %ss3.addr, align 8
-  %3 = load %struct.SmallStruct_1b** %ss4.addr, align 8
-  %4 = load %struct.SmallStruct_1b** %ss5.addr, align 8
-  %5 = load %struct.SmallStruct_1b** %ss6.addr, align 8
-  %6 = load %struct.SmallStruct_1b** %ss7.addr, align 8
-  %7 = load %struct.SmallStruct_1b** %ss8.addr, align 8
-  %8 = load %struct.SmallStruct_1b** %ss9.addr, align 8
+  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss1.addr, align 8
+  %1 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss2.addr, align 8
+  %2 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss3.addr, align 8
+  %3 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss4.addr, align 8
+  %4 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss5.addr, align 8
+  %5 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss6.addr, align 8
+  %6 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss7.addr, align 8
+  %7 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss8.addr, align 8
+  %8 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss9.addr, align 8
   %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
-  %10 = getelementptr { i8 }* %9, i32 0, i32 0
-  %11 = load i8* %10, align 1
+  %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0
+  %11 = load i8, i8* %10, align 1
   %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
-  %13 = getelementptr { i8 }* %12, i32 0, i32 0
-  %14 = load i8* %13, align 1
+  %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0
+  %14 = load i8, i8* %13, align 1
   %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
-  %16 = getelementptr { i8 }* %15, i32 0, i32 0
-  %17 = load i8* %16, align 1
+  %16 = getelementptr { i8 }, { i8 }* %15, i32 0, i32 0
+  %17 = load i8, i8* %16, align 1
   %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
-  %19 = getelementptr { i8 }* %18, i32 0, i32 0
-  %20 = load i8* %19, align 1
+  %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0
+  %20 = load i8, i8* %19, align 1
   %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
-  %22 = getelementptr { i8 }* %21, i32 0, i32 0
-  %23 = load i8* %22, align 1
+  %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0
+  %23 = load i8, i8* %22, align 1
   %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
-  %25 = getelementptr { i8 }* %24, i32 0, i32 0
-  %26 = load i8* %25, align 1
+  %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0
+  %26 = load i8, i8* %25, align 1
   %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
-  %28 = getelementptr { i8 }* %27, i32 0, i32 0
-  %29 = load i8* %28, align 1
+  %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0
+  %29 = load i8, i8* %28, align 1
   %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
-  %31 = getelementptr { i8 }* %30, i32 0, i32 0
-  %32 = load i8* %31, align 1
+  %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0
+  %32 = load i8, i8* %31, align 1
   %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
-  %34 = getelementptr { i8 }* %33, i32 0, i32 0
-  %35 = load i8* %34, align 1
-  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
+  %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0
+  %35 = load i8, i8* %34, align 1
+  call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
   ret void
 ; CHECK-LABEL: smallStruct_1b_x9:
 ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs.ll b/test/CodeGen/Mips/cconv/arguments-varargs.ll
index 6e6f48b..af217c9 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs.ll
@@ -119,12 +119,12 @@ entry:
   call void asm sideeffect "# ANCHOR1", ""()
   %arg1 = va_arg i8** %ap, i16
-  %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1
+  %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
   call void asm sideeffect "# ANCHOR2", ""()
   %arg2 = va_arg i8** %ap, i16
-  %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2
+  %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store 
volatile i16 %arg2, i16* %e2, align 2 call void @llvm.va_end(i8* %ap2) @@ -237,12 +237,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i32 - %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1 + %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1 store volatile i32 %arg1, i32* %e1, align 4 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i32 - %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2 + %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2 store volatile i32 %arg2, i32* %e2, align 4 call void @llvm.va_end(i8* %ap2) @@ -364,12 +364,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i64 - %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1 + %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1 store volatile i64 %arg1, i64* %e1, align 8 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i64 - %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2 + %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2 store volatile i64 %arg2, i64* %e2, align 8 call void @llvm.va_end(i8* %ap2) @@ -482,12 +482,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i16 - %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1 + %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1 store volatile i16 %arg1, i16* %e1, align 2 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i16 - %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2 + %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2 store volatile i16 %arg2, i16* %e2, align 2 call void @llvm.va_end(i8* %ap2) @@ -600,12 +600,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i32 - %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1 + %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1 store volatile i32 %arg1, i32* %e1, align 4 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i32 - %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2 + %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2 store volatile i32 %arg2, i32* %e2, align 4 call void @llvm.va_end(i8* %ap2) @@ -727,12 +727,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i64 - %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1 + %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1 store volatile i64 %arg1, i64* %e1, align 8 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i64 - %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2 + %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2 store volatile i64 %arg2, i64* %e2, align 8 call void @llvm.va_end(i8* %ap2) @@ -844,12 +844,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i16 - %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1 + %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1 store volatile i16 %arg1, i16* %e1, align 2 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i16 - %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2 + %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2 store volatile i16 %arg2, i16* %e2, align 2 call void @llvm.va_end(i8* %ap2) @@ -961,12 +961,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i32 - %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1 + %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1 store volatile i32 %arg1, i32* %e1, align 4 call void 
asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i32 - %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2 + %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2 store volatile i32 %arg2, i32* %e2, align 4 call void @llvm.va_end(i8* %ap2) @@ -1087,12 +1087,12 @@ entry: call void asm sideeffect "# ANCHOR1", ""() %arg1 = va_arg i8** %ap, i64 - %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1 + %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1 store volatile i64 %arg1, i64* %e1, align 8 call void asm sideeffect "# ANCHOR2", ""() %arg2 = va_arg i8** %ap, i64 - %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2 + %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2 store volatile i64 %arg2, i64* %e2, align 8 call void @llvm.va_end(i8* %ap2) diff --git a/test/CodeGen/Mips/cconv/arguments.ll b/test/CodeGen/Mips/cconv/arguments.ll index 98671aa..430705f 100644 --- a/test/CodeGen/Mips/cconv/arguments.ll +++ b/test/CodeGen/Mips/cconv/arguments.ll @@ -28,25 +28,25 @@ define void @align_to_arg_slots(i8 signext %a, i8 signext %b, i8 signext %c, i8 signext %g, i8 signext %h, i8 signext %i, i8 signext %j) nounwind { entry: - %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1 + %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1 store volatile i8 %a, i8* %0 - %1 = getelementptr [11 x i8]* @bytes, i32 0, i32 2 + %1 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2 store volatile i8 %b, i8* %1 - %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 3 + %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3 store volatile i8 %c, i8* %2 - %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 4 + %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4 store volatile i8 %d, i8* %3 - %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 5 + %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5 store volatile i8 %e, i8* %4 - %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 6 + %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6 store volatile i8 %f, i8* %5 - %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 7 + %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7 store volatile i8 %g, i8* %6 - %7 = getelementptr [11 x i8]* @bytes, i32 0, i32 8 + %7 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 8 store volatile i8 %h, i8* %7 - %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 9 + %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 9 store volatile i8 %i, i8* %8 - %9 = getelementptr [11 x i8]* @bytes, i32 0, i32 10 + %9 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 10 store volatile i8 %j, i8* %9 ret void } @@ -95,23 +95,23 @@ define void @slot_skipping(i8 signext %a, i64 signext %b, i8 signext %c, i8 signext %d, i8 signext %e, i8 signext %f, i8 signext %g, i64 signext %i, i8 signext %j) nounwind { entry: - %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1 + %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1 store volatile i8 %a, i8* %0 - %1 = getelementptr [11 x i64]* @dwords, i32 0, i32 1 + %1 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 1 store volatile i64 %b, i64* %1 - %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 2 + %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2 store volatile i8 %c, i8* %2 - %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 3 + %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3 store volatile i8 %d, i8* %3 - %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 4 + %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, 
i32 4 store volatile i8 %e, i8* %4 - %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 5 + %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5 store volatile i8 %f, i8* %5 - %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 6 + %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6 store volatile i8 %g, i8* %6 - %7 = getelementptr [11 x i64]* @dwords, i32 0, i32 2 + %7 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 2 store volatile i64 %i, i64* %7 - %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 7 + %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7 store volatile i8 %j, i8* %8 ret void } diff --git a/test/CodeGen/Mips/cconv/return-float.ll b/test/CodeGen/Mips/cconv/return-float.ll index 8c4c31c..4355a55 100644 --- a/test/CodeGen/Mips/cconv/return-float.ll +++ b/test/CodeGen/Mips/cconv/return-float.ll @@ -21,7 +21,7 @@ define float @retfloat() nounwind { entry: - %0 = load volatile float* @float + %0 = load volatile float, float* @float ret float %0 } @@ -35,7 +35,7 @@ entry: define double @retdouble() nounwind { entry: - %0 = load volatile double* @double + %0 = load volatile double, double* @double ret double %0 } diff --git a/test/CodeGen/Mips/cconv/return-hard-float.ll b/test/CodeGen/Mips/cconv/return-hard-float.ll index f0aeb12..14853c8 100644 --- a/test/CodeGen/Mips/cconv/return-hard-float.ll +++ b/test/CodeGen/Mips/cconv/return-hard-float.ll @@ -24,7 +24,7 @@ define float @retfloat() nounwind { entry: - %0 = load volatile float* @float + %0 = load volatile float, float* @float ret float %0 } @@ -38,7 +38,7 @@ entry: define double @retdouble() nounwind { entry: - %0 = load volatile double* @double + %0 = load volatile double, double* @double ret double %0 } @@ -50,7 +50,7 @@ entry: define { double, double } @retComplexDouble() #0 { %retval = alloca { double, double }, align 8 - %1 = load { double, double }* %retval + %1 = load { double, double }, { double, double }* %retval ret { double, double } %1 } diff --git a/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/test/CodeGen/Mips/cconv/return-hard-fp128.ll index 05dacfe..34e9647 100644 --- a/test/CodeGen/Mips/cconv/return-hard-fp128.ll +++ b/test/CodeGen/Mips/cconv/return-hard-fp128.ll @@ -13,7 +13,7 @@ define fp128 @retldouble() nounwind { entry: - %0 = load volatile fp128* @fp128 + %0 = load volatile fp128, fp128* @fp128 ret fp128 %0 } diff --git a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll index 4ce26b1..c4c8f10 100644 --- a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll +++ b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll @@ -10,7 +10,7 @@ define inreg {fp128} @ret_struct_fp128() nounwind { entry: - %0 = load volatile {fp128}* @struct_fp128 + %0 = load volatile {fp128}, {fp128}* @struct_fp128 ret {fp128} %0 } diff --git a/test/CodeGen/Mips/cconv/return-struct.ll b/test/CodeGen/Mips/cconv/return-struct.ll index 3d591df..8decd04 100644 --- a/test/CodeGen/Mips/cconv/return-struct.ll +++ b/test/CodeGen/Mips/cconv/return-struct.ll @@ -22,7 +22,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i define inreg {i8} @ret_struct_i8() nounwind { entry: - %0 = load volatile {i8}* @struct_byte + %0 = load volatile {i8}, {i8}* @struct_byte ret {i8} %0 } @@ -52,9 +52,9 @@ define inreg {i16} @ret_struct_i16() nounwind { entry: %retval = alloca {i8,i8}, align 1 %0 = bitcast {i8,i8}* %retval to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}* @struct_2byte, i32 0, i32 
0), i64 2, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}, {i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i32 1, i1 false) %1 = bitcast {i8,i8}* %retval to {i16}* - %2 = load volatile {i16}* %1 + %2 = load volatile {i16}, {i16}* %1 ret {i16} %2 } @@ -91,7 +91,7 @@ entry: ; missed by the CCPromoteToType and the shift didn't happen. define inreg {i48} @ret_struct_3xi16() nounwind { entry: - %0 = load volatile i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2 + %0 = load volatile i48, i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2 %1 = insertvalue {i48} undef, i48 %0, 0 ret {i48} %1 } @@ -174,7 +174,7 @@ entry: ; This time we let the backend lower the sret argument. define {[6 x i32]} @ret_struct_6xi32() { entry: - %0 = load volatile {[6 x i32]}* @struct_6xi32, align 2 + %0 = load volatile {[6 x i32]}, {[6 x i32]}* @struct_6xi32, align 2 ret {[6 x i32]} %0 } diff --git a/test/CodeGen/Mips/cconv/return.ll b/test/CodeGen/Mips/cconv/return.ll index 516026d..a537672 100644 --- a/test/CodeGen/Mips/cconv/return.ll +++ b/test/CodeGen/Mips/cconv/return.ll @@ -24,7 +24,7 @@ define i8 @reti8() nounwind { entry: - %0 = load volatile i8* @byte + %0 = load volatile i8, i8* @byte ret i8 %0 } @@ -38,7 +38,7 @@ entry: define i32 @reti32() nounwind { entry: - %0 = load volatile i32* @word + %0 = load volatile i32, i32* @word ret i32 %0 } @@ -52,7 +52,7 @@ entry: define i64 @reti64() nounwind { entry: - %0 = load volatile i64* @dword + %0 = load volatile i64, i64* @dword ret i64 %0 } diff --git a/test/CodeGen/Mips/cfi_offset.ll b/test/CodeGen/Mips/cfi_offset.ll index e23855b..6e78344 100644 --- a/test/CodeGen/Mips/cfi_offset.ll +++ b/test/CodeGen/Mips/cfi_offset.ll @@ -32,8 +32,8 @@ define void @bar() { ; CHECK: .cfi_offset 31, -20 ; CHECK: .cfi_offset 16, -24 - %val1 = load volatile double* @var - %val2 = load volatile double* @var + %val1 = load volatile double, double* @var + %val2 = load volatile double, double* @var call void (...)* @foo() nounwind store volatile double %val1, double* @var store volatile double %val2, double* @var diff --git a/test/CodeGen/Mips/check-adde-redundant-moves.ll b/test/CodeGen/Mips/check-adde-redundant-moves.ll new file mode 100644 index 0000000..7bc63a4 --- /dev/null +++ b/test/CodeGen/Mips/check-adde-redundant-moves.ll @@ -0,0 +1,35 @@ +; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \ +; RUN: -check-prefix=ALL -check-prefix=GP32 +; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s -check-prefix=ALL +; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL + +define i64 
@add_i64(i64 %a) { + ; GP32-LABEL: add_i64 + + ; GP32-NOT: move $[[T0:[0-9]+]], $[[T0]] + %r = add i64 5, %a + ret i64 %r +} + +define i128 @add_i128(i128 %a) { + ; ALL-LABEL: add_i128 + + ; ALL-NOT: move $[[T0:[0-9]+]], $[[T0]] + %r = add i128 5, %a + ret i128 %r +} diff --git a/test/CodeGen/Mips/ci2.ll b/test/CodeGen/Mips/ci2.ll index e2068fd..63ed683 100644 --- a/test/CodeGen/Mips/ci2.ll +++ b/test/CodeGen/Mips/ci2.ll @@ -8,7 +8,7 @@ define void @foo() #0 { entry: store i32 305419896, i32* @i, align 4 - %0 = load i32* @b, align 4 + %0 = load i32, i32* @b, align 4 %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/test/CodeGen/Mips/cmov.ll b/test/CodeGen/Mips/cmov.ll index b12c2df..a8008a2 100644 --- a/test/CodeGen/Mips/cmov.ll +++ b/test/CodeGen/Mips/cmov.ll @@ -41,8 +41,8 @@ define i32* @cmov1(i32 signext %s) nounwind readonly { entry: %tobool = icmp ne i32 %s, 0 - %tmp1 = load i32** @i3, align 4 - %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32]* @i1, i32 0, i32 0), i32* %tmp1 + %tmp1 = load i32*, i32** @i3, align 4 + %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @i1, i32 0, i32 0), i32* %tmp1 ret i32* %cond } @@ -81,8 +81,8 @@ entry: define i32 @cmov2(i32 signext %s) nounwind readonly { entry: %tobool = icmp ne i32 %s, 0 - %tmp1 = load i32* @c, align 4 - %tmp2 = load i32* @d, align 4 + %tmp1 = load i32, i32* @c, align 4 + %tmp2 = load i32, i32* @d, align 4 %cond = select i1 %tobool, i32 %tmp1, i32 %tmp2 ret i32 %cond } diff --git a/test/CodeGen/Mips/cmplarge.ll b/test/CodeGen/Mips/cmplarge.ll index 2a3d30a..7901906 100644 --- a/test/CodeGen/Mips/cmplarge.ll +++ b/test/CodeGen/Mips/cmplarge.ll @@ -9,8 +9,8 @@ target triple = "mipsel--linux-gnu" define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 { entry: - %size_y = getelementptr inbounds %struct.StorablePicture* %s, i32 0, i32 1 - %0 = load i32* %size_y, align 4 + %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1 + %0 = load i32, i32* %size_y, align 4 %sub = add nsw i32 %0, -1 %add5 = add nsw i32 %0, 20 %cmp6 = icmp sgt i32 %add5, -20 @@ -20,7 +20,7 @@ for.body: ; preds = %entry, %for.body %j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ] %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2 %inc = add nsw i32 %j.07, 1 - %1 = load i32* %size_y, align 4 + %1 = load i32, i32* %size_y, align 4 %add = add nsw i32 %1, 20 %cmp = icmp slt i32 %inc, %add br i1 %cmp, label %for.body, label %for.end diff --git a/test/CodeGen/Mips/const4a.ll b/test/CodeGen/Mips/const4a.ll index ac6795b..9022eb4 100644 --- a/test/CodeGen/Mips/const4a.ll +++ b/test/CodeGen/Mips/const4a.ll @@ -14,7 +14,7 @@ target triple = "mips--linux-gnu" define void @t() #0 { entry: store i32 -559023410, i32* @i, align 4 - %0 = load i32* @b, align 4 + %0 = load i32, i32* @b, align 4 ; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/test/CodeGen/Mips/ctlz.ll b/test/CodeGen/Mips/ctlz.ll index 1f87166..96af197 100644 --- a/test/CodeGen/Mips/ctlz.ll +++ b/test/CodeGen/Mips/ctlz.ll @@ -9,7 +9,7 @@ define i32 @main() #0 { entry: %retval = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* @x, align 4 + %0 = load i32, i32* @x, align 4 %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true) store i32 %1, i32* @y, align 4 ret i32 0 diff --git a/test/CodeGen/Mips/disable-tail-merge.ll 
b/test/CodeGen/Mips/disable-tail-merge.ll index b4c093a..9396db7 100644 --- a/test/CodeGen/Mips/disable-tail-merge.ll +++ b/test/CodeGen/Mips/disable-tail-merge.ll @@ -9,20 +9,20 @@ define i32 @test1(i32 %a) { entry: %tobool = icmp eq i32 %a, 0 - %0 = load i32* @g0, align 4 + %0 = load i32, i32* @g0, align 4 br i1 %tobool, label %if.else, label %if.then if.then: %add = add nsw i32 %0, 1 store i32 %add, i32* @g0, align 4 - %1 = load i32* @g1, align 4 + %1 = load i32, i32* @g1, align 4 %add1 = add nsw i32 %1, 23 br label %if.end if.else: %add2 = add nsw i32 %0, 11 store i32 %add2, i32* @g0, align 4 - %2 = load i32* @g1, align 4 + %2 = load i32, i32* @g1, align 4 %add3 = add nsw i32 %2, 23 br label %if.end diff --git a/test/CodeGen/Mips/div.ll b/test/CodeGen/Mips/div.ll index 00e2c19..731841c 100644 --- a/test/CodeGen/Mips/div.ll +++ b/test/CodeGen/Mips/div.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = sdiv i32 %0, %1 ; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} diff --git a/test/CodeGen/Mips/div_rem.ll b/test/CodeGen/Mips/div_rem.ll index 950192e..e64529c 100644 --- a/test/CodeGen/Mips/div_rem.ll +++ b/test/CodeGen/Mips/div_rem.ll @@ -7,8 +7,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = sdiv i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = srem i32 %0, %1 diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll index a9cfe0f..918db05 100644 --- a/test/CodeGen/Mips/divrem.ll +++ b/test/CodeGen/Mips/divrem.ll @@ -220,8 +220,8 @@ entry: ; FIXME: It's not clear what this is supposed to test. 
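For context on the div/mflo CHECK lines in the division tests above: on pre-R6 MIPS targets a single div (or divu) writes the quotient to the LO register and the remainder to HI, which are read back with mflo and mfhi. A hypothetical standalone input, with invented names, that exercises both results of one division:

define i32 @sdiv_srem_demo(i32 %num, i32 %den) {
entry:
  ; both operations can share one hardware div:
  ; mflo reads the quotient, mfhi the remainder
  %q = sdiv i32 %num, %den
  %r = srem i32 %num, %den
  %sum = add i32 %q, %r
  ret i32 %sum
}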
define i32 @killFlags() { entry: - %0 = load i32* @g0, align 4 - %1 = load i32* @g1, align 4 + %0 = load i32, i32* @g0, align 4 + %1 = load i32, i32* @g1, align 4 %div = sdiv i32 %0, %1 ret i32 %div } diff --git a/test/CodeGen/Mips/divu.ll b/test/CodeGen/Mips/divu.ll index b96a439..5bc765a 100644 --- a/test/CodeGen/Mips/divu.ll +++ b/test/CodeGen/Mips/divu.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = udiv i32 %0, %1 ; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} diff --git a/test/CodeGen/Mips/divu_remu.ll b/test/CodeGen/Mips/divu_remu.ll index a6c1563..a079440 100644 --- a/test/CodeGen/Mips/divu_remu.ll +++ b/test/CodeGen/Mips/divu_remu.ll @@ -8,8 +8,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = udiv i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = urem i32 %0, %1 diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll index f5bb3ab..837c0d8 100644 --- a/test/CodeGen/Mips/dsp-patterns.ll +++ b/test/CodeGen/Mips/dsp-patterns.ll @@ -6,8 +6,8 @@ define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) { entry: - %add.ptr = getelementptr inbounds i8* %b, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i + %0 = load i8, i8* %add.ptr, align 1 ret i8 %0 } @@ -16,8 +16,8 @@ entry: define signext i16 @test_lhx(i16* nocapture %b, i32 %i) { entry: - %add.ptr = getelementptr inbounds i16* %b, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i + %0 = load i16, i16* %add.ptr, align 2 ret i16 %0 } @@ -26,8 +26,8 @@ entry: define i32 @test_lwx(i32* nocapture %b, i32 %i) { entry: - %add.ptr = getelementptr inbounds i32* %b, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i + %0 = load i32, i32* %add.ptr, align 4 ret i32 %0 } diff --git a/test/CodeGen/Mips/dsp-vec-load-store.ll b/test/CodeGen/Mips/dsp-vec-load-store.ll index 7e4a8fe..f925180 100644 --- a/test/CodeGen/Mips/dsp-vec-load-store.ll +++ b/test/CodeGen/Mips/dsp-vec-load-store.ll @@ -5,7 +5,7 @@ define void @extend_load_trunc_store_v2i8() { entry: - %0 = load <2 x i8>* @g1, align 2 + %0 = load <2 x i8>, <2 x i8>* @g1, align 2 store <2 x i8> %0, <2 x i8>* @g0, align 2 ret void } diff --git a/test/CodeGen/Mips/eh.ll b/test/CodeGen/Mips/eh.ll index fc9e2ef..fcbd99e 100644 --- a/test/CodeGen/Mips/eh.ll +++ b/test/CodeGen/Mips/eh.ll @@ -27,6 +27,7 @@ lpad: ; preds = %entry ; CHECK-EL: bne $5 %exn.val = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + cleanup catch i8* bitcast (i8** @_ZTId to i8*) %exn = extractvalue { i8*, i32 } %exn.val, 0 %sel = extractvalue { i8*, i32 } %exn.val, 1 @@ -37,7 +38,7 @@ lpad: ; preds = %entry catch: ; preds = %lpad %3 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind %4 = bitcast i8* %3 to double* - %exn.scalar = load double* %4, align 8 + %exn.scalar = load double, double* %4, align 8 %add = fadd double %exn.scalar, %i2 store double %add, double* @g1, align 8 tail call void @__cxa_end_catch() nounwind diff --git a/test/CodeGen/Mips/emit-big-cst.ll b/test/CodeGen/Mips/emit-big-cst.ll index a168743..9bc96c8 100644 --- a/test/CodeGen/Mips/emit-big-cst.ll +++ b/test/CodeGen/Mips/emit-big-cst.ll @@ -10,7 +10,7 @@ 
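A note on reading the CHECK directives used throughout these tests: CHECK-LABEL pins matching to a single function, $[[T0:[0-9]+]] captures a register number on first use so that a later $[[T0]] must match the same register, and CHECK-NOT asserts a pattern does not appear between the surrounding positive matches. The sketch below illustrates the syntax only; the function is invented and its CHECK lines are not verified llc output:

; CHECK-LABEL: filecheck_demo:
; CHECK: addu $[[T0:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}}
; CHECK-NOT: move $[[T0]], $[[T0]]
define i32 @filecheck_demo(i32 %a, i32 %b) {
entry:
  %s = add i32 %a, %b
  ret i32 %s
}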
define void @accessBig(i64* %storage) { %addr = bitcast i64* %storage to i82* - %bigLoadedCst = load volatile i82* @bigCst + %bigLoadedCst = load volatile i82, i82* @bigCst %tmp = add i82 %bigLoadedCst, 1 store i82 %tmp, i82* %addr ret void diff --git a/test/CodeGen/Mips/ex2.ll b/test/CodeGen/Mips/ex2.ll index 6d024c2..7547fdf 100644 --- a/test/CodeGen/Mips/ex2.ll +++ b/test/CodeGen/Mips/ex2.ll @@ -17,12 +17,12 @@ entry: store i32 0, i32* %retval %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind %0 = bitcast i8* %exception to i8** - store i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i8** %0 + store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** %0 call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) noreturn unreachable return: ; No predecessors! - %1 = load i32* %retval + %1 = load i32, i32* %retval ret i32 %1 } diff --git a/test/CodeGen/Mips/extins.ll b/test/CodeGen/Mips/extins.ll index efaeeea..6604f89 100644 --- a/test/CodeGen/Mips/extins.ll +++ b/test/CodeGen/Mips/extins.ll @@ -16,7 +16,7 @@ entry: ; 16-NOT: ins ${{[0-9]+}} %and = shl i32 %s, 5 %shl = and i32 %and, 16352 - %tmp3 = load i32* %d, align 4 + %tmp3 = load i32, i32* %d, align 4 %and5 = and i32 %tmp3, -16353 %or = or i32 %and5, %shl store i32 %or, i32* %d, align 4 diff --git a/test/CodeGen/Mips/f16abs.ll b/test/CodeGen/Mips/f16abs.ll index 0fba9c4..8389832 100644 --- a/test/CodeGen/Mips/f16abs.ll +++ b/test/CodeGen/Mips/f16abs.ll @@ -11,12 +11,12 @@ ; Function Attrs: nounwind optsize define i32 @main() #0 { entry: - %0 = load double* @y, align 8 + %0 = load double, double* @y, align 8 %call = tail call double @fabs(double %0) #2 store double %call, double* @x, align 8 ; static-NOT: .ent __call_stub_fp_fabs ; static-NOT: jal fabs - %1 = load float* @y1, align 4 + %1 = load float, float* @y1, align 4 %call2 = tail call float @fabsf(float %1) #2 store float %call2, float* @x1, align 4 ; static-NOT: .ent __call_stub_fp_fabsf diff --git a/test/CodeGen/Mips/fastcc.ll b/test/CodeGen/Mips/fastcc.ll index 6b022c5..299e0d6 100644 --- a/test/CodeGen/Mips/fastcc.ll +++ b/test/CodeGen/Mips/fastcc.ll @@ -108,23 +108,23 @@ entry: ; CHECK-NACL-NOT: lw $15 ; CHECK-NACL-NOT: lw $24 - %0 = load i32* @gi0, align 4 - %1 = load i32* @gi1, align 4 - %2 = load i32* @gi2, align 4 - %3 = load i32* @gi3, align 4 - %4 = load i32* @gi4, align 4 - %5 = load i32* @gi5, align 4 - %6 = load i32* @gi6, align 4 - %7 = load i32* @gi7, align 4 - %8 = load i32* @gi8, align 4 - %9 = load i32* @gi9, align 4 - %10 = load i32* @gi10, align 4 - %11 = load i32* @gi11, align 4 - %12 = load i32* @gi12, align 4 - %13 = load i32* @gi13, align 4 - %14 = load i32* @gi14, align 4 - %15 = load i32* @gi15, align 4 - %16 = load i32* @gi16, align 4 + %0 = load i32, i32* @gi0, align 4 + %1 = load i32, i32* @gi1, align 4 + %2 = load i32, i32* @gi2, align 4 + %3 = load i32, i32* @gi3, align 4 + %4 = load i32, i32* @gi4, align 4 + %5 = load i32, i32* @gi5, align 4 + %6 = load i32, i32* @gi6, align 4 + %7 = load i32, i32* @gi7, align 4 + %8 = load i32, i32* @gi8, align 4 + %9 = load i32, i32* @gi9, align 4 + %10 = load i32, i32* @gi10, align 4 + %11 = load i32, i32* @gi11, align 4 + %12 = load i32, i32* @gi12, align 4 + %13 = load i32, i32* @gi13, align 4 + %14 = load i32, i32* @gi14, align 4 + %15 = load i32, i32* @gi15, align 4 + %16 = load i32, i32* @gi16, align 4 tail call fastcc void @callee0(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, 
i32 %12, i32 %13, i32 %14, i32 %15, i32 %16) ret void } @@ -196,27 +196,27 @@ entry: ; CHECK: lwc1 $f1 ; CHECK: lwc1 $f0 - %0 = load float* @gfa0, align 4 - %1 = load float* @gfa1, align 4 - %2 = load float* @gfa2, align 4 - %3 = load float* @gfa3, align 4 - %4 = load float* @gfa4, align 4 - %5 = load float* @gfa5, align 4 - %6 = load float* @gfa6, align 4 - %7 = load float* @gfa7, align 4 - %8 = load float* @gfa8, align 4 - %9 = load float* @gfa9, align 4 - %10 = load float* @gfa10, align 4 - %11 = load float* @gfa11, align 4 - %12 = load float* @gfa12, align 4 - %13 = load float* @gfa13, align 4 - %14 = load float* @gfa14, align 4 - %15 = load float* @gfa15, align 4 - %16 = load float* @gfa16, align 4 - %17 = load float* @gfa17, align 4 - %18 = load float* @gfa18, align 4 - %19 = load float* @gfa19, align 4 - %20 = load float* @gfa20, align 4 + %0 = load float, float* @gfa0, align 4 + %1 = load float, float* @gfa1, align 4 + %2 = load float, float* @gfa2, align 4 + %3 = load float, float* @gfa3, align 4 + %4 = load float, float* @gfa4, align 4 + %5 = load float, float* @gfa5, align 4 + %6 = load float, float* @gfa6, align 4 + %7 = load float, float* @gfa7, align 4 + %8 = load float, float* @gfa8, align 4 + %9 = load float, float* @gfa9, align 4 + %10 = load float, float* @gfa10, align 4 + %11 = load float, float* @gfa11, align 4 + %12 = load float, float* @gfa12, align 4 + %13 = load float, float* @gfa13, align 4 + %14 = load float, float* @gfa14, align 4 + %15 = load float, float* @gfa15, align 4 + %16 = load float, float* @gfa16, align 4 + %17 = load float, float* @gfa17, align 4 + %18 = load float, float* @gfa18, align 4 + %19 = load float, float* @gfa19, align 4 + %20 = load float, float* @gfa20, align 4 tail call fastcc void @callee1(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20) ret void } @@ -292,17 +292,17 @@ entry: ; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]]) ; NOODDSPREG-DAG: swc1 $[[F0]], 0($sp) - %0 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4 - %1 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4 - %2 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4 - %3 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4 - %4 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4 - %5 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4 - %6 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4 - %7 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4 - %8 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4 - %9 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4 - %10 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4 + %0 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 + %1 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4 + %2 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4 + %3 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4 + %4 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4 + %5 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4 + %6 
= load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4 + %7 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4 + %8 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4 + %9 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4 + %10 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4 tail call fastcc void @callee2(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10) @@ -336,17 +336,17 @@ entry: ; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) ; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]]) - store float %a0, float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4 - store float %a1, float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4 - store float %a2, float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4 - store float %a3, float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4 - store float %a4, float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4 - store float %a5, float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4 - store float %a6, float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4 - store float %a7, float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4 - store float %a8, float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4 - store float %a9, float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4 - store float %a10, float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4 + store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 + store float %a1, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4 + store float %a2, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4 + store float %a3, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4 + store float %a4, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4 + store float %a5, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4 + store float %a6, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4 + store float %a7, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4 + store float %a8, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4 + store float %a9, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4 + store float %a10, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4 ret void } @@ -373,17 +373,17 @@ entry: ; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 80($[[R0]]) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 0($sp) - %0 = load double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8 - %1 = load double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8 - %2 = load double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8 - %3 = load double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8 - %4 = load double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8 - %5 = load double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8 - %6 = load double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8 - %7 = load double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8 - %8 = load double* getelementptr ([11 
x double]* @da, i32 0, i32 8), align 8 - %9 = load double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8 - %10 = load double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8 + %0 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8 + %1 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8 + %2 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8 + %3 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8 + %4 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8 + %5 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8 + %6 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8 + %7 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8 + %8 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8 + %9 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 9), align 8 + %10 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8 tail call fastcc void @callee3(double %0, double %1, double %2, double %3, double %4, double %5, double %6, double %7, double %8, double %9, double %10) @@ -417,16 +417,16 @@ entry: ; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]]) - store double %a0, double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8 - store double %a1, double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8 - store double %a2, double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8 - store double %a3, double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8 - store double %a4, double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8 - store double %a5, double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8 - store double %a6, double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8 - store double %a7, double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8 - store double %a8, double* getelementptr ([11 x double]* @da, i32 0, i32 8), align 8 - store double %a9, double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8 - store double %a10, double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8 + store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8 + store double %a1, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8 + store double %a2, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8 + store double %a3, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8 + store double %a4, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8 + store double %a5, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8 + store double %a6, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8 + store double %a7, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8 + store double %a8, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8 + store double %a9, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 
9), align 8 + store double %a10, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8 ret void } diff --git a/test/CodeGen/Mips/fcmp.ll b/test/CodeGen/Mips/fcmp.ll index 8e83b00..aa1f09bf 100644 --- a/test/CodeGen/Mips/fcmp.ll +++ b/test/CodeGen/Mips/fcmp.ll @@ -1,10 +1,17 @@ -; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32-C -; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32-C -; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMP -; RUN: llc < %s -march=mips64el -mcpu=mips4 | FileCheck %s -check-prefix=ALL -check-prefix=64-C -; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64-C -; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64-C -; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMP +; RUN: llc < %s -march=mips -mcpu=mips32 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32-C +; RUN: llc < %s -march=mips -mcpu=mips32r2 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32-C +; RUN: llc < %s -march=mips -mcpu=mips32r6 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32-CMP +; RUN: llc < %s -march=mips64 -mcpu=mips4 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=64-C +; RUN: llc < %s -march=mips64 -mcpu=mips64 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=64-C +; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=64-C +; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=64-CMP define i32 @false_f32(float %a, float %b) nounwind { ; ALL-LABEL: false_f32: @@ -18,15 +25,13 @@ define i32 @false_f32(float %a, float %b) nounwind { define i32 @oeq_f32(float %a, float %b) nounwind { ; ALL-LABEL: oeq_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.eq.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.eq.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -44,15 +49,13 @@ define i32 @oeq_f32(float %a, float %b) nounwind { define i32 @ogt_f32(float %a, float %b) nounwind { ; ALL-LABEL: ogt_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ule.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ule.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -70,15 +73,13 @@ define i32 @ogt_f32(float %a, float %b) nounwind { define i32 @oge_f32(float %a, float %b) nounwind { ; ALL-LABEL: oge_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ult.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 
64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ult.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -96,15 +97,13 @@ define i32 @oge_f32(float %a, float %b) nounwind { define i32 @olt_f32(float %a, float %b) nounwind { ; ALL-LABEL: olt_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.olt.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.olt.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -122,15 +121,13 @@ define i32 @olt_f32(float %a, float %b) nounwind { define i32 @ole_f32(float %a, float %b) nounwind { ; ALL-LABEL: ole_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ole.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ole.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -148,15 +145,13 @@ define i32 @ole_f32(float %a, float %b) nounwind { define i32 @one_f32(float %a, float %b) nounwind { ; ALL-LABEL: one_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ueq.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ueq.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -176,15 +171,13 @@ define i32 @one_f32(float %a, float %b) nounwind { define i32 @ord_f32(float %a, float %b) nounwind { ; ALL-LABEL: ord_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.un.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.un.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -204,15 +197,13 @@ define i32 @ord_f32(float %a, float %b) nounwind { define i32 @ueq_f32(float %a, float %b) nounwind { ; ALL-LABEL: ueq_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ueq.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ueq.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], 
$f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -230,15 +221,13 @@ define i32 @ueq_f32(float %a, float %b) nounwind { define i32 @ugt_f32(float %a, float %b) nounwind { ; ALL-LABEL: ugt_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ole.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ole.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -256,15 +245,13 @@ define i32 @ugt_f32(float %a, float %b) nounwind { define i32 @uge_f32(float %a, float %b) nounwind { ; ALL-LABEL: uge_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.olt.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.olt.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -282,15 +269,13 @@ define i32 @uge_f32(float %a, float %b) nounwind { define i32 @ult_f32(float %a, float %b) nounwind { ; ALL-LABEL: ult_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ult.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ult.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -300,6 +285,7 @@ define i32 @ult_f32(float %a, float %b) nounwind { ; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] ; 64-CMP-DAG: andi $2, $[[T1]], 1 + %1 = fcmp ult float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -308,15 +294,13 @@ define i32 @ult_f32(float %a, float %b) nounwind { define i32 @ule_f32(float %a, float %b) nounwind { ; ALL-LABEL: ule_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ule.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ule.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -334,15 +318,13 @@ define i32 @ule_f32(float %a, float %b) nounwind { define i32 @une_f32(float %a, float %b) nounwind { ; ALL-LABEL: une_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.eq.s $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.eq.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], 
$f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -362,15 +344,13 @@ define i32 @une_f32(float %a, float %b) nounwind { define i32 @uno_f32(float %a, float %b) nounwind { ; ALL-LABEL: uno_f32: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.un.s $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.un.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -406,15 +386,13 @@ define i32 @false_f64(double %a, double %b) nounwind { define i32 @oeq_f64(double %a, double %b) nounwind { ; ALL-LABEL: oeq_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.eq.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.eq.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -432,15 +410,13 @@ define i32 @oeq_f64(double %a, double %b) nounwind { define i32 @ogt_f64(double %a, double %b) nounwind { ; ALL-LABEL: ogt_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ule.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ule.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -458,15 +434,13 @@ define i32 @ogt_f64(double %a, double %b) nounwind { define i32 @oge_f64(double %a, double %b) nounwind { ; ALL-LABEL: oge_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ult.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ult.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -484,15 +458,13 @@ define i32 @oge_f64(double %a, double %b) nounwind { define i32 @olt_f64(double %a, double %b) nounwind { ; ALL-LABEL: olt_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.olt.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.olt.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -510,15 +482,13 @@ define i32 @olt_f64(double %a, double %b) nounwind { define i32 @ole_f64(double %a, double %b) nounwind { ; ALL-LABEL: 
ole_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ole.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ole.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -536,15 +506,13 @@ define i32 @ole_f64(double %a, double %b) nounwind { define i32 @one_f64(double %a, double %b) nounwind { ; ALL-LABEL: one_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ueq.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ueq.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -564,15 +532,13 @@ define i32 @one_f64(double %a, double %b) nounwind { define i32 @ord_f64(double %a, double %b) nounwind { ; ALL-LABEL: ord_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.un.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.un.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -592,15 +558,13 @@ define i32 @ord_f64(double %a, double %b) nounwind { define i32 @ueq_f64(double %a, double %b) nounwind { ; ALL-LABEL: ueq_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ueq.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ueq.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -618,15 +582,13 @@ define i32 @ueq_f64(double %a, double %b) nounwind { define i32 @ugt_f64(double %a, double %b) nounwind { ; ALL-LABEL: ugt_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ole.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ole.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -644,15 +606,13 @@ define i32 @ugt_f64(double %a, double %b) nounwind { define i32 @uge_f64(double %a, double %b) nounwind { ; ALL-LABEL: uge_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.olt.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 
+; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.olt.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -670,15 +630,13 @@ define i32 @uge_f64(double %a, double %b) nounwind { define i32 @ult_f64(double %a, double %b) nounwind { ; ALL-LABEL: ult_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ult.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ult.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -696,15 +654,13 @@ define i32 @ult_f64(double %a, double %b) nounwind { define i32 @ule_f64(double %a, double %b) nounwind { ; ALL-LABEL: ule_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.ule.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.ule.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -722,15 +678,13 @@ define i32 @ule_f64(double %a, double %b) nounwind { define i32 @une_f64(double %a, double %b) nounwind { ; ALL-LABEL: une_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.eq.d $f12, $f14 -; 32-C-DAG: movf $[[T0]], $1, $fcc0 +; 32-C: movt $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.eq.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $1, $fcc0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -750,15 +704,13 @@ define i32 @une_f64(double %a, double %b) nounwind { define i32 @uno_f64(double %a, double %b) nounwind { ; ALL-LABEL: uno_f64: -; 32-C-DAG: addiu $[[T0:2]], $zero, 0 -; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 32-C-DAG: addiu $2, $zero, 1 ; 32-C-DAG: c.un.d $f12, $f14 -; 32-C-DAG: movt $[[T0]], $1, $fcc0 +; 32-C: movf $2, $zero, $fcc0 -; 64-C-DAG: addiu $[[T0:2]], $zero, 0 -; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1 +; 64-C-DAG: addiu $2, $zero, 1 ; 64-C-DAG: c.un.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $1, $fcc0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] diff --git a/test/CodeGen/Mips/fixdfsf.ll b/test/CodeGen/Mips/fixdfsf.ll index 4271ac2..8695799 100644 --- a/test/CodeGen/Mips/fixdfsf.ll +++ b/test/CodeGen/Mips/fixdfsf.ll @@ -7,7 +7,7 @@ ; Function Attrs: nounwind optsize define void @foo() { entry: - %0 = load double* @x, align 8 + %0 = load double, double* @x, align 8 %conv = fptoui double %0 to i32 store i32 %conv, i32* @y, align 4 ; pic1: lw ${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/fp-indexed-ls.ll 
b/test/CodeGen/Mips/fp-indexed-ls.ll index ea337de..219ca99 100644 --- a/test/CodeGen/Mips/fp-indexed-ls.ll +++ b/test/CodeGen/Mips/fp-indexed-ls.ll @@ -45,8 +45,8 @@ entry: ; CHECK-NACL-NOT: lwxc1 - %arrayidx = getelementptr inbounds float* %b, i32 %o - %0 = load float* %arrayidx, align 4 + %arrayidx = getelementptr inbounds float, float* %b, i32 %o + %0 = load float, float* %arrayidx, align 4 ret float %0 } @@ -76,8 +76,8 @@ entry: ; CHECK-NACL-NOT: ldxc1 - %arrayidx = getelementptr inbounds double* %b, i32 %o - %0 = load double* %arrayidx, align 8 + %arrayidx = getelementptr inbounds double, double* %b, i32 %o + %0 = load double, double* %arrayidx, align 8 ret double %0 } @@ -100,8 +100,8 @@ entry: ; luxc1 was removed in MIPS64r6 ; MIPS64R6-NOT: luxc1 - %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c - %0 = load float* %arrayidx1, align 1 + %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c + %0 = load float, float* %arrayidx1, align 1 ret float %0 } @@ -129,8 +129,8 @@ entry: ; CHECK-NACL-NOT: swxc1 - %0 = load float* @gf, align 4 - %arrayidx = getelementptr inbounds float* %b, i32 %o + %0 = load float, float* @gf, align 4 + %arrayidx = getelementptr inbounds float, float* %b, i32 %o store float %0, float* %arrayidx, align 4 ret void } @@ -159,8 +159,8 @@ entry: ; CHECK-NACL-NOT: sdxc1 - %0 = load double* @gd, align 8 - %arrayidx = getelementptr inbounds double* %b, i32 %o + %0 = load double, double* @gd, align 8 + %arrayidx = getelementptr inbounds double, double* %b, i32 %o store double %0, double* %arrayidx, align 8 ret void } @@ -179,8 +179,8 @@ entry: ; MIPS64R6-NOT: suxc1 - %0 = load float* @gf, align 4 - %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c + %0 = load float, float* @gf, align 4 + %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c store float %0, float* %arrayidx1, align 1 ret void } @@ -199,8 +199,8 @@ entry: ; MIPS64R6-NOT: luxc1 - %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c - %0 = load double* %arrayidx1, align 1 + %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c + %0 = load double, double* %arrayidx1, align 1 ret double %0 } @@ -218,8 +218,8 @@ entry: ; MIPS64R6-NOT: suxc1 - %0 = load double* @gd, align 8 - %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c + %0 = load double, double* @gd, align 8 + %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c store double %0, double* %arrayidx1, align 1 ret void } @@ -238,7 +238,7 @@ entry: ; MIPS64R6-NOT: luxc1 - %0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1 + %0 = load float, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1 ret float %0 } @@ -256,7 +256,7 @@ entry: ; MIPS64R6-NOT: suxc1 - store float %f, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1 + store float %f, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1 ret void } diff --git a/test/CodeGen/Mips/fp-spill-reload.ll b/test/CodeGen/Mips/fp-spill-reload.ll index f9887a5..4a53ad8 100644 --- a/test/CodeGen/Mips/fp-spill-reload.ll +++ b/test/CodeGen/Mips/fp-spill-reload.ll @@ -5,27 +5,27 @@ define void @foo0(i32* nocapture %b) nounwind { entry: ; CHECK: sw $fp ; CHECK: lw $fp 
- %0 = load i32* %b, align 4 - %arrayidx.1 = getelementptr inbounds i32* %b, i32 1 - %1 = load i32* %arrayidx.1, align 4 + %0 = load i32, i32* %b, align 4 + %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1 + %1 = load i32, i32* %arrayidx.1, align 4 %add.1 = add nsw i32 %1, 1 - %arrayidx.2 = getelementptr inbounds i32* %b, i32 2 - %2 = load i32* %arrayidx.2, align 4 + %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2 + %2 = load i32, i32* %arrayidx.2, align 4 %add.2 = add nsw i32 %2, 2 - %arrayidx.3 = getelementptr inbounds i32* %b, i32 3 - %3 = load i32* %arrayidx.3, align 4 + %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3 + %3 = load i32, i32* %arrayidx.3, align 4 %add.3 = add nsw i32 %3, 3 - %arrayidx.4 = getelementptr inbounds i32* %b, i32 4 - %4 = load i32* %arrayidx.4, align 4 + %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4 + %4 = load i32, i32* %arrayidx.4, align 4 %add.4 = add nsw i32 %4, 4 - %arrayidx.5 = getelementptr inbounds i32* %b, i32 5 - %5 = load i32* %arrayidx.5, align 4 + %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5 + %5 = load i32, i32* %arrayidx.5, align 4 %add.5 = add nsw i32 %5, 5 - %arrayidx.6 = getelementptr inbounds i32* %b, i32 6 - %6 = load i32* %arrayidx.6, align 4 + %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 6 + %6 = load i32, i32* %arrayidx.6, align 4 %add.6 = add nsw i32 %6, 6 - %arrayidx.7 = getelementptr inbounds i32* %b, i32 7 - %7 = load i32* %arrayidx.7, align 4 + %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7 + %7 = load i32, i32* %arrayidx.7, align 4 %add.7 = add nsw i32 %7, 7 call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind call void bitcast (void (...)* @foo1 to void ()*)() nounwind diff --git a/test/CodeGen/Mips/fp16instrinsmc.ll b/test/CodeGen/Mips/fp16instrinsmc.ll index 84d3814..797be26 100644 --- a/test/CodeGen/Mips/fp16instrinsmc.ll +++ b/test/CodeGen/Mips/fp16instrinsmc.ll @@ -23,8 +23,8 @@ define void @foo1() #0 { ; fmask: .set reorder ; fmask: .end foo1 entry: - %0 = load float* @x, align 4 - %1 = load float* @one, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @one, align 4 %call = call float @copysignf(float %0, float %1) #2 store float %call, float* @y, align 4 ret void @@ -39,8 +39,8 @@ define void @foo2() #0 { ; fmask: save {{.*}} ; fmask: .end foo2 entry: - %0 = load float* @x, align 4 - %1 = load float* @negone, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @negone, align 4 %call = call float @copysignf(float %0, float %1) #2 store float %call, float* @y, align 4 ret void @@ -57,8 +57,8 @@ entry: ; fmask: .set macro ; fmask: .set reorder ; fmask: .end foo3 - %0 = load double* @xd, align 8 - %1 = load float* @oned, align 4 + %0 = load double, double* @xd, align 8 + %1 = load float, float* @oned, align 4 %conv = fpext float %1 to double %call = call double @copysign(double %0, double %conv) #2 store double %call, double* @yd, align 8 @@ -74,8 +74,8 @@ entry: ; fmask: .ent foo4 ; fmask: save {{.*}} ; fmask: .end foo4 - %0 = load double* @xd, align 8 - %1 = load double* @negoned, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @negoned, align 8 %call = call double @copysign(double %0, double %1) #2 store double %call, double* @yd, align 8 ret void @@ -84,7 +84,7 @@ entry: ; Function Attrs: nounwind define void @foo5() #0 { entry: - %0 = load float* @xn, align 4 + %0 = load float, float* @xn, align 4 %call = call float 
@fabsf(float %0) #2 store float %call, float* @y, align 4 ret void @@ -96,7 +96,7 @@ declare float @fabsf(float) #1 ; Function Attrs: nounwind define void @foo6() #0 { entry: - %0 = load double* @xdn, align 8 + %0 = load double, double* @xdn, align 8 %call = call double @fabs(double %0) #2 store double %call, double* @yd, align 8 ret void @@ -108,7 +108,7 @@ declare double @fabs(double) #1 ; Function Attrs: nounwind define void @foo7() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @sinf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sinf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -122,7 +122,7 @@ declare float @sinf(float) #0 ; Function Attrs: nounwind define void @foo8() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @sin(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sin)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -136,7 +136,7 @@ declare double @sin(double) #0 ; Function Attrs: nounwind define void @foo9() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @cosf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(cosf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -150,7 +150,7 @@ declare float @cosf(float) #0 ; Function Attrs: nounwind define void @foo10() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @cos(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(cos)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -164,7 +164,7 @@ declare double @cos(double) #0 ; Function Attrs: nounwind define void @foo11() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @sqrtf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -178,7 +178,7 @@ declare float @sqrtf(float) #0 ; Function Attrs: nounwind define void @foo12() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @sqrt(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -192,7 +192,7 @@ declare double @sqrt(double) #0 ; Function Attrs: nounwind define void @foo13() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @floorf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(floorf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -206,7 +206,7 @@ declare float @floorf(float) #1 ; Function Attrs: nounwind define void @foo14() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @floor(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(floor)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -220,7 +220,7 @@ declare double @floor(double) #1 ; Function Attrs: nounwind define void @foo15() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @nearbyintf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -234,7 +234,7 @@ declare float @nearbyintf(float) #1 ; Function Attrs: nounwind define void @foo16() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, 
align 8 %call = call double @nearbyint(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -248,7 +248,7 @@ declare double @nearbyint(double) #1 ; Function Attrs: nounwind define void @foo17() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @ceilf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -262,7 +262,7 @@ declare float @ceilf(float) #1 ; Function Attrs: nounwind define void @foo18() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @ceil(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(ceil)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -276,7 +276,7 @@ declare double @ceil(double) #1 ; Function Attrs: nounwind define void @foo19() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @rintf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(rintf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -290,7 +290,7 @@ declare float @rintf(float) #1 ; Function Attrs: nounwind define void @foo20() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @rint(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(rint)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -304,7 +304,7 @@ declare double @rint(double) #1 ; Function Attrs: nounwind define void @foo21() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @truncf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(truncf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -318,7 +318,7 @@ declare float @truncf(float) #1 ; Function Attrs: nounwind define void @foo22() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @trunc(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(trunc)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -332,7 +332,7 @@ declare double @trunc(double) #1 ; Function Attrs: nounwind define void @foo23() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @log2f(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(log2f)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -346,7 +346,7 @@ declare float @log2f(float) #0 ; Function Attrs: nounwind define void @foo24() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @log2(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(log2)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -360,7 +360,7 @@ declare double @log2(double) #0 ; Function Attrs: nounwind define void @foo25() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @exp2f(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -374,7 +374,7 @@ declare float @exp2f(float) #0 ; Function Attrs: nounwind define void @foo26() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @exp2(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(exp2)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) 
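Nearly every hunk in these test files applies one mechanical rewrite: load and getelementptr in LLVM IR now take the pointee type as an explicit leading argument instead of inferring it from the pointer operand's type. As a reading aid, here is a minimal sketch of the before/after spellings; the @example function is hypothetical, but @x, %b, and %o reuse names that appear in the hunks above:

    ; a small module in the new syntax; the old spellings are kept in comments
    @x = external global float

    define float @example(float* %b, i32 %o) {
    entry:
      ; was: %v = load float* @x, align 4
      %v = load float, float* @x, align 4
      ; was: %p = getelementptr inbounds float* %b, i32 %o
      %p = getelementptr inbounds float, float* %b, i32 %o
      %w = load float, float* %p, align 4
      %r = fadd float %v, %w
      ret float %r
    }

The FileCheck updates in the fcmp hunks further up are a separate change: the expected MIPS code now materializes 1 directly in the return register $2 (addiu $2, $zero, 1) and conditionally overwrites it with $zero via movt/movf on $fcc0, rather than building 0 and 1 in two registers and selecting between them.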
diff --git a/test/CodeGen/Mips/fp16static.ll b/test/CodeGen/Mips/fp16static.ll index beb063d..4e5059e 100644 --- a/test/CodeGen/Mips/fp16static.ll +++ b/test/CodeGen/Mips/fp16static.ll @@ -4,8 +4,8 @@ define void @foo() nounwind { entry: - %0 = load float* @x, align 4 - %1 = load float* @x, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @x, align 4 %mul = fmul float %0, %1 store float %mul, float* @x, align 4 ; CHECK-STATIC16: jal __mips16_mulsf3 diff --git a/test/CodeGen/Mips/fpneeded.ll b/test/CodeGen/Mips/fpneeded.ll index fdd8e8f..a89e2a5 100644 --- a/test/CodeGen/Mips/fpneeded.ll +++ b/test/CodeGen/Mips/fpneeded.ll @@ -76,8 +76,8 @@ entry: define void @foo1() #0 { entry: store float 1.000000e+00, float* @zz, align 4 - %0 = load float* @y, align 4 - %1 = load float* @x, align 4 + %0 = load float, float* @y, align 4 + %1 = load float, float* @x, align 4 %add = fadd float %0, %1 store float %add, float* @z, align 4 ret void @@ -96,7 +96,7 @@ entry: define void @foo2() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 call void @vf(float %0) ret void } diff --git a/test/CodeGen/Mips/fpnotneeded.ll b/test/CodeGen/Mips/fpnotneeded.ll index e12d7ba..02b8e8a 100644 --- a/test/CodeGen/Mips/fpnotneeded.ll +++ b/test/CodeGen/Mips/fpnotneeded.ll @@ -19,7 +19,7 @@ entry: define i32 @iv() #0 { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 ret i32 %0 } diff --git a/test/CodeGen/Mips/global-address.ll b/test/CodeGen/Mips/global-address.ll index ae6afeb..ecf5e56 100644 --- a/test/CodeGen/Mips/global-address.ll +++ b/test/CodeGen/Mips/global-address.ll @@ -33,9 +33,9 @@ entry: ; STATIC-N64: lw ${{[0-9]+}}, %got_ofst(s1)($[[R1]]) ; STATIC-N64: ld ${{[0-9]+}}, %got_disp(g1) - %0 = load i32* @s1, align 4 + %0 = load i32, i32* @s1, align 4 tail call void @foo1(i32 %0) nounwind - %1 = load i32* @g1, align 4 + %1 = load i32, i32* @g1, align 4 store i32 %1, i32* @s1, align 4 %add = add nsw i32 %1, 2 store i32 %add, i32* @g1, align 4 diff --git a/test/CodeGen/Mips/gpreg-lazy-binding.ll b/test/CodeGen/Mips/gpreg-lazy-binding.ll index 3a636d8..800a74f 100644 --- a/test/CodeGen/Mips/gpreg-lazy-binding.ll +++ b/test/CodeGen/Mips/gpreg-lazy-binding.ll @@ -19,7 +19,7 @@ declare void @externalFunc() define internal fastcc void @internalFunc() nounwind noinline { entry: - %0 = load i32* @g, align 4 + %0 = load i32, i32* @g, align 4 %inc = add nsw i32 %0, 1 store i32 %inc, i32* @g, align 4 ret void diff --git a/test/CodeGen/Mips/gprestore.ll b/test/CodeGen/Mips/gprestore.ll index cbcf0c9..0b005ab 100644 --- a/test/CodeGen/Mips/gprestore.ll +++ b/test/CodeGen/Mips/gprestore.ll @@ -18,10 +18,10 @@ entry: ; CHECK-NOT: got({{.*}})($gp) ; CHECK: lw $gp tail call void (...)* @f1() nounwind - %tmp = load i32* @p, align 4 + %tmp = load i32, i32* @p, align 4 tail call void @f2(i32 %tmp) nounwind - %tmp1 = load i32* @q, align 4 - %tmp2 = load i32* @r, align 4 + %tmp1 = load i32, i32* @q, align 4 + %tmp2 = load i32, i32* @r, align 4 tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind ret void } diff --git a/test/CodeGen/Mips/helloworld.ll b/test/CodeGen/Mips/helloworld.ll index 36f4ad6..768abc2 100644 --- a/test/CodeGen/Mips/helloworld.ll +++ b/test/CodeGen/Mips/helloworld.ll @@ -12,7 +12,7 @@ define i32 @main() nounwind { entry: - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0)) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0)) ret i32 0 ; SR: 
.set mips16 diff --git a/test/CodeGen/Mips/hf16_1.ll b/test/CodeGen/Mips/hf16_1.ll index 9879cd5..103fd2d 100644 --- a/test/CodeGen/Mips/hf16_1.ll +++ b/test/CodeGen/Mips/hf16_1.ll @@ -11,96 +11,96 @@ define void @foo() nounwind { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 call void @v_sf(float %0) - %1 = load double* @xd, align 8 + %1 = load double, double* @xd, align 8 call void @v_df(double %1) - %2 = load float* @x, align 4 - %3 = load float* @y, align 4 + %2 = load float, float* @x, align 4 + %3 = load float, float* @y, align 4 call void @v_sf_sf(float %2, float %3) - %4 = load double* @xd, align 8 - %5 = load float* @x, align 4 + %4 = load double, double* @xd, align 8 + %5 = load float, float* @x, align 4 call void @v_df_sf(double %4, float %5) - %6 = load double* @xd, align 8 - %7 = load double* @yd, align 8 + %6 = load double, double* @xd, align 8 + %7 = load double, double* @yd, align 8 call void @v_df_df(double %6, double %7) %call = call float @sf_v() - %8 = load float* @x, align 4 + %8 = load float, float* @x, align 4 %call1 = call float @sf_sf(float %8) - %9 = load double* @xd, align 8 + %9 = load double, double* @xd, align 8 %call2 = call float @sf_df(double %9) - %10 = load float* @x, align 4 - %11 = load float* @y, align 4 + %10 = load float, float* @x, align 4 + %11 = load float, float* @y, align 4 %call3 = call float @sf_sf_sf(float %10, float %11) - %12 = load double* @xd, align 8 - %13 = load float* @x, align 4 + %12 = load double, double* @xd, align 8 + %13 = load float, float* @x, align 4 %call4 = call float @sf_df_sf(double %12, float %13) - %14 = load double* @xd, align 8 - %15 = load double* @yd, align 8 + %14 = load double, double* @xd, align 8 + %15 = load double, double* @yd, align 8 %call5 = call float @sf_df_df(double %14, double %15) %call6 = call double @df_v() - %16 = load float* @x, align 4 + %16 = load float, float* @x, align 4 %call7 = call double @df_sf(float %16) - %17 = load double* @xd, align 8 + %17 = load double, double* @xd, align 8 %call8 = call double @df_df(double %17) - %18 = load float* @x, align 4 - %19 = load float* @y, align 4 + %18 = load float, float* @x, align 4 + %19 = load float, float* @y, align 4 %call9 = call double @df_sf_sf(float %18, float %19) - %20 = load double* @xd, align 8 - %21 = load float* @x, align 4 + %20 = load double, double* @xd, align 8 + %21 = load float, float* @x, align 4 %call10 = call double @df_df_sf(double %20, float %21) - %22 = load double* @xd, align 8 - %23 = load double* @yd, align 8 + %22 = load double, double* @xd, align 8 + %23 = load double, double* @yd, align 8 %call11 = call double @df_df_df(double %22, double %23) %call12 = call { float, float } @sc_v() %24 = extractvalue { float, float } %call12, 0 %25 = extractvalue { float, float } %call12, 1 - %26 = load float* @x, align 4 + %26 = load float, float* @x, align 4 %call13 = call { float, float } @sc_sf(float %26) %27 = extractvalue { float, float } %call13, 0 %28 = extractvalue { float, float } %call13, 1 - %29 = load double* @xd, align 8 + %29 = load double, double* @xd, align 8 %call14 = call { float, float } @sc_df(double %29) %30 = extractvalue { float, float } %call14, 0 %31 = extractvalue { float, float } %call14, 1 - %32 = load float* @x, align 4 - %33 = load float* @y, align 4 + %32 = load float, float* @x, align 4 + %33 = load float, float* @y, align 4 %call15 = call { float, float } @sc_sf_sf(float %32, float %33) %34 = extractvalue { float, float } %call15, 0 %35 = extractvalue { float, float } 
%call15, 1 - %36 = load double* @xd, align 8 - %37 = load float* @x, align 4 + %36 = load double, double* @xd, align 8 + %37 = load float, float* @x, align 4 %call16 = call { float, float } @sc_df_sf(double %36, float %37) %38 = extractvalue { float, float } %call16, 0 %39 = extractvalue { float, float } %call16, 1 - %40 = load double* @xd, align 8 - %41 = load double* @yd, align 8 + %40 = load double, double* @xd, align 8 + %41 = load double, double* @yd, align 8 %call17 = call { float, float } @sc_df_df(double %40, double %41) %42 = extractvalue { float, float } %call17, 0 %43 = extractvalue { float, float } %call17, 1 %call18 = call { double, double } @dc_v() %44 = extractvalue { double, double } %call18, 0 %45 = extractvalue { double, double } %call18, 1 - %46 = load float* @x, align 4 + %46 = load float, float* @x, align 4 %call19 = call { double, double } @dc_sf(float %46) %47 = extractvalue { double, double } %call19, 0 %48 = extractvalue { double, double } %call19, 1 - %49 = load double* @xd, align 8 + %49 = load double, double* @xd, align 8 %call20 = call { double, double } @dc_df(double %49) %50 = extractvalue { double, double } %call20, 0 %51 = extractvalue { double, double } %call20, 1 - %52 = load float* @x, align 4 - %53 = load float* @y, align 4 + %52 = load float, float* @x, align 4 + %53 = load float, float* @y, align 4 %call21 = call { double, double } @dc_sf_sf(float %52, float %53) %54 = extractvalue { double, double } %call21, 0 %55 = extractvalue { double, double } %call21, 1 - %56 = load double* @xd, align 8 - %57 = load float* @x, align 4 + %56 = load double, double* @xd, align 8 + %57 = load float, float* @x, align 4 %call22 = call { double, double } @dc_df_sf(double %56, float %57) %58 = extractvalue { double, double } %call22, 0 %59 = extractvalue { double, double } %call22, 1 - %60 = load double* @xd, align 8 - %61 = load double* @yd, align 8 + %60 = load double, double* @xd, align 8 + %61 = load double, double* @yd, align 8 %call23 = call { double, double } @dc_df_df(double %60, double %61) %62 = extractvalue { double, double } %call23, 0 %63 = extractvalue { double, double } %call23, 1 diff --git a/test/CodeGen/Mips/hf16call32.ll b/test/CodeGen/Mips/hf16call32.ll index aec9c71..59cf413 100644 --- a/test/CodeGen/Mips/hf16call32.ll +++ b/test/CodeGen/Mips/hf16call32.ll @@ -33,30 +33,30 @@ entry: store float 1.000000e+00, float* @y, align 4 store double 1.000000e+00, double* @xd, align 8 store double 1.000000e+00, double* @yd, align 8 - store float 1.000000e+00, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) - store double 1.000000e+00, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) + store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1) + store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1) store float 1.000000e+00, float* @ret_sf, align 4 store double 1.000000e+00, double* @ret_df, align 8 - store float 1.000000e+00, float* getelementptr inbounds ({ float, float }* 
@ret_sc, i32 0, i32 0) - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) - store double 1.000000e+00, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1) + store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1) store float 0.000000e+00, float* @lx, align 4 store float 0.000000e+00, float* @ly, align 4 store double 0.000000e+00, double* @lxd, align 8 store double 0.000000e+00, double* @lyd, align 8 - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lxy, i32 0, i32 0) - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lxy, i32 0, i32 1) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lxyd, i32 0, i32 0) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lxyd, i32 0, i32 1) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 0) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 1) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 0) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 1) store float 0.000000e+00, float* @lret_sf, align 4 store double 0.000000e+00, double* @lret_df, align 8 - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0) + store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0) + store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1) ret void } @@ -67,686 +67,686 @@ entry: store i32 0, i32* %retval call void @clear() store float 1.500000e+00, float* @lx, align 4 - %0 = load float* @lx, align 4 + %0 = load float, float* @lx, align 4 call void @v_sf(float %0) - %1 = load float* @x, align 4 + %1 = load float, float* @x, align 4 %conv = fpext float %1 to double - %2 = load float* @lx, align 4 + %2 = load float, float* @lx, align 4 %conv1 = fpext float %2 to double - %3 = load float* @x, align 4 - %4 = load float* @lx, align 4 + %3 = load float, float* @x, align 4 + %4 = load float, float* @lx, align 4 %cmp = fcmp oeq float %3, %4 %conv2 = zext i1 %cmp to i32 - %call = call i32 (i8*, ...)* 
@printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2) call void @clear() store double 0x41678C29C0000000, double* @lxd, align 8 - %5 = load double* @lxd, align 8 + %5 = load double, double* @lxd, align 8 call void @v_df(double %5) - %6 = load double* @xd, align 8 - %7 = load double* @lxd, align 8 - %8 = load double* @xd, align 8 - %9 = load double* @lxd, align 8 + %6 = load double, double* @xd, align 8 + %7 = load double, double* @lxd, align 8 + %8 = load double, double* @xd, align 8 + %9 = load double, double* @lxd, align 8 %cmp3 = fcmp oeq double %8, %9 %conv4 = zext i1 %cmp3 to i32 - %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4) + %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4) call void @clear() store float 9.000000e+00, float* @lx, align 4 store float 1.000000e+01, float* @ly, align 4 - %10 = load float* @lx, align 4 - %11 = load float* @ly, align 4 + %10 = load float, float* @lx, align 4 + %11 = load float, float* @ly, align 4 call void @v_sf_sf(float %10, float %11) - %12 = load float* @x, align 4 + %12 = load float, float* @x, align 4 %conv6 = fpext float %12 to double - %13 = load float* @lx, align 4 + %13 = load float, float* @lx, align 4 %conv7 = fpext float %13 to double - %14 = load float* @y, align 4 + %14 = load float, float* @y, align 4 %conv8 = fpext float %14 to double - %15 = load float* @ly, align 4 + %15 = load float, float* @ly, align 4 %conv9 = fpext float %15 to double - %16 = load float* @x, align 4 - %17 = load float* @lx, align 4 + %16 = load float, float* @x, align 4 + %17 = load float, float* @lx, align 4 %cmp10 = fcmp oeq float %16, %17 br i1 %cmp10, label %land.rhs, label %land.end land.rhs: ; preds = %entry - %18 = load float* @y, align 4 - %19 = load float* @ly, align 4 + %18 = load float, float* @y, align 4 + %19 = load float, float* @ly, align 4 %cmp12 = fcmp oeq float %18, %19 br label %land.end land.end: ; preds = %land.rhs, %entry %20 = phi i1 [ false, %entry ], [ %cmp12, %land.rhs ] %land.ext = zext i1 %20 to i32 - %call14 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext) + %call14 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext) call void @clear() store float 0x3FFE666660000000, float* @lx, align 4 store double 0x4007E613249FF279, double* @lyd, align 8 - %21 = load float* @lx, align 4 - %22 = load double* @lyd, align 8 + %21 = load float, float* @lx, align 4 + %22 = load double, double* @lyd, align 8 call void @v_sf_df(float %21, double %22) - %23 = load float* @x, align 4 + %23 = load float, float* @x, align 4 %conv15 = fpext float %23 to double - %24 = load float* @lx, align 4 + %24 = load float, float* @lx, align 4 %conv16 = fpext float %24 to double - %25 = load double* @yd, align 8 - %26 = load double* @lyd, align 8 - %27 = load float* @x, align 4 - %28 = load float* @lx, align 4 + %25 = load double, double* @yd, align 8 + %26 = load double, double* @lyd, align 8 + %27 = load float, float* @x, align 4 + %28 = load 
float, float* @lx, align 4 %cmp17 = fcmp oeq float %27, %28 %conv18 = zext i1 %cmp17 to i32 - %29 = load double* @yd, align 8 - %30 = load double* @lyd, align 8 + %29 = load double, double* @yd, align 8 + %30 = load double, double* @lyd, align 8 %cmp19 = fcmp oeq double %29, %30 %conv20 = zext i1 %cmp19 to i32 %and = and i32 %conv18, %conv20 - %call21 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv15, double %conv16, double %25, double %26, i32 %and) + %call21 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv15, double %conv16, double %25, double %26, i32 %and) call void @clear() store double 0x4194E54F94000000, double* @lxd, align 8 store float 7.600000e+01, float* @ly, align 4 - %31 = load double* @lxd, align 8 - %32 = load float* @ly, align 4 + %31 = load double, double* @lxd, align 8 + %32 = load float, float* @ly, align 4 call void @v_df_sf(double %31, float %32) - %33 = load double* @xd, align 8 - %34 = load double* @lxd, align 8 - %35 = load float* @y, align 4 + %33 = load double, double* @xd, align 8 + %34 = load double, double* @lxd, align 8 + %35 = load float, float* @y, align 4 %conv22 = fpext float %35 to double - %36 = load float* @ly, align 4 + %36 = load float, float* @ly, align 4 %conv23 = fpext float %36 to double - %37 = load double* @xd, align 8 - %38 = load double* @lxd, align 8 + %37 = load double, double* @xd, align 8 + %38 = load double, double* @lxd, align 8 %cmp24 = fcmp oeq double %37, %38 %conv25 = zext i1 %cmp24 to i32 - %39 = load float* @y, align 4 - %40 = load float* @ly, align 4 + %39 = load float, float* @y, align 4 + %40 = load float, float* @ly, align 4 %cmp26 = fcmp oeq float %39, %40 %conv27 = zext i1 %cmp26 to i32 %and28 = and i32 %conv25, %conv27 - %call29 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %33, double %34, double %conv22, double %conv23, i32 %and28) + %call29 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %33, double %34, double %conv22, double %conv23, i32 %and28) call void @clear() store double 7.365198e+07, double* @lxd, align 8 store double 0x416536CD80000000, double* @lyd, align 8 - %41 = load double* @lxd, align 8 - %42 = load double* @lyd, align 8 + %41 = load double, double* @lxd, align 8 + %42 = load double, double* @lyd, align 8 call void @v_df_df(double %41, double %42) - %43 = load double* @xd, align 8 - %44 = load double* @lxd, align 8 - %45 = load double* @yd, align 8 - %46 = load double* @lyd, align 8 - %47 = load double* @xd, align 8 - %48 = load double* @lxd, align 8 + %43 = load double, double* @xd, align 8 + %44 = load double, double* @lxd, align 8 + %45 = load double, double* @yd, align 8 + %46 = load double, double* @lyd, align 8 + %47 = load double, double* @xd, align 8 + %48 = load double, double* @lxd, align 8 %cmp30 = fcmp oeq double %47, %48 %conv31 = zext i1 %cmp30 to i32 - %49 = load double* @yd, align 8 - %50 = load double* @lyd, align 8 + %49 = load double, double* @yd, align 8 + %50 = load double, double* @lyd, align 8 %cmp32 = fcmp oeq double %49, %50 %conv33 = zext i1 %cmp32 to i32 %and34 = and i32 %conv31, %conv33 - %call35 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %43, double %44, double %45, double %46, i32 %and34) + %call35 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* 
@.str1, i32 0, i32 0), double %43, double %44, double %45, double %46, i32 %and34) call void @clear() store float 0x4016666660000000, float* @ret_sf, align 4 %call36 = call float @sf_v() store float %call36, float* @lret_sf, align 4 - %51 = load float* @ret_sf, align 4 + %51 = load float, float* @ret_sf, align 4 %conv37 = fpext float %51 to double - %52 = load float* @lret_sf, align 4 + %52 = load float, float* @lret_sf, align 4 %conv38 = fpext float %52 to double - %53 = load float* @ret_sf, align 4 - %54 = load float* @lret_sf, align 4 + %53 = load float, float* @ret_sf, align 4 + %54 = load float, float* @lret_sf, align 4 %cmp39 = fcmp oeq float %53, %54 %conv40 = zext i1 %cmp39 to i32 - %call41 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40) + %call41 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40) call void @clear() store float 4.587300e+06, float* @ret_sf, align 4 store float 3.420000e+02, float* @lx, align 4 - %55 = load float* @lx, align 4 + %55 = load float, float* @lx, align 4 %call42 = call float @sf_sf(float %55) store float %call42, float* @lret_sf, align 4 - %56 = load float* @ret_sf, align 4 + %56 = load float, float* @ret_sf, align 4 %conv43 = fpext float %56 to double - %57 = load float* @lret_sf, align 4 + %57 = load float, float* @lret_sf, align 4 %conv44 = fpext float %57 to double - %58 = load float* @x, align 4 + %58 = load float, float* @x, align 4 %conv45 = fpext float %58 to double - %59 = load float* @lx, align 4 + %59 = load float, float* @lx, align 4 %conv46 = fpext float %59 to double - %60 = load float* @ret_sf, align 4 - %61 = load float* @lret_sf, align 4 + %60 = load float, float* @ret_sf, align 4 + %61 = load float, float* @lret_sf, align 4 %cmp47 = fcmp oeq float %60, %61 %conv48 = zext i1 %cmp47 to i32 - %62 = load float* @x, align 4 - %63 = load float* @lx, align 4 + %62 = load float, float* @x, align 4 + %63 = load float, float* @lx, align 4 %cmp49 = fcmp oeq float %62, %63 %conv50 = zext i1 %cmp49 to i32 %and51 = and i32 %conv48, %conv50 - %call52 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51) + %call52 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51) call void @clear() store float 4.445910e+06, float* @ret_sf, align 4 store double 0x419A7DB294000000, double* @lxd, align 8 - %64 = load double* @lxd, align 8 + %64 = load double, double* @lxd, align 8 %call53 = call float @sf_df(double %64) store float %call53, float* @lret_sf, align 4 - %65 = load float* @ret_sf, align 4 + %65 = load float, float* @ret_sf, align 4 %conv54 = fpext float %65 to double - %66 = load float* @lret_sf, align 4 + %66 = load float, float* @lret_sf, align 4 %conv55 = fpext float %66 to double - %67 = load double* @xd, align 8 - %68 = load double* @lxd, align 8 - %69 = load float* @ret_sf, align 4 - %70 = load float* @lret_sf, align 4 + %67 = load double, double* @xd, align 8 + %68 = load double, double* @lxd, align 8 + %69 = load float, float* @ret_sf, align 4 + %70 = load float, float* @lret_sf, align 4 %cmp56 = fcmp oeq float %69, %70 %conv57 = zext i1 %cmp56 to i32 - %71 = load double* @xd, align 8 - %72 = load double* @lxd, align 8 + %71 = 
load double, double* @xd, align 8 + %72 = load double, double* @lxd, align 8 %cmp58 = fcmp oeq double %71, %72 %conv59 = zext i1 %cmp58 to i32 %and60 = and i32 %conv57, %conv59 - %call61 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv54, double %conv55, double %67, double %68, i32 %and60) + %call61 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv54, double %conv55, double %67, double %68, i32 %and60) call void @clear() store float 0x3FFF4BC6A0000000, float* @ret_sf, align 4 store float 4.445500e+03, float* @lx, align 4 store float 0x4068ACCCC0000000, float* @ly, align 4 - %73 = load float* @lx, align 4 - %74 = load float* @ly, align 4 + %73 = load float, float* @lx, align 4 + %74 = load float, float* @ly, align 4 %call62 = call float @sf_sf_sf(float %73, float %74) store float %call62, float* @lret_sf, align 4 - %75 = load float* @ret_sf, align 4 + %75 = load float, float* @ret_sf, align 4 %conv63 = fpext float %75 to double - %76 = load float* @lret_sf, align 4 + %76 = load float, float* @lret_sf, align 4 %conv64 = fpext float %76 to double - %77 = load float* @x, align 4 + %77 = load float, float* @x, align 4 %conv65 = fpext float %77 to double - %78 = load float* @lx, align 4 + %78 = load float, float* @lx, align 4 %conv66 = fpext float %78 to double - %79 = load float* @y, align 4 + %79 = load float, float* @y, align 4 %conv67 = fpext float %79 to double - %80 = load float* @ly, align 4 + %80 = load float, float* @ly, align 4 %conv68 = fpext float %80 to double - %81 = load float* @ret_sf, align 4 - %82 = load float* @lret_sf, align 4 + %81 = load float, float* @ret_sf, align 4 + %82 = load float, float* @lret_sf, align 4 %cmp69 = fcmp oeq float %81, %82 br i1 %cmp69, label %land.lhs.true, label %land.end76 land.lhs.true: ; preds = %land.end - %83 = load float* @x, align 4 - %84 = load float* @lx, align 4 + %83 = load float, float* @x, align 4 + %84 = load float, float* @lx, align 4 %cmp71 = fcmp oeq float %83, %84 br i1 %cmp71, label %land.rhs73, label %land.end76 land.rhs73: ; preds = %land.lhs.true - %85 = load float* @y, align 4 - %86 = load float* @ly, align 4 + %85 = load float, float* @y, align 4 + %86 = load float, float* @ly, align 4 %cmp74 = fcmp oeq float %85, %86 br label %land.end76 land.end76: ; preds = %land.rhs73, %land.lhs.true, %land.end %87 = phi i1 [ false, %land.lhs.true ], [ false, %land.end ], [ %cmp74, %land.rhs73 ] %land.ext77 = zext i1 %87 to i32 - %call78 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77) + %call78 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77) call void @clear() store float 9.991300e+04, float* @ret_sf, align 4 store float 1.114500e+04, float* @lx, align 4 store double 9.994445e+07, double* @lyd, align 8 - %88 = load float* @lx, align 4 - %89 = load double* @lyd, align 8 + %88 = load float, float* @lx, align 4 + %89 = load double, double* @lyd, align 8 %call79 = call float @sf_sf_df(float %88, double %89) store float %call79, float* @lret_sf, align 4 - %90 = load float* @ret_sf, align 4 + %90 = load float, float* @ret_sf, align 4 %conv80 = fpext float %90 to double - %91 = load float* @lret_sf, 
align 4 + %91 = load float, float* @lret_sf, align 4 %conv81 = fpext float %91 to double - %92 = load float* @x, align 4 + %92 = load float, float* @x, align 4 %conv82 = fpext float %92 to double - %93 = load float* @lx, align 4 + %93 = load float, float* @lx, align 4 %conv83 = fpext float %93 to double - %94 = load double* @yd, align 8 - %95 = load double* @lyd, align 8 - %96 = load float* @ret_sf, align 4 - %97 = load float* @lret_sf, align 4 + %94 = load double, double* @yd, align 8 + %95 = load double, double* @lyd, align 8 + %96 = load float, float* @ret_sf, align 4 + %97 = load float, float* @lret_sf, align 4 %cmp84 = fcmp oeq float %96, %97 br i1 %cmp84, label %land.lhs.true86, label %land.end92 land.lhs.true86: ; preds = %land.end76 - %98 = load float* @x, align 4 - %99 = load float* @lx, align 4 + %98 = load float, float* @x, align 4 + %99 = load float, float* @lx, align 4 %cmp87 = fcmp oeq float %98, %99 br i1 %cmp87, label %land.rhs89, label %land.end92 land.rhs89: ; preds = %land.lhs.true86 - %100 = load double* @yd, align 8 - %101 = load double* @lyd, align 8 + %100 = load double, double* @yd, align 8 + %101 = load double, double* @lyd, align 8 %cmp90 = fcmp oeq double %100, %101 br label %land.end92 land.end92: ; preds = %land.rhs89, %land.lhs.true86, %land.end76 %102 = phi i1 [ false, %land.lhs.true86 ], [ false, %land.end76 ], [ %cmp90, %land.rhs89 ] %land.ext93 = zext i1 %102 to i32 - %call94 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93) + %call94 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93) call void @clear() store float 0x417CCC7A00000000, float* @ret_sf, align 4 store double 0x4172034530000000, double* @lxd, align 8 store float 4.456200e+04, float* @ly, align 4 - %103 = load double* @lxd, align 8 - %104 = load float* @ly, align 4 + %103 = load double, double* @lxd, align 8 + %104 = load float, float* @ly, align 4 %call95 = call float @sf_df_sf(double %103, float %104) store float %call95, float* @lret_sf, align 4 - %105 = load float* @ret_sf, align 4 + %105 = load float, float* @ret_sf, align 4 %conv96 = fpext float %105 to double - %106 = load float* @lret_sf, align 4 + %106 = load float, float* @lret_sf, align 4 %conv97 = fpext float %106 to double - %107 = load double* @xd, align 8 - %108 = load double* @lxd, align 8 - %109 = load float* @y, align 4 + %107 = load double, double* @xd, align 8 + %108 = load double, double* @lxd, align 8 + %109 = load float, float* @y, align 4 %conv98 = fpext float %109 to double - %110 = load float* @ly, align 4 + %110 = load float, float* @ly, align 4 %conv99 = fpext float %110 to double - %111 = load float* @ret_sf, align 4 - %112 = load float* @lret_sf, align 4 + %111 = load float, float* @ret_sf, align 4 + %112 = load float, float* @lret_sf, align 4 %cmp100 = fcmp oeq float %111, %112 br i1 %cmp100, label %land.lhs.true102, label %land.end108 land.lhs.true102: ; preds = %land.end92 - %113 = load double* @xd, align 8 - %114 = load double* @lxd, align 8 + %113 = load double, double* @xd, align 8 + %114 = load double, double* @lxd, align 8 %cmp103 = fcmp oeq double %113, %114 br i1 %cmp103, label %land.rhs105, label %land.end108 land.rhs105: ; preds = %land.lhs.true102 - %115 = load float* @y, align 4 - %116 = load float* 
@ly, align 4 + %115 = load float, float* @y, align 4 + %116 = load float, float* @ly, align 4 %cmp106 = fcmp oeq float %115, %116 br label %land.end108 land.end108: ; preds = %land.rhs105, %land.lhs.true102, %land.end92 %117 = phi i1 [ false, %land.lhs.true102 ], [ false, %land.end92 ], [ %cmp106, %land.rhs105 ] %land.ext109 = zext i1 %117 to i32 - %call110 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109) + %call110 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109) call void @clear() store float 3.987721e+06, float* @ret_sf, align 4 store double 0x3FF1F49F6DDDC2D8, double* @lxd, align 8 store double 0x409129F306A2B170, double* @lyd, align 8 - %118 = load double* @lxd, align 8 - %119 = load double* @lyd, align 8 + %118 = load double, double* @lxd, align 8 + %119 = load double, double* @lyd, align 8 %call111 = call float @sf_df_df(double %118, double %119) store float %call111, float* @lret_sf, align 4 - %120 = load float* @ret_sf, align 4 + %120 = load float, float* @ret_sf, align 4 %conv112 = fpext float %120 to double - %121 = load float* @lret_sf, align 4 + %121 = load float, float* @lret_sf, align 4 %conv113 = fpext float %121 to double - %122 = load double* @xd, align 8 - %123 = load double* @lxd, align 8 - %124 = load double* @yd, align 8 - %125 = load double* @lyd, align 8 - %126 = load float* @ret_sf, align 4 - %127 = load float* @lret_sf, align 4 + %122 = load double, double* @xd, align 8 + %123 = load double, double* @lxd, align 8 + %124 = load double, double* @yd, align 8 + %125 = load double, double* @lyd, align 8 + %126 = load float, float* @ret_sf, align 4 + %127 = load float, float* @lret_sf, align 4 %cmp114 = fcmp oeq float %126, %127 br i1 %cmp114, label %land.lhs.true116, label %land.end122 land.lhs.true116: ; preds = %land.end108 - %128 = load double* @xd, align 8 - %129 = load double* @lxd, align 8 + %128 = load double, double* @xd, align 8 + %129 = load double, double* @lxd, align 8 %cmp117 = fcmp oeq double %128, %129 br i1 %cmp117, label %land.rhs119, label %land.end122 land.rhs119: ; preds = %land.lhs.true116 - %130 = load double* @yd, align 8 - %131 = load double* @lyd, align 8 + %130 = load double, double* @yd, align 8 + %131 = load double, double* @lyd, align 8 %cmp120 = fcmp oeq double %130, %131 br label %land.end122 land.end122: ; preds = %land.rhs119, %land.lhs.true116, %land.end108 %132 = phi i1 [ false, %land.lhs.true116 ], [ false, %land.end108 ], [ %cmp120, %land.rhs119 ] %land.ext123 = zext i1 %132 to i32 - %call124 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123) + %call124 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123) call void @clear() store double 1.561234e+01, double* @ret_df, align 8 %call125 = call double @df_v() store double %call125, double* @lret_df, align 8 - %133 = load double* @ret_df, align 8 - %134 = load double* @lret_df, align 8 - %135 = load double* @ret_df, align 8 - %136 = load double* @lret_df, align 8 + %133 = load 
double, double* @ret_df, align 8
+ %134 = load double, double* @lret_df, align 8
+ %135 = load double, double* @ret_df, align 8
+ %136 = load double, double* @lret_df, align 8
%cmp126 = fcmp oeq double %135, %136
%conv127 = zext i1 %cmp126 to i32
- %call128 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127)
+ %call128 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127)
call void @clear()
store double 1.345873e+01, double* @ret_df, align 8
store float 3.434520e+05, float* @lx, align 4
- %137 = load float* @lx, align 4
+ %137 = load float, float* @lx, align 4
%call129 = call double @df_sf(float %137)
store double %call129, double* @lret_df, align 8
- %138 = load double* @ret_df, align 8
- %139 = load double* @lret_df, align 8
- %140 = load float* @x, align 4
+ %138 = load double, double* @ret_df, align 8
+ %139 = load double, double* @lret_df, align 8
+ %140 = load float, float* @x, align 4
%conv130 = fpext float %140 to double
- %141 = load float* @lx, align 4
+ %141 = load float, float* @lx, align 4
%conv131 = fpext float %141 to double
- %142 = load double* @ret_df, align 8
- %143 = load double* @lret_df, align 8
+ %142 = load double, double* @ret_df, align 8
+ %143 = load double, double* @lret_df, align 8
%cmp132 = fcmp oeq double %142, %143
%conv133 = zext i1 %cmp132 to i32
- %144 = load float* @x, align 4
- %145 = load float* @lx, align 4
+ %144 = load float, float* @x, align 4
+ %145 = load float, float* @lx, align 4
%cmp134 = fcmp oeq float %144, %145
%conv135 = zext i1 %cmp134 to i32
%and136 = and i32 %conv133, %conv135
- %call137 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %138, double %139, double %conv130, double %conv131, i32 %and136)
+ %call137 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %138, double %139, double %conv130, double %conv131, i32 %and136)
call void @clear()
store double 0x4084F3AB7AA25D8D, double* @ret_df, align 8
store double 0x4114F671D2F1A9FC, double* @lxd, align 8
- %146 = load double* @lxd, align 8
+ %146 = load double, double* @lxd, align 8
%call138 = call double @df_df(double %146)
store double %call138, double* @lret_df, align 8
- %147 = load double* @ret_df, align 8
- %148 = load double* @lret_df, align 8
- %149 = load double* @xd, align 8
- %150 = load double* @lxd, align 8
- %151 = load double* @ret_df, align 8
- %152 = load double* @lret_df, align 8
+ %147 = load double, double* @ret_df, align 8
+ %148 = load double, double* @lret_df, align 8
+ %149 = load double, double* @xd, align 8
+ %150 = load double, double* @lxd, align 8
+ %151 = load double, double* @ret_df, align 8
+ %152 = load double, double* @lret_df, align 8
%cmp139 = fcmp oeq double %151, %152
%conv140 = zext i1 %cmp139 to i32
- %153 = load double* @xd, align 8
- %154 = load double* @lxd, align 8
+ %153 = load double, double* @xd, align 8
+ %154 = load double, double* @lxd, align 8
%cmp141 = fcmp oeq double %153, %154
%conv142 = zext i1 %cmp141 to i32
%and143 = and i32 %conv140, %conv142
- %call144 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %147, double %148, double %149, double %150, i32 %and143)
+ %call144 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %147, double %148, double %149, double %150, i32 %and143)
call void @clear()
store double 6.781956e+03, double* @ret_df, align 8
store float 4.445500e+03, float* @lx, align 4
store float 0x4068ACCCC0000000, float* @ly, align 4
- %155 = load float* @lx, align 4
- %156 = load float* @ly, align 4
+ %155 = load float, float* @lx, align 4
+ %156 = load float, float* @ly, align 4
%call145 = call double @df_sf_sf(float %155, float %156)
store double %call145, double* @lret_df, align 8
- %157 = load double* @ret_df, align 8
- %158 = load double* @lret_df, align 8
- %159 = load float* @x, align 4
+ %157 = load double, double* @ret_df, align 8
+ %158 = load double, double* @lret_df, align 8
+ %159 = load float, float* @x, align 4
%conv146 = fpext float %159 to double
- %160 = load float* @lx, align 4
+ %160 = load float, float* @lx, align 4
%conv147 = fpext float %160 to double
- %161 = load float* @y, align 4
+ %161 = load float, float* @y, align 4
%conv148 = fpext float %161 to double
- %162 = load float* @ly, align 4
+ %162 = load float, float* @ly, align 4
%conv149 = fpext float %162 to double
- %163 = load double* @ret_df, align 8
- %164 = load double* @lret_df, align 8
+ %163 = load double, double* @ret_df, align 8
+ %164 = load double, double* @lret_df, align 8
%cmp150 = fcmp oeq double %163, %164
br i1 %cmp150, label %land.lhs.true152, label %land.end158
land.lhs.true152: ; preds = %land.end122
- %165 = load float* @x, align 4
- %166 = load float* @lx, align 4
+ %165 = load float, float* @x, align 4
+ %166 = load float, float* @lx, align 4
%cmp153 = fcmp oeq float %165, %166
br i1 %cmp153, label %land.rhs155, label %land.end158
land.rhs155: ; preds = %land.lhs.true152
- %167 = load float* @y, align 4
- %168 = load float* @ly, align 4
+ %167 = load float, float* @y, align 4
+ %168 = load float, float* @ly, align 4
%cmp156 = fcmp oeq float %167, %168
br label %land.end158
land.end158: ; preds = %land.rhs155, %land.lhs.true152, %land.end122
%169 = phi i1 [ false, %land.lhs.true152 ], [ false, %land.end122 ], [ %cmp156, %land.rhs155 ]
%land.ext159 = zext i1 %169 to i32
- %call160 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
+ %call160 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
call void @clear()
store double 1.889130e+05, double* @ret_df, align 8
store float 9.111450e+05, float* @lx, align 4
store double 0x4185320A58000000, double* @lyd, align 8
- %170 = load float* @lx, align 4
- %171 = load double* @lyd, align 8
+ %170 = load float, float* @lx, align 4
+ %171 = load double, double* @lyd, align 8
%call161 = call double @df_sf_df(float %170, double %171)
store double %call161, double* @lret_df, align 8
- %172 = load double* @ret_df, align 8
- %173 = load double* @lret_df, align 8
- %174 = load float* @x, align 4
+ %172 = load double, double* @ret_df, align 8
+ %173 = load double, double* @lret_df, align 8
+ %174 = load float, float* @x, align 4
%conv162 = fpext float %174 to double
- %175 = load float* @lx, align 4
+ %175 = load float, float* @lx, align 4
%conv163 = fpext float %175 to double
- %176 = load double* @yd, align 8
- %177 = load double* @lyd, align 8
- %178 = load double* @ret_df, align 8
- %179 = load double* @lret_df, align 8
+ %176 = load double, double* @yd, align 8
+ %177 = load double, double* @lyd, align 8
+ %178 = load double, double* @ret_df, align 8
+ %179 = load double, double* @lret_df, align 8
%cmp164 = fcmp oeq double %178, %179
br i1 %cmp164, label %land.lhs.true166, label %land.end172
land.lhs.true166: ; preds = %land.end158
- %180 = load float* @x, align 4
- %181 = load float* @lx, align 4
+ %180 = load float, float* @x, align 4
+ %181 = load float, float* @lx, align 4
%cmp167 = fcmp oeq float %180, %181
br i1 %cmp167, label %land.rhs169, label %land.end172
land.rhs169: ; preds = %land.lhs.true166
- %182 = load double* @yd, align 8
- %183 = load double* @lyd, align 8
+ %182 = load double, double* @yd, align 8
+ %183 = load double, double* @lyd, align 8
%cmp170 = fcmp oeq double %182, %183
br label %land.end172
land.end172: ; preds = %land.rhs169, %land.lhs.true166, %land.end158
%184 = phi i1 [ false, %land.lhs.true166 ], [ false, %land.end158 ], [ %cmp170, %land.rhs169 ]
%land.ext173 = zext i1 %184 to i32
- %call174 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
+ %call174 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
call void @clear()
store double 0x418B2DB900000000, double* @ret_df, align 8
store double 0x41B1EF2ED3000000, double* @lxd, align 8
store float 1.244562e+06, float* @ly, align 4
- %185 = load double* @lxd, align 8
- %186 = load float* @ly, align 4
+ %185 = load double, double* @lxd, align 8
+ %186 = load float, float* @ly, align 4
%call175 = call double @df_df_sf(double %185, float %186)
store double %call175, double* @lret_df, align 8
- %187 = load double* @ret_df, align 8
- %188 = load double* @lret_df, align 8
- %189 = load double* @xd, align 8
- %190 = load double* @lxd, align 8
- %191 = load float* @y, align 4
+ %187 = load double, double* @ret_df, align 8
+ %188 = load double, double* @lret_df, align 8
+ %189 = load double, double* @xd, align 8
+ %190 = load double, double* @lxd, align 8
+ %191 = load float, float* @y, align 4
%conv176 = fpext float %191 to double
- %192 = load float* @ly, align 4
+ %192 = load float, float* @ly, align 4
%conv177 = fpext float %192 to double
- %193 = load double* @ret_df, align 8
- %194 = load double* @lret_df, align 8
+ %193 = load double, double* @ret_df, align 8
+ %194 = load double, double* @lret_df, align 8
%cmp178 = fcmp oeq double %193, %194
br i1 %cmp178, label %land.lhs.true180, label %land.end186
land.lhs.true180: ; preds = %land.end172
- %195 = load double* @xd, align 8
- %196 = load double* @lxd, align 8
+ %195 = load double, double* @xd, align 8
+ %196 = load double, double* @lxd, align 8
%cmp181 = fcmp oeq double %195, %196
br i1 %cmp181, label %land.rhs183, label %land.end186
land.rhs183: ; preds = %land.lhs.true180
- %197 = load float* @y, align 4
- %198 = load float* @ly, align 4
+ %197 = load float, float* @y, align 4
+ %198 = load float, float* @ly, align 4
%cmp184 = fcmp oeq float %197, %198
br label %land.end186
land.end186: ; preds = %land.rhs183, %land.lhs.true180, %land.end172
%199 = phi i1 [ false, %land.lhs.true180 ], [ false, %land.end172 ], [ %cmp184, %land.rhs183 ]
%land.ext187 = zext i1 %199 to i32
- %call188 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
+ %call188 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
call void @clear()
store double 3.987721e+06, double* @ret_df, align 8
store double 5.223560e+00, double* @lxd, align 8
store double 0x40B7D37CC1A8AC5C, double* @lyd, align 8
- %200 = load double* @lxd, align 8
- %201 = load double* @lyd, align 8
+ %200 = load double, double* @lxd, align 8
+ %201 = load double, double* @lyd, align 8
%call189 = call double @df_df_df(double %200, double %201)
store double %call189, double* @lret_df, align 8
- %202 = load double* @ret_df, align 8
- %203 = load double* @lret_df, align 8
- %204 = load double* @xd, align 8
- %205 = load double* @lxd, align 8
- %206 = load double* @yd, align 8
- %207 = load double* @lyd, align 8
- %208 = load double* @ret_df, align 8
- %209 = load double* @lret_df, align 8
+ %202 = load double, double* @ret_df, align 8
+ %203 = load double, double* @lret_df, align 8
+ %204 = load double, double* @xd, align 8
+ %205 = load double, double* @lxd, align 8
+ %206 = load double, double* @yd, align 8
+ %207 = load double, double* @lyd, align 8
+ %208 = load double, double* @ret_df, align 8
+ %209 = load double, double* @lret_df, align 8
%cmp190 = fcmp oeq double %208, %209
br i1 %cmp190, label %land.lhs.true192, label %land.end198
land.lhs.true192: ; preds = %land.end186
- %210 = load double* @xd, align 8
- %211 = load double* @lxd, align 8
+ %210 = load double, double* @xd, align 8
+ %211 = load double, double* @lxd, align 8
%cmp193 = fcmp oeq double %210, %211
br i1 %cmp193, label %land.rhs195, label %land.end198
land.rhs195: ; preds = %land.lhs.true192
- %212 = load double* @yd, align 8
- %213 = load double* @lyd, align 8
+ %212 = load double, double* @yd, align 8
+ %213 = load double, double* @lyd, align 8
%cmp196 = fcmp oeq double %212, %213
br label %land.end198
land.end198: ; preds = %land.rhs195, %land.lhs.true192, %land.end186
%214 = phi i1 [ false, %land.lhs.true192 ], [ false, %land.end186 ], [ %cmp196, %land.rhs195 ]
%land.ext199 = zext i1 %214 to i32
- %call200 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
+ %call200 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
call void @clear()
- store float 4.500000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- store float 7.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
%call201 = call { float, float } @sc_v()
%215 = extractvalue { float, float } %call201, 0
%216 = extractvalue { float, float } %call201, 1
- store float %215, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- store float %216, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
- %ret_sc.real = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ store float %215, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ store float %216, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
%conv202 = fpext float %ret_sc.real to double
%conv203 = fpext float %ret_sc.imag to double
- %ret_sc.real204 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag205 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %ret_sc.real204 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag205 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
%conv206 = fpext float %ret_sc.real204 to double
%conv207 = fpext float %ret_sc.imag205 to double
- %lret_sc.real = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%conv208 = fpext float %lret_sc.real to double
%conv209 = fpext float %lret_sc.imag to double
- %lret_sc.real210 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag211 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real210 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag211 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%conv212 = fpext float %lret_sc.real210 to double
%conv213 = fpext float %lret_sc.imag211 to double
- %ret_sc.real214 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag215 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
- %lret_sc.real216 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag217 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real214 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag215 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ %lret_sc.real216 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag217 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216
%cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217
%and.ri = and i1 %cmp.r, %cmp.i
%conv218 = zext i1 %and.ri to i32
- %call219 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8]* @.str3, i32 0, i32 0), double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
+ %call219 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
call void @clear()
store float 0x3FF7A99300000000, float* @lx, align 4
- store float 4.500000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- store float 7.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
- %217 = load float* @lx, align 4
+ store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ %217 = load float, float* @lx, align 4
%call220 = call { float, float } @sc_sf(float %217)
%218 = extractvalue { float, float } %call220, 0
%219 = extractvalue { float, float } %call220, 1
- store float %218, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- store float %219, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
- %ret_sc.real221 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag222 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ store float %218, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ store float %219, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real221 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag222 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
%conv223 = fpext float %ret_sc.real221 to double
%conv224 = fpext float %ret_sc.imag222 to double
- %ret_sc.real225 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag226 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %ret_sc.real225 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag226 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
%conv227 = fpext float %ret_sc.real225 to double
%conv228 = fpext float %ret_sc.imag226 to double
- %lret_sc.real229 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag230 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real229 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag230 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%conv231 = fpext float %lret_sc.real229 to double
%conv232 = fpext float %lret_sc.imag230 to double
- %lret_sc.real233 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag234 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real233 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag234 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%conv235 = fpext float %lret_sc.real233 to double
%conv236 = fpext float %lret_sc.imag234 to double
- %220 = load float* @x, align 4
+ %220 = load float, float* @x, align 4
%conv237 = fpext float %220 to double
- %221 = load float* @lx, align 4
+ %221 = load float, float* @lx, align 4
%conv238 = fpext float %221 to double
- %ret_sc.real239 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag240 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
- %lret_sc.real241 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag242 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real239 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag240 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ %lret_sc.real241 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag242 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
%cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241
%cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242
%and.ri245 = and i1 %cmp.r243, %cmp.i244
br i1 %and.ri245, label %land.rhs247, label %land.end250
land.rhs247: ; preds = %land.end198
- %222 = load float* @x, align 4
- %223 = load float* @lx, align 4
+ %222 = load float, float* @x, align 4
+ %223 = load float, float* @lx, align 4
%cmp248 = fcmp oeq float %222, %223
br label %land.end250
land.end250: ; preds = %land.rhs247, %land.end198
%224 = phi i1 [ false, %land.end198 ], [ %cmp248, %land.rhs247 ]
%land.ext251 = zext i1 %224 to i32
- %call252 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str4, i32 0, i32 0), double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
+ %call252 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
call void @clear()
- store double 1.234500e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- store double 7.677000e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ store double 1.234500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ store double 7.677000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
%call253 = call { double, double } @dc_v()
%225 = extractvalue { double, double } %call253, 0
%226 = extractvalue { double, double } %call253, 1
- store double %225, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- store double %226, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %ret_dc.real254 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag255 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %lret_dc.real256 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag257 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real258 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag259 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real260 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag261 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ store double %225, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ store double %226, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %ret_dc.real254 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag255 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %lret_dc.real256 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag257 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real258 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag259 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real260 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag261 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
%cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260
%cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261
%and.ri264 = and i1 %cmp.r262, %cmp.i263
%conv265 = zext i1 %and.ri264 to i32
- %call266 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8]* @.str3, i32 0, i32 0), double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
+ %call266 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
call void @clear()
store double 0x40AAF6F532617C1C, double* @lxd, align 8
- store double 4.444500e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- store double 7.888000e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %227 = load float* @lx, align 4
+ store double 4.444500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ store double 7.888000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %227 = load float, float* @lx, align 4
%call267 = call { double, double } @dc_sf(float %227)
%228 = extractvalue { double, double } %call267, 0
%229 = extractvalue { double, double } %call267, 1
- store double %228, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- store double %229, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real268 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag269 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %ret_dc.real270 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag271 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real272 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag273 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %lret_dc.real274 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag275 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
- %230 = load float* @x, align 4
+ store double %228, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ store double %229, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real268 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag269 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %ret_dc.real270 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag271 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real272 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag273 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %lret_dc.real274 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag275 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %230 = load float, float* @x, align 4
%conv276 = fpext float %230 to double
- %231 = load float* @lx, align 4
+ %231 = load float, float* @lx, align 4
%conv277 = fpext float %231 to double
- %ret_dc.real278 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag279 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real280 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag281 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real278 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag279 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real280 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag281 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
%cmp.r282 = fcmp oeq double %ret_dc.real278, %lret_dc.real280
%cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281
%and.ri284 = and i1 %cmp.r282, %cmp.i283
br i1 %and.ri284, label %land.rhs286, label %land.end289
land.rhs286: ; preds = %land.end250
- %232 = load float* @x, align 4
- %233 = load float* @lx, align 4
+ %232 = load float, float* @x, align 4
+ %233 = load float, float* @lx, align 4
%cmp287 = fcmp oeq float %232, %233
br label %land.end289
land.end289: ; preds = %land.rhs286, %land.end250
%234 = phi i1 [ false, %land.end250 ], [ %cmp287, %land.rhs286 ]
%land.ext290 = zext i1 %234 to i32
- %call291 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
- %235 = load i32* %retval
+ %call291 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
+ %235 = load i32, i32* %retval
ret i32 %235
}
diff --git a/test/CodeGen/Mips/hf16call32_body.ll b/test/CodeGen/Mips/hf16call32_body.ll
index adac314..d06256c 100644
--- a/test/CodeGen/Mips/hf16call32_body.ll
+++ b/test/CodeGen/Mips/hf16call32_body.ll
@@ -14,7 +14,7 @@ define void @v_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
store float %p, float* %p.addr, align 4
- %0 = load float* %p.addr, align 4
+ %0 = load float, float* %p.addr, align 4
store float %0, float* @x, align 4
ret void
}
@@ -33,7 +33,7 @@ define void @v_df(double %p) #0 {
entry:
%p.addr = alloca double, align 8
store double %p, double* %p.addr, align 8
- %0 = load double* %p.addr, align 8
+ %0 = load double, double* %p.addr, align 8
store double %0, double* @xd, align 8
ret void
}
@@ -54,9 +54,9 @@ entry:
%p2.addr = alloca float, align 4
store float %p1, float* %p1.addr, align 4
store float %p2, float* %p2.addr, align 4
- %0 = load float* %p1.addr, align 4
+ %0 = load float, float* %p1.addr, align 4
store float %0, float* @x, align 4
- %1 = load float* %p2.addr, align 4
+ %1 = load float, float* %p2.addr, align 4
store float %1, float* @y, align 4
ret void
}
@@ -77,9 +77,9 @@ entry:
%p2.addr = alloca double, align 8
store float %p1, float* %p1.addr, align 4
store double %p2, double* %p2.addr, align 8
- %0 = load float* %p1.addr, align 4
+ %0 = load float, float* %p1.addr, align 4
store float %0, float* @x, align 4
- %1 = load double* %p2.addr, align 8
+ %1 = load double, double* %p2.addr, align 8
store double %1, double* @yd, align 8
ret void
}
@@ -101,9 +101,9 @@ entry:
%p2.addr = alloca float, align 4
store double %p1, double* %p1.addr, align 8
store float %p2, float* %p2.addr, align 4
- %0 = load double* %p1.addr, align 8
+ %0 = load double, double* %p1.addr, align 8
store double %0, double* @xd, align 8
- %1 = load float* %p2.addr, align 4
+ %1 = load float, float* %p2.addr, align 4
store float %1, float* @y, align 4
ret void
}
@@ -125,9 +125,9 @@ entry:
%p2.addr = alloca double, align 8
store double %p1, double* %p1.addr, align 8
store double %p2, double* %p2.addr, align 8
- %0 = load double* %p1.addr, align 8
+ %0 = load double, double* %p1.addr, align 8
store double %0, double* @xd, align 8
- %1 = load double* %p2.addr, align 8
+ %1 = load double, double* %p2.addr, align 8
store double %1, double* @yd, align 8
ret void
}
@@ -146,7 +146,7 @@ entry:
; Function Attrs: nounwind
define float @sf_v() #0 {
entry:
- %0 = load float* @ret_sf, align 4
+ %0 = load float, float* @ret_sf, align 4
ret float %0
}
@@ -155,9 +155,9 @@ define float @sf_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
store float %p, float* %p.addr, align 4
- %0 = load float* %p.addr, align 4
+ %0 = load float, float* %p.addr, align 4
store float %0, float* @x, align 4
- %1 = load float* @ret_sf, align 4
+ %1 = load float, float* @ret_sf, align 4
ret float %1
}
@@ -176,9 +176,9 @@ define float @sf_df(double %p) #0 {
entry:
%p.addr = alloca double, align 8
store double %p, double* %p.addr, align 8
- %0 = load double* %p.addr, align 8
+ %0 = load double, double* %p.addr, align 8
store double %0, double* @xd, align 8
- %1 = load float* @ret_sf, align 4
+ %1 = load float, float* @ret_sf, align 4
ret float %1
}
@@ -198,11 +198,11 @@ entry:
%p2.addr = alloca float, align 4
store float %p1, float* %p1.addr, align 4
store float %p2, float* %p2.addr, align 4
- %0 = load float* %p1.addr, align 4
+ %0 = load float, float* %p1.addr, align 4
store float %0, float* @x, align 4
- %1 = load float* %p2.addr, align 4
+ %1 = load float, float* %p2.addr, align 4
store float %1, float* @y, align 4
- %2 = load float* @ret_sf, align 4
+ %2 = load float, float* @ret_sf, align 4
ret float %2
}
@@ -222,11 +222,11 @@ entry:
%p2.addr = alloca double, align 8
store float %p1, float* %p1.addr, align 4
store double %p2, double* %p2.addr, align 8
- %0 = load float* %p1.addr, align 4
+ %0 = load float, float* %p1.addr, align 4
store float %0, float* @x, align 4
- %1 = load double* %p2.addr, align 8
+ %1 = load double, double* %p2.addr, align 8
store double %1, double* @yd, align 8
- %2 = load float* @ret_sf, align 4
+ %2 = load float, float* @ret_sf, align 4
ret float %2
}
@@ -247,11 +247,11 @@ entry:
%p2.addr = alloca float, align 4
store double %p1, double* %p1.addr, align 8
store float %p2, float* %p2.addr, align 4
- %0 = load double* %p1.addr, align 8
+ %0 = load double, double* %p1.addr, align 8
store double %0, double* @xd, align 8
- %1 = load float* %p2.addr, align 4
+ %1 = load float, float* %p2.addr, align 4
store float %1, float* @y, align 4
- %2 = load float* @ret_sf, align 4
+ %2 = load float, float* @ret_sf, align 4
ret float %2
}
@@ -272,11 +272,11 @@ entry:
%p2.addr = alloca double, align 8
store double %p1, double* %p1.addr, align 8
store double %p2, double* %p2.addr, align 8
- %0 = load double* %p1.addr, align 8
+ %0 = load double, double* %p1.addr, align 8
store double %0, double* @xd, align 8
- %1 = load double* %p2.addr, align 8
+ %1 = load double, double* %p2.addr, align 8
store double %1, double* @yd, align 8
- %2 = load float* @ret_sf, align 4
+ %2 = load float, float* @ret_sf, align 4
ret float %2
}
diff --git a/test/CodeGen/Mips/hf1_body.ll b/test/CodeGen/Mips/hf1_body.ll
index 5acfe86..71a1b96 100644
--- a/test/CodeGen/Mips/hf1_body.ll
+++ b/test/CodeGen/Mips/hf1_body.ll
@@ -7,7 +7,7 @@ define void @v_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
store float %p, float* %p.addr, align 4
- %0 = load float* %p.addr, align 4
+ %0 = load float, float* %p.addr, align 4
store float %0, float* @x, align 4
ret void
}
diff --git a/test/CodeGen/Mips/hfptrcall.ll b/test/CodeGen/Mips/hfptrcall.ll
index 683952d..de809f1 100644
--- a/test/CodeGen/Mips/hfptrcall.ll
+++ b/test/CodeGen/Mips/hfptrcall.ll
@@ -34,11 +34,11 @@ entry:
define { float, float } @scv() #0 {
entry:
%retval = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float 5.000000e+00, float* %real
store float 9.900000e+01, float* %imag
- %0 = load { float, float }* %retval
+ %0 = load { float, float }, { float, float }* %retval
ret { float, float } %0
}
@@ -50,11 +50,11 @@ entry:
define { double, double } @dcv() #0 {
entry:
%retval = alloca { double, double }, align 8
- %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
store double 0x416BC8B0A0000000, double* %real
store double 0x41CDCCB763800000, double* %imag
- %0 = load { double, double }* %retval
+ %0 = load { double, double }, { double, double }* %retval
ret { double, double } %0
}
@@ -65,43 +65,43 @@ entry:
; Function Attrs: nounwind
define i32 @main() #0 {
entry:
- %0 = load float ()** @ptrsv, align 4
+ %0 = load float ()*, float ()** @ptrsv, align 4
%call = call float %0()
store float %call, float* @x, align 4
- %1 = load float* @x, align 4
+ %1 = load float, float* @x, align 4
%conv = fpext float %1 to double
- %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %conv)
- %2 = load double ()** @ptrdv, align 4
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %conv)
+ %2 = load double ()*, double ()** @ptrdv, align 4
%call2 = call double %2()
store double %call2, double* @xd, align 8
- %3 = load double* @xd, align 8
- %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %3)
- %4 = load { float, float } ()** @ptrscv, align 4
+ %3 = load double, double* @xd, align 8
+ %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %3)
+ %4 = load { float, float } ()*, { float, float } ()** @ptrscv, align 4
%call4 = call { float, float } %4()
%5 = extractvalue { float, float } %call4, 0
%6 = extractvalue { float, float } %call4, 1
- store float %5, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
- store float %6, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
- %xy.real = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
- %xy.imag = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ store float %5, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
+ store float %6, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
+ %xy.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
+ %xy.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
%conv5 = fpext float %xy.real to double
%conv6 = fpext float %xy.imag to double
- %xy.real7 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
- %xy.imag8 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ %xy.real7 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
+ %xy.imag8 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
%conv9 = fpext float %xy.real7 to double
%conv10 = fpext float %xy.imag8 to double
- %call11 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10)
- %7 = load { double, double } ()** @ptrdcv, align 4
+ %call11 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10)
+ %7 = load { double, double } ()*, { double, double } ()** @ptrdcv, align 4
%call12 = call { double, double } %7()
%8 = extractvalue { double, double } %call12, 0
%9 = extractvalue { double, double } %call12, 1
- store double %8, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
- store double %9, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
- %xyd.real = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
- %xyd.imag = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
- %xyd.real13 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
- %xyd.imag14 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
- %call15 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14)
+ store double %8, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
+ store double %9, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
+ %xyd.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
+ %xyd.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
+ %xyd.real13 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
+ %xyd.imag14 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
+ %call15 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14)
ret i32 0
}
diff --git a/test/CodeGen/Mips/i32k.ll b/test/CodeGen/Mips/i32k.ll
index 73f1302..5c5761f 100644
--- a/test/CodeGen/Mips/i32k.ll
+++ b/test/CodeGen/Mips/i32k.ll
@@ -4,14 +4,14 @@ define i32 @main() nounwind {
entry:
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
; 16: lw ${{[0-9]+}}, 1f
; 16: b 2f
; 16: .align 2
; 16: 1: .word 1075344593
; 16: 2:
- %call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
+ %call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
; 16: lw ${{[0-9]+}}, 1f
; 16: b 2f
diff --git a/test/CodeGen/Mips/inlineasm-assembler-directives.ll b/test/CodeGen/Mips/inlineasm-assembler-directives.ll
index e4a6d1e..88ceed4 100644
--- a/test/CodeGen/Mips/inlineasm-assembler-directives.ll
+++ b/test/CodeGen/Mips/inlineasm-assembler-directives.ll
@@ -16,7 +16,7 @@ entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
store i32 20, i32* %a, align 4
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%1 = call i32 asm sideeffect "addi $$9, $1, 8\0A\09subi $0, $$9, 6", "=r,r,~{$1}"(i32 %0)
store i32 %1, i32* %b, align 4
ret void
diff --git a/test/CodeGen/Mips/inlineasm-operand-code.ll b/test/CodeGen/Mips/inlineasm-operand-code.ll
index 3d9dec7..b9415ee 100644
--- a/test/CodeGen/Mips/inlineasm-operand-code.ll
+++ b/test/CodeGen/Mips/inlineasm-operand-code.ll
@@ -125,7 +125,7 @@ entry:
;CHECK_BIG_32: #APP
;CHECK_BIG_32: or ${{[0-9]+}},$[[SECOND]],${{[0-9]+}}
;CHECK_BIG_32: #NO_APP
- %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0,${1:D},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
@@ -149,7 +149,7 @@ entry:
;CHECK_BIG_32: #APP
;CHECK_BIG_32: or ${{[0-9]+}},$[[SECOND]],${{[0-9]+}}
;CHECK_BIG_32: #NO_APP
- %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0,${1:L},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
@@ -173,7 +173,7 @@ entry:
;CHECK_BIG_32: #APP
;CHECK_BIG_32: or ${{[0-9]+}},$[[FIRST]],${{[0-9]+}}
;CHECK_BIG_32: #NO_APP
- %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0,${1:M},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
diff --git a/test/CodeGen/Mips/inlineasm64.ll b/test/CodeGen/Mips/inlineasm64.ll
index a8e949b..82abdf8 100644
--- a/test/CodeGen/Mips/inlineasm64.ll
+++ b/test/CodeGen/Mips/inlineasm64.ll
@@ -8,8 +8,8 @@ define void @foo1() nounwind {
entry:
; CHECK: foo1
; CHECK: daddu
- %0 = load i64* @gl1, align 8
- %1 = load i64* @gl0, align 8
+ %0 = load i64, i64* @gl1, align 8
+ %1 = load i64, i64* @gl0, align 8
%2 = tail call i64 asm "daddu $0, $1, $2", "=r,r,r"(i64 %0, i64 %1) nounwind
store i64 %2, i64* @gl2, align 8
ret void
diff --git a/test/CodeGen/Mips/inlineasm_constraint_ZC.ll b/test/CodeGen/Mips/inlineasm_constraint_ZC.ll
new file mode 100644
index 0000000..c1746a6
--- /dev/null
+++ b/test/CodeGen/Mips/inlineasm_constraint_ZC.ll
@@ -0,0 +1,167 @@
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=09BIT
+; RUN: llc -march=mipsel -mattr=+micromips < %s | FileCheck %s -check-prefix=ALL -check-prefix=12BIT
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=ALL -check-prefix=16BIT
+
+@data = global [8193 x i32] zeroinitializer
+
+define void @ZC(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; ALL: #APP
+ ; ALL: lw $1, 0($[[BASEPTR]])
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_n4(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_n4:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 -1))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; ALL: #APP
+ ; ALL: lw $1, -4($[[BASEPTR]])
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_4(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_4:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; ALL: #APP
+ ; ALL: lw $1, 4($[[BASEPTR]])
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_252(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_252:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; ALL: #APP
+ ; ALL: lw $1, 252($[[BASEPTR]])
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_256(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_256:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+
+ ; 09BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 256
+
+ ; ALL: #APP
+
+ ; 09BIT: lw $1, 0($[[BASEPTR2]])
+ ; 12BIT: lw $1, 256($[[BASEPTR]])
+ ; 16BIT: lw $1, 256($[[BASEPTR]])
+
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_2044(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_2044:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 511))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+
+ ; 09BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 2044
+
+ ; ALL: #APP
+
+ ; 09BIT: lw $1, 0($[[BASEPTR2]])
+ ; 12BIT: lw $1, 2044($[[BASEPTR]])
+ ; 16BIT: lw $1, 2044($[[BASEPTR]])
+
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_2048(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_2048:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 512))
+
+ ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
+
+ ; 09BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 2048
+ ; 12BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 2048
+
+ ; ALL: #APP
+
+ ; 09BIT: lw $1, 0($[[BASEPTR2]])
+ ; 12BIT: lw $1, 0($[[BASEPTR2]])
+ ; 16BIT: lw $1, 2048($[[BASEPTR]])
+
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_32764(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_32764:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+
+ ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
+
+ ; 09BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 32764
+ ; 12BIT: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 32764
+
+ ; ALL: #APP
+
+ ; 09BIT: lw $1, 0($[[BASEPTR2]])
+ ; 12BIT: lw $1, 0($[[BASEPTR2]])
+ ; 16BIT: lw $1, 32764($[[BASEPTR]])
+
+ ; ALL: #NO_APP
+
+ ret void
+}
+
+define void @ZC_offset_32768(i32 *%p) nounwind {
+entry:
+ ; ALL-LABEL: ZC_offset_32768:
+
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+
+ ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; ALL-DAG: ori $[[T0:[0-9]+]], $zero, 32768
+
+ ; 09BIT: addu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], $[[T0]]
+ ; 12BIT: addu16 $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], $[[T0]]
+ ; 16BIT: addu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], $[[T0]]
+
+ ; ALL: #APP
+ ; ALL: lw $1, 0($[[BASEPTR2]])
+ ; ALL: #NO_APP
+
+ ret void
+}
diff --git a/test/CodeGen/Mips/inlineasm_constraint_m.ll b/test/CodeGen/Mips/inlineasm_constraint_m.ll
new file mode 100644
index 0000000..00053ad
--- /dev/null
+++ b/test/CodeGen/Mips/inlineasm_constraint_m.ll
@@ -0,0 +1,61 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+@data = global [8193 x i32] zeroinitializer
+
+define void @m(i32 *%p) nounwind {
+entry:
+ ; CHECK-LABEL: m:
+
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+
+ ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; CHECK: #APP
+ ; CHECK: lw $1, 0($[[BASEPTR]])
+ ; CHECK: #NO_APP
+
+ ret void
+}
+
+define void @m_offset_4(i32 *%p) nounwind {
+entry:
+ ; CHECK-LABEL: m_offset_4:
+
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+
+ ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; CHECK: #APP
+ ; CHECK: lw $1, 4($[[BASEPTR]])
+ ; CHECK: #NO_APP
+
+ ret void
+}
+
+define void @m_offset_32764(i32 *%p) nounwind {
+entry:
+ ; CHECK-LABEL: m_offset_32764:
+
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+
+ ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; CHECK: #APP
+ ; CHECK: lw $1, 32764($[[BASEPTR]])
+ ; CHECK: #NO_APP
+
+ ret void
+}
+
+define void @m_offset_32768(i32 *%p) nounwind {
+entry:
+ ; CHECK-LABEL: m_offset_32768:
+
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+
+ ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
+ ; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768
+ ; CHECK: addu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], $[[T0]]
+ ; CHECK: #APP
+ ; CHECK: lw $1, 0($[[BASEPTR2]])
+ ; CHECK: #NO_APP
+
+ ret void
+}
diff --git a/test/CodeGen/Mips/inlineasmmemop.ll b/test/CodeGen/Mips/inlineasmmemop.ll
index 5518520..9e9b6cd 100644
--- a/test/CodeGen/Mips/inlineasmmemop.ll
+++ b/test/CodeGen/Mips/inlineasmmemop.ll
@@ -6,14 +6,13 @@ define i32 @f1(i32 %x) nounwind {
entry:
; CHECK-LABEL: f1:
-; CHECK: addiu $[[T0:[0-9]+]], $sp
; CHECK: #APP
-; CHECK: sw $4, 0($[[T0]])
+; CHECK: sw $4, [[OFFSET:[0-9]+]]($sp)
; CHECK: #NO_APP
+; CHECK: lw $[[T1:[0-9]+]], %got(g1)
; CHECK: #APP
-; CHECK: lw $[[T3:[0-9]+]], 0($[[T0]])
+; CHECK: lw $[[T3:[0-9]+]], [[OFFSET]]($sp)
; CHECK: #NO_APP
-; CHECK: lw $[[T1:[0-9]+]], %got(g1)
; CHECK: sw $[[T3]], 0($[[T1]])
%l1 = alloca i32, align 4
@@ -27,13 +26,13 @@ entry:
; "D": Second word of a double word. This works for any memory element
; double or single.
; CHECK: #APP
-; CHECK: lw ${{[0-9]+}},4(${{[0-9]+}});
+; CHECK: lw ${{[0-9]+}}, 16(${{[0-9]+}});
; CHECK: #NO_APP
; No "D": First word of a double word. This works for any memory element
; double or single.
; CHECK: #APP
-; CHECK: lw ${{[0-9]+}},0(${{[0-9]+}});
+; CHECK: lw ${{[0-9]+}}, 12(${{[0-9]+}});
; CHECK: #NO_APP
@b = common global [20 x i32] zeroinitializer, align 4
@@ -41,8 +40,8 @@ entry:
define void @main() {
entry:
; Second word:
- tail call void asm sideeffect " lw $0,${1:D};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1:D};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
; First word. Notice, no 'D':
- tail call void asm sideeffect " lw $0,${1};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
ret void
}
diff --git a/test/CodeGen/Mips/internalfunc.ll b/test/CodeGen/Mips/internalfunc.ll
index 863375a..bde7357 100644
--- a/test/CodeGen/Mips/internalfunc.ll
+++ b/test/CodeGen/Mips/internalfunc.ll
@@ -20,7 +20,7 @@ entry:
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %tmp1 = load void (...)** @caller.sf1, align 4
+ %tmp1 = load void (...)*, void (...)** @caller.sf1, align 4
tail call void (...)* %tmp1() nounwind
br label %if.end
@@ -30,7 +30,7 @@ if.end: ; preds = %entry, %if.then
; CHECK: lw $[[R3:[0-9]+]], %got(caller.sf1)
; CHECK: sw ${{[0-9]+}}, %lo(caller.sf1)($[[R3]])
%tobool3 = icmp ne i32 %a0, 0
- %tmp4 = load void (...)** @gf1, align 4
+ %tmp4 = load void (...)*, void (...)** @gf1, align 4
%cond = select i1 %tobool3, void (...)* %tmp4, void (...)* bitcast (void ()* @sf2 to void (...)*)
store void (...)* %cond, void (...)** @caller.sf1, align 4
ret void
@@ -38,7 +38,7 @@ if.end: ; preds = %entry, %if.then
define internal void @sf2() nounwind {
entry:
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0)) nounwind
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
ret void
}
@@ -46,7 +46,7 @@ declare i32 @printf(i8* nocapture, ...) nounwind
define internal fastcc void @f2() nounwind noinline {
entry:
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0)) nounwind
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/jtstat.ll b/test/CodeGen/Mips/jtstat.ll
index 01afc08..35f71cf 100644
--- a/test/CodeGen/Mips/jtstat.ll
+++ b/test/CodeGen/Mips/jtstat.ll
@@ -8,7 +8,7 @@ define void @test(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
switch i32 %0, label %sw.epilog [
i32 115, label %sw.bb
i32 105, label %sw.bb1
diff --git a/test/CodeGen/Mips/l3mc.ll b/test/CodeGen/Mips/l3mc.ll
index 3bfb389..6aeed04 100644
--- a/test/CodeGen/Mips/l3mc.ll
+++ b/test/CodeGen/Mips/l3mc.ll
@@ -42,28 +42,28 @@
; Function Attrs: nounwind
define void @_Z3foov() #0 {
entry:
- %0 = load double* @d1, align 8
+ %0 = load double, double* @d1, align 8
%conv = fptosi double %0 to i64
store i64 %conv, i64* @ll1, align 8
- %1 = load double* @d2, align 8
+ %1 = load double, double* @d2, align 8
%conv1 = fptoui double %1 to i64
store i64 %conv1, i64* @ull1, align 8
- %2 = load float* @f1, align 4
+ %2 = load float, float* @f1, align 4
%conv2 = fptosi float %2 to i64
store i64 %conv2, i64* @ll2, align 8
- %3 = load float* @f2, align 4
+ %3 = load float, float* @f2, align 4
%conv3 = fptoui float %3 to i64
store i64 %conv3, i64* @ull2, align 8
- %4 = load double* @d3, align 8
+ %4 = load double, double* @d3, align 8
%conv4 = fptosi double %4 to i32
store i32 %conv4, i32* @l1, align 4
- %5 = load double* @d4, align 8
+ %5 = load double, double* @d4, align 8
%conv5 = fptoui double %5 to i32
store i32 %conv5, i32* @ul1, align 4
- %6 = load float* @f3, align 4
+ %6 = load float, float* @f3, align 4
%conv6 = fptosi float %6 to i32
store i32 %conv6, i32* @l2, align 4
- %7 = load float* @f4, align 4
+ %7 = load float, float* @f4, align 4
%conv7 = fptoui float %7 to i32
store i32 %conv7, i32* @ul2, align 4
ret void
@@ -72,28 +72,28 @@ entry:
; Function Attrs: nounwind
define void @_Z3goov() #0 {
entry:
- %0 = load i64* @ll1, align 8
+ %0 = load i64, i64* @ll1, align 8
%conv = sitofp i64 %0 to double
store double %conv, double* @d1, align 8
- %1 = load i64* @ull1, align 8
+ %1 = load i64, i64* @ull1, align 8
%conv1 = uitofp i64 %1 to double
store double %conv1, double* @d2, align 8
- %2 = load i64* @ll2, align 8
+ %2 = load i64, i64* @ll2, align 8
%conv2 = sitofp i64 %2 to float
store float %conv2, float* @f1, align 4
- %3 = load i64* @ull2, align 8
+ %3 = load i64, i64* @ull2, align 8
%conv3 = uitofp i64 %3 to float
store float %conv3, float* @f2, align 4
- %4 = load i32* @l1, align 4
+ %4 = load i32, i32* @l1, align 4
%conv4 = sitofp i32 %4 to double
store double %conv4, double* @d3, align 8
- %5 = load i32* @ul1, align 4
+ %5 = load i32, i32* @ul1, align 4
%conv5 = uitofp i32 %5 to double
store double %conv5, double* @d4, align 8
- %6 = load i32* @l2, align 4
+ %6 = load i32, i32* @l2, align 4
%conv6 = sitofp i32 %6 to float
store float %conv6, float* @f3, align 4
- %7 = load i32* @ul2, align 4
+ %7 = load i32, i32* @ul2, align 4
%conv7 = uitofp i32 %7 to float
store float %conv7, float* @f4, align 4
ret void
diff --git a/test/CodeGen/Mips/largeimm1.ll b/test/CodeGen/Mips/largeimm1.ll
index 1c0f69c..06c4d6b 100644
--- a/test/CodeGen/Mips/largeimm1.ll
+++ b/test/CodeGen/Mips/largeimm1.ll
@@ -5,7 +5,7 @@ define void @f() nounwind {
entry:
%a1 = alloca [1073741824 x i8], align 1
- %arrayidx = getelementptr inbounds [1073741824 x i8]* %a1, i32 0, i32 1048676
+ %arrayidx = getelementptr inbounds [1073741824 x i8], [1073741824 x i8]* %a1, i32 0, i32 1048676
call void @f2(i8* %arrayidx) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index 918dfee..a53a953 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -27,8 +27,8 @@ entry:
; 64: sd $ra, 24($[[R1]])
%agg.tmp = alloca %struct.S1, align 1
- %tmp = getelementptr inbounds %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i32 1, i1 false)
+ %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i32 1, i1 false)
call void @f2(%struct.S1* byval %agg.tmp) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/lb1.ll b/test/CodeGen/Mips/lb1.ll
index aac2767..ad94c5f 100644
--- a/test/CodeGen/Mips/lb1.ll
+++ b/test/CodeGen/Mips/lb1.ll
@@ -6,12 +6,12 @@ define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i8* @c, align 1
+ %0 = load i8, i8* @c, align 1
; 16: lb ${{[0-9]+}}, 0(${{[0-9]+}})
%conv = sext i8 %0 to i32
store i32 %conv, i32* %i, align 4
- %1 = load i32* %i, align 4
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ %1 = load i32, i32* %i, align 4
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
ret i32 0
}
diff --git a/test/CodeGen/Mips/lbu1.ll b/test/CodeGen/Mips/lbu1.ll
index 63e0cca..a8ef2ff 100644
--- a/test/CodeGen/Mips/lbu1.ll
+++ b/test/CodeGen/Mips/lbu1.ll
@@ -6,13 +6,13 @@ define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i8* @c, align 1
+ %0 = load i8, i8* @c, align 1
%conv = zext i8 %0 to i32
; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
store i32 %conv, i32* %i, align 4
- %1 = load i8* @c, align 1
+ %1 = load i8, i8* @c, align 1
%conv1 = zext i8 %1 to i32
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %conv1)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %conv1)
ret i32 0
}
diff --git a/test/CodeGen/Mips/lcb2.ll b/test/CodeGen/Mips/lcb2.ll
index 59b96e6..716a6bb 100644
--- a/test/CodeGen/Mips/lcb2.ll
+++ b/test/CodeGen/Mips/lcb2.ll
@@ -9,7 +9,7 @@
; Function Attrs: nounwind optsize
define i32 @bnez() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end
@@ -31,7 +31,7 @@ if.end: ; preds = %if.then, %entry
; Function Attrs: nounwind optsize
define i32 @beqz() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -60,8 +60,8 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @bteqz() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
@@ -90,15 +90,15 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @btz() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !10
- %2 = load i32* @i, align 4, !tbaa !1
- %3 = load i32* @j, align 4, !tbaa !1
+ %2 = load i32, i32* @i, align 4, !tbaa !1
+ %3 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/lcb3c.ll b/test/CodeGen/Mips/lcb3c.ll
index eb83291..d6e259c 100644
--- a/test/CodeGen/Mips/lcb3c.ll
+++ b/test/CodeGen/Mips/lcb3c.ll
@@ -7,7 +7,7 @@
; Function Attrs: nounwind
define i32 @s() #0 {
entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -30,7 +30,7 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind
define i32 @b() #0 {
entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
diff --git a/test/CodeGen/Mips/lcb4a.ll b/test/CodeGen/Mips/lcb4a.ll
index fbcadd2..0285ae1 100644
--- a/test/CodeGen/Mips/lcb4a.ll
+++ b/test/CodeGen/Mips/lcb4a.ll
@@ -7,7 +7,7 @@
; Function Attrs: nounwind optsize
define i32 @foo() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -32,7 +32,7 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define i32 @goo() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
diff --git a/test/CodeGen/Mips/lcb5.ll b/test/CodeGen/Mips/lcb5.ll
index b2a8d1d..172ecb3 100644
--- a/test/CodeGen/Mips/lcb5.ll
+++ b/test/CodeGen/Mips/lcb5.ll
@@ -7,7 +7,7 @@
; Function Attrs: nounwind optsize
define i32 @x0() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -33,7 +33,7 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define i32 @x1() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -61,7 +61,7 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define i32 @y0() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -86,7 +86,7 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define i32 @y1() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
@@ -114,8 +114,8 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @z0() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
@@ -140,8 +140,8 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @z1() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
@@ -169,15 +169,15 @@ if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @z3() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !17
- %2 = load i32* @i, align 4, !tbaa !1
- %3 = load i32* @j, align 4, !tbaa !1
+ %2 = load i32, i32* @i, align 4, !tbaa !1
+ %3 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
@@ -192,15 +192,15 @@ if.end: ; preds = %if.then, %entry
; Function Attrs: nounwind optsize
define void @z4() #0 {
entry:
- %0 = load i32* @i, align 4, !tbaa !1
- %1 = load i32* @j, align 4, !tbaa !1
+ %0 = load i32, i32* @i, align 4, !tbaa !1
+ %1 = load i32, i32* @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !18
- %2 = load i32* @i, align 4, !tbaa !1
- %3 = load i32* @j, align 4, !tbaa !1
+ %2 = load i32, i32* @i, align 4, !tbaa !1
+ %3 = load i32, i32* @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/lh1.ll b/test/CodeGen/Mips/lh1.ll
index 1f95b09..3b245b1 100644
--- a/test/CodeGen/Mips/lh1.ll
+++ b/test/CodeGen/Mips/lh1.ll
@@ -6,12
+6,12 @@ define i32 @main() nounwind { entry: %i = alloca i32, align 4 - %0 = load i16* @s, align 2 + %0 = load i16, i16* @s, align 2 %conv = sext i16 %0 to i32 ; 16: lh ${{[0-9]+}}, 0(${{[0-9]+}}) store i32 %conv, i32* %i, align 4 - %1 = load i32* %i, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1) + %1 = load i32, i32* %i, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) ret i32 0 } diff --git a/test/CodeGen/Mips/lhu1.ll b/test/CodeGen/Mips/lhu1.ll index 0cfcede..02abfb7 100644 --- a/test/CodeGen/Mips/lhu1.ll +++ b/test/CodeGen/Mips/lhu1.ll @@ -7,12 +7,12 @@ define i32 @main() nounwind { entry: %i = alloca i32, align 4 - %0 = load i16* @s, align 2 + %0 = load i16, i16* @s, align 2 %conv = zext i16 %0 to i32 ; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}}) store i32 %conv, i32* %i, align 4 - %1 = load i32* %i, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1) + %1 = load i32, i32* %i, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) ret i32 0 } diff --git a/test/CodeGen/Mips/llcarry.ll b/test/CodeGen/Mips/llcarry.ll index 7763dae..f4120ec 100644 --- a/test/CodeGen/Mips/llcarry.ll +++ b/test/CodeGen/Mips/llcarry.ll @@ -9,8 +9,8 @@ define void @test1() nounwind { entry: - %0 = load i64* @i, align 8 - %1 = load i64* @j, align 8 + %0 = load i64, i64* @i, align 8 + %1 = load i64, i64* @j, align 8 %add = add nsw i64 %1, %0 store i64 %add, i64* @k, align 8 ; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} @@ -23,8 +23,8 @@ entry: define void @test2() nounwind { entry: - %0 = load i64* @i, align 8 - %1 = load i64* @j, align 8 + %0 = load i64, i64* @i, align 8 + %1 = load i64, i64* @j, align 8 %sub = sub nsw i64 %0, %1 ; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} @@ -37,7 +37,7 @@ entry: define void @test3() nounwind { entry: - %0 = load i64* @ii, align 8 + %0 = load i64, i64* @ii, align 8 %add = add nsw i64 %0, 15 ; 16: addiu ${{[0-9]+}}, 15 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} diff --git a/test/CodeGen/Mips/load-store-left-right.ll b/test/CodeGen/Mips/load-store-left-right.ll index b8e6e83..a01d246 100644 --- a/test/CodeGen/Mips/load-store-left-right.ll +++ b/test/CodeGen/Mips/load-store-left-right.ll @@ -43,7 +43,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)( ; MIPS64R6: lw $2, 0($[[PTR]]) - %0 = load i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1 + %0 = load i32, i32* getelementptr inbounds (%struct.SI, %struct.SI* @si, i32 0, i32 0), align 1 ret i32 %0 } @@ -69,7 +69,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)( ; MIPS64R6: sw $4, 0($[[PTR]]) - store i32 %a, i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1 + store i32 %a, i32* getelementptr inbounds (%struct.SI, %struct.SI* @si, i32 0, i32 0), align 1 ret void } @@ -100,7 +100,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)( ; MIPS64R6: ld $2, 0($[[PTR]]) - %0 = load i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1 + %0 = load i64, i64* getelementptr inbounds (%struct.SLL, %struct.SLL* @sll, i64 0, i32 0), align 1 ret i64 %0 } @@ -129,7 +129,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)( ; MIPS64R6: lw $2, 0($[[PTR]]) - %0 = load i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1 + %0 = load i32, i32* 
getelementptr inbounds (%struct.SI, %struct.SI* @si, i64 0, i32 0), align 1 %conv = sext i32 %0 to i64 ret i64 %conv } @@ -165,7 +165,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sui)( ; MIPS64R6: lwu $2, 0($[[PTR]]) - %0 = load i32* getelementptr inbounds (%struct.SUI* @sui, i64 0, i32 0), align 1 + %0 = load i32, i32* getelementptr inbounds (%struct.SUI, %struct.SUI* @sui, i64 0, i32 0), align 1 %conv = zext i32 %0 to i64 ret i64 %conv } @@ -197,7 +197,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)( ; MIPS64R6: sd $4, 0($[[PTR]]) - store i64 %a, i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1 + store i64 %a, i64* getelementptr inbounds (%struct.SLL, %struct.SLL* @sll, i64 0, i32 0), align 1 ret void } @@ -223,7 +223,7 @@ entry: ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)( ; MIPS64R6: sw $4, 0($[[PTR]]) - store i32 %a, i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1 + store i32 %a, i32* getelementptr inbounds (%struct.SI, %struct.SI* @si, i64 0, i32 0), align 1 ret void } @@ -257,8 +257,8 @@ entry: ; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) ; ALL-DAG: sb $[[R1]], 3($[[PTR]]) - %0 = load %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 0), align 1 - store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 1), align 1 + %0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 0), align 1 + store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 1), align 1 ret void } @@ -300,8 +300,8 @@ entry: ; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) ; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]]) - %0 = load %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 0), align 1 - store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 1), align 1 + %0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 0), align 1 + store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 1), align 1 ret void } @@ -361,8 +361,8 @@ entry: ; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]]) ; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]]) - %0 = load %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 0), align 1 - store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 1), align 1 + %0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 0), align 1 + store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 1), align 1 ret void } diff --git a/test/CodeGen/Mips/machineverifier.ll b/test/CodeGen/Mips/machineverifier.ll index c673fe5..d496b83 100644 --- a/test/CodeGen/Mips/machineverifier.ll +++ b/test/CodeGen/Mips/machineverifier.ll @@ -6,7 +6,7 @@ define void @foo() nounwind { entry: - %0 = load i32* @g, align 4 + %0 = load i32, i32* @g, align 4 %tobool = icmp eq i32 %0, 0 br i1 %tobool, label %if.end, label %if.then diff --git a/test/CodeGen/Mips/mbrsize4a.ll b/test/CodeGen/Mips/mbrsize4a.ll index 15e1f47..e7ca776 100644 --- a/test/CodeGen/Mips/mbrsize4a.ll +++ b/test/CodeGen/Mips/mbrsize4a.ll @@ -17,11 +17,11 @@ z: ; preds = %y, %entry br label %y y: ; preds = %z - %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0)) + %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0)) br label %z return: ; No predecessors! 
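; NOTE (editorial): every hunk in these test updates applies the same mechanical
; rewrite for LLVM's explicit-type IR syntax: 'load' now spells out its result
; type, and 'getelementptr' its source element type. A minimal sketch of the
; pattern, using a hypothetical global @g rather than any one test:
;   before: %v = load i32* @g, align 4
;   after:  %v = load i32, i32* @g, align 4
;   before: %p = getelementptr inbounds [4 x i32]* @g, i32 0, i32 1
;   after:  %p = getelementptr inbounds [4 x i32], [4 x i32]* @g, i32 0, i32 1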
- %0 = load i32* %retval + %0 = load i32, i32* %retval ret i32 %0 ; jal16: jal $BB{{[0-9]+}}_{{[0-9]+}} } diff --git a/test/CodeGen/Mips/memcpy.ll b/test/CodeGen/Mips/memcpy.ll index 39764a9..5c4ebb2 100644 --- a/test/CodeGen/Mips/memcpy.ll +++ b/test/CodeGen/Mips/memcpy.ll @@ -8,9 +8,9 @@ define void @foo1(%struct.S1* %s1, i8 signext %n) nounwind { entry: ; CHECK-NOT: call16(memcpy - %arraydecay = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1, i32 0 - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %arraydecay, i8* getelementptr inbounds ([31 x i8]* @.str, i32 0, i32 0), i32 31, i32 1, i1 false) - %arrayidx = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1, i32 40 + %arraydecay = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 0 + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %arraydecay, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i32 1, i1 false) + %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 40 store i8 %n, i8* %arrayidx, align 1 ret void } diff --git a/test/CodeGen/Mips/micromips-addiu.ll b/test/CodeGen/Mips/micromips-addiu.ll index c5bee34..3035782 100644 --- a/test/CodeGen/Mips/micromips-addiu.ll +++ b/test/CodeGen/Mips/micromips-addiu.ll @@ -8,20 +8,20 @@ define i32 @main() nounwind { entry: - %0 = load i32* @x, align 4 + %0 = load i32, i32* @x, align 4 %addiu1 = add i32 %0, -7 %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds - ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu1) + ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu1) - %1 = load i32* @y, align 4 + %1 = load i32, i32* @y, align 4 %addiu2 = add i32 %1, 55 %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds - ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu2) + ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu2) - %2 = load i32* @z, align 4 + %2 = load i32, i32* @z, align 4 %addiu3 = add i32 %2, 24 %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds - ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu3) + ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu3) ret i32 0 } diff --git a/test/CodeGen/Mips/micromips-addu16.ll b/test/CodeGen/Mips/micromips-addu16.ll new file mode 100644 index 0000000..3ecdf24 --- /dev/null +++ b/test/CodeGen/Mips/micromips-addu16.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \ +; RUN: -relocation-model=pic -O3 < %s | FileCheck %s + +define i32 @main() { +entry: + %retval = alloca i32, align 4 + %a = alloca i32, align 4 + %b = alloca i32, align 4 + %c = alloca i32, align 4 + store i32 0, i32* %retval + %0 = load i32, i32* %b, align 4 + %1 = load i32, i32* %c, align 4 + %add = add nsw i32 %0, %1 + store i32 %add, i32* %a, align 4 + ret i32 0 +} + +; CHECK: addu16 diff --git a/test/CodeGen/Mips/micromips-and16.ll b/test/CodeGen/Mips/micromips-and16.ll index 4eacf18..d0a16ac 100644 --- a/test/CodeGen/Mips/micromips-and16.ll +++ b/test/CodeGen/Mips/micromips-and16.ll @@ -8,8 +8,8 @@ entry: %b = alloca i32, align 4 %c = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* %b, align 4 - %1 = load i32* %c, align 4 + %0 = load i32, i32* %b, align 4 + %1 = load i32, i32* %c, align 4 %and = and i32 %0, %1 store i32 %and, i32* %a, align 4 ret i32 0 diff --git a/test/CodeGen/Mips/micromips-andi.ll b/test/CodeGen/Mips/micromips-andi.ll index b82d2b0..cec30e2 100644 --- a/test/CodeGen/Mips/micromips-andi.ll +++ b/test/CodeGen/Mips/micromips-andi.ll @@ -7,15 +7,15 @@ define i32 @main() nounwind { entry: - %0 = load i32* @x, align 
4 + %0 = load i32, i32* @x, align 4 %and1 = and i32 %0, 4 %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds - ([7 x i8]* @.str, i32 0, i32 0), i32 %and1) + ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and1) - %1 = load i32* @y, align 4 + %1 = load i32, i32* @y, align 4 %and2 = and i32 %1, 5 %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds - ([7 x i8]* @.str, i32 0, i32 0), i32 %and2) + ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and2) ret i32 0 } diff --git a/test/CodeGen/Mips/micromips-compact-branches.ll b/test/CodeGen/Mips/micromips-compact-branches.ll index 670f9a0..c689944 100644 --- a/test/CodeGen/Mips/micromips-compact-branches.ll +++ b/test/CodeGen/Mips/micromips-compact-branches.ll @@ -4,7 +4,7 @@ define void @main() nounwind uwtable { entry: %x = alloca i32, align 4 - %0 = load i32* %x, align 4 + %0 = load i32, i32* %x, align 4 %cmp = icmp eq i32 %0, 0 br i1 %cmp, label %if.then, label %if.end diff --git a/test/CodeGen/Mips/micromips-delay-slot-jr.ll b/test/CodeGen/Mips/micromips-delay-slot-jr.ll index 09a98c2..c6636ff 100644 --- a/test/CodeGen/Mips/micromips-delay-slot-jr.ll +++ b/test/CodeGen/Mips/micromips-delay-slot-jr.ll @@ -11,14 +11,14 @@ entry: L1: ; preds = %entry, %L1 %i.0 = phi i32 [ 0, %entry ], [ %inc, %L1 ] - %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0)) + %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0)) %inc = add i32 %i.0, 1 - %arrayidx = getelementptr inbounds [3 x i8*]* @main.L, i32 0, i32 %i.0 - %0 = load i8** %arrayidx, align 4, !tbaa !1 + %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @main.L, i32 0, i32 %i.0 + %0 = load i8*, i8** %arrayidx, align 4, !tbaa !1 indirectbr i8* %0, [label %L1, label %L2] L2: ; preds = %L1 - %puts2 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str2, i32 0, i32 0)) + %puts2 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str2, i32 0, i32 0)) ret i32 0 } @@ -36,7 +36,7 @@ declare i32 @puts(i8* nocapture readonly) #1 @bar_ary = common global [4 x %struct.barstruct] zeroinitializer, align 4 define float* @spooky(i32 signext %i) #0 { - %safe = getelementptr inbounds [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1 + %safe = getelementptr inbounds [4 x %struct.barstruct], [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1 store float 1.420000e+02, float* %safe, align 4, !tbaa !1 ret float* %safe } diff --git a/test/CodeGen/Mips/micromips-delay-slot.ll b/test/CodeGen/Mips/micromips-delay-slot.ll index b5f6c56..ef65462 100644 --- a/test/CodeGen/Mips/micromips-delay-slot.ll +++ b/test/CodeGen/Mips/micromips-delay-slot.ll @@ -6,7 +6,7 @@ define i32 @foo(i32 signext %a) #0 { entry: %a.addr = alloca i32, align 4 store i32 %a, i32* %a.addr, align 4 - %0 = load i32* %a.addr, align 4 + %0 = load i32, i32* %a.addr, align 4 %shl = shl i32 %0, 2 %call = call i32 @bar(i32 signext %shl) ret i32 %call diff --git a/test/CodeGen/Mips/micromips-gp-rc.ll b/test/CodeGen/Mips/micromips-gp-rc.ll new file mode 100644 index 0000000..f139f7a --- /dev/null +++ b/test/CodeGen/Mips/micromips-gp-rc.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \ +; RUN: -relocation-model=pic -O3 < %s | FileCheck %s + +@g = external global i32 + +; Function Attrs: noreturn nounwind +define void @foo() #0 { +entry: + %0 = load i32, i32* @g, align 4 + tail call void @exit(i32 signext %0) + unreachable +} + +; Function Attrs: noreturn +declare void @exit(i32 signext) + 
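; NOTE (editorial): the tests this patch adds (micromips-addu16.ll,
; micromips-subu16.ll, micromips-not16.ll, and this gp-rc file) follow one
; recipe: a tiny function compiled at -O3 with -mattr=+micromips, then
; FileCheck pins a single compressed encoding (or, just below, the $gp-setup
; move). Illustrative only, with made-up register choices, the contrast the
; CHECK lines lock in looks like:
;   addu   $2, $3, $4      ; 32-bit MIPS32 encoding
;   addu16 $2, $3, $4      ; 16-bit microMIPS encoding these tests expect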
+; CHECK: move $gp, ${{[0-9]+}} + diff --git a/test/CodeGen/Mips/micromips-jal.ll b/test/CodeGen/Mips/micromips-jal.ll index fccc229..51832fe 100644 --- a/test/CodeGen/Mips/micromips-jal.ll +++ b/test/CodeGen/Mips/micromips-jal.ll @@ -7,8 +7,8 @@ entry: %b.addr = alloca i32, align 4 store i32 %a, i32* %a.addr, align 4 store i32 %b, i32* %b.addr, align 4 - %0 = load i32* %a.addr, align 4 - %1 = load i32* %b.addr, align 4 + %0 = load i32, i32* %a.addr, align 4 + %1 = load i32, i32* %b.addr, align 4 %add = add nsw i32 %0, %1 ret i32 %add } @@ -20,11 +20,11 @@ entry: %y = alloca i32, align 4 %z = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* %y, align 4 - %1 = load i32* %z, align 4 + %0 = load i32, i32* %y, align 4 + %1 = load i32, i32* %z, align 4 %call = call i32 @sum(i32 %0, i32 %1) store i32 %call, i32* %x, align 4 - %2 = load i32* %x, align 4 + %2 = load i32, i32* %x, align 4 ret i32 %2 } diff --git a/test/CodeGen/Mips/micromips-load-effective-address.ll b/test/CodeGen/Mips/micromips-load-effective-address.ll index afba760..4704580 100644 --- a/test/CodeGen/Mips/micromips-load-effective-address.ll +++ b/test/CodeGen/Mips/micromips-load-effective-address.ll @@ -7,10 +7,10 @@ entry: %y.addr = alloca i32*, align 8 store i32* %x, i32** %x.addr, align 8 store i32* %y, i32** %y.addr, align 8 - %0 = load i32** %x.addr, align 8 - %1 = load i32* %0, align 4 - %2 = load i32** %y.addr, align 8 - %3 = load i32* %2, align 4 + %0 = load i32*, i32** %x.addr, align 8 + %1 = load i32, i32* %0, align 4 + %2 = load i32*, i32** %y.addr, align 8 + %3 = load i32, i32* %2, align 4 %add = add nsw i32 %1, %3 ret i32 %add } diff --git a/test/CodeGen/Mips/micromips-not16.ll b/test/CodeGen/Mips/micromips-not16.ll new file mode 100644 index 0000000..d31aefa --- /dev/null +++ b/test/CodeGen/Mips/micromips-not16.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \ +; RUN: -relocation-model=pic -O3 < %s | FileCheck %s + +define i32 @main() { +entry: + %retval = alloca i32, align 4 + %x = alloca i64, align 8 + store i32 0, i32* %retval + %0 = load i64, i64* %x, align 8 + %cmp = icmp ne i64 %0, 9223372036854775807 + br i1 %cmp, label %if.then, label %if.end + +if.then: + store i32 1, i32* %retval + br label %return + +if.end: + store i32 0, i32* %retval + br label %return + +return: + %1 = load i32, i32* %retval + ret i32 %1 +} + +; CHECK: not16 diff --git a/test/CodeGen/Mips/micromips-or16.ll b/test/CodeGen/Mips/micromips-or16.ll index ab7e79a..82ea9c6 100644 --- a/test/CodeGen/Mips/micromips-or16.ll +++ b/test/CodeGen/Mips/micromips-or16.ll @@ -8,8 +8,8 @@ entry: %b = alloca i32, align 4 %c = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* %b, align 4 - %1 = load i32* %c, align 4 + %0 = load i32, i32* %b, align 4 + %1 = load i32, i32* %c, align 4 %or = or i32 %0, %1 store i32 %or, i32* %a, align 4 ret i32 0 diff --git a/test/CodeGen/Mips/micromips-rdhwr-directives.ll b/test/CodeGen/Mips/micromips-rdhwr-directives.ll index af40a87..ebe4ddd 100644 --- a/test/CodeGen/Mips/micromips-rdhwr-directives.ll +++ b/test/CodeGen/Mips/micromips-rdhwr-directives.ll @@ -10,6 +10,6 @@ entry: ; CHECK: rdhwr ; CHECK: .set pop - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 ret i32 %0 } diff --git a/test/CodeGen/Mips/micromips-shift.ll b/test/CodeGen/Mips/micromips-shift.ll index 8215010..ed1bcbb 100644 --- a/test/CodeGen/Mips/micromips-shift.ll +++ b/test/CodeGen/Mips/micromips-shift.ll @@ -8,11 +8,11 @@ define i32 @shift_left() nounwind { entry: - %0 = load i32* 
@a, align 4 + %0 = load i32, i32* @a, align 4 %shl = shl i32 %0, 4 store i32 %shl, i32* @b, align 4 - %1 = load i32* @c, align 4 + %1 = load i32, i32* @c, align 4 %shl1 = shl i32 %1, 10 store i32 %shl1, i32* @d, align 4 @@ -29,11 +29,11 @@ entry: define i32 @shift_right() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %shr = lshr i32 %0, 4 store i32 %shr, i32* @j, align 4 - %1 = load i32* @m, align 4 + %1 = load i32, i32* @m, align 4 %shr1 = lshr i32 %1, 10 store i32 %shr1, i32* @n, align 4 diff --git a/test/CodeGen/Mips/micromips-subu16.ll b/test/CodeGen/Mips/micromips-subu16.ll new file mode 100644 index 0000000..d415574 --- /dev/null +++ b/test/CodeGen/Mips/micromips-subu16.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \ +; RUN: -relocation-model=pic -O3 < %s | FileCheck %s + +define i32 @main() { +entry: + %retval = alloca i32, align 4 + %a = alloca i32, align 4 + %b = alloca i32, align 4 + %c = alloca i32, align 4 + store i32 0, i32* %retval + %0 = load i32, i32* %b, align 4 + %1 = load i32, i32* %c, align 4 + %sub = sub nsw i32 %0, %1 + store i32 %sub, i32* %a, align 4 + ret i32 0 +} + +; CHECK: subu16 diff --git a/test/CodeGen/Mips/micromips-sw-lw-16.ll b/test/CodeGen/Mips/micromips-sw-lw-16.ll index bc09554..3583726 100644 --- a/test/CodeGen/Mips/micromips-sw-lw-16.ll +++ b/test/CodeGen/Mips/micromips-sw-lw-16.ll @@ -6,17 +6,17 @@ define void @bar(i32* %p) #0 { entry: %p.addr = alloca i32*, align 4 store i32* %p, i32** %p.addr, align 4 - %0 = load i32** %p.addr, align 4 - %1 = load i32* %0, align 4 + %0 = load i32*, i32** %p.addr, align 4 + %1 = load i32, i32* %0, align 4 %add = add nsw i32 7, %1 - %2 = load i32** %p.addr, align 4 + %2 = load i32*, i32** %p.addr, align 4 store i32 %add, i32* %2, align 4 - %3 = load i32** %p.addr, align 4 - %add.ptr = getelementptr inbounds i32* %3, i32 1 - %4 = load i32* %add.ptr, align 4 + %3 = load i32*, i32** %p.addr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %3, i32 1 + %4 = load i32, i32* %add.ptr, align 4 %add1 = add nsw i32 7, %4 - %5 = load i32** %p.addr, align 4 - %add.ptr2 = getelementptr inbounds i32* %5, i32 1 + %5 = load i32*, i32** %p.addr, align 4 + %add.ptr2 = getelementptr inbounds i32, i32* %5, i32 1 store i32 %add1, i32* %add.ptr2, align 4 ret void } diff --git a/test/CodeGen/Mips/micromips-xor16.ll b/test/CodeGen/Mips/micromips-xor16.ll index 9915112..53c75ac 100644 --- a/test/CodeGen/Mips/micromips-xor16.ll +++ b/test/CodeGen/Mips/micromips-xor16.ll @@ -8,8 +8,8 @@ entry: %b = alloca i32, align 4 %c = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* %b, align 4 - %1 = load i32* %c, align 4 + %0 = load i32, i32* %b, align 4 + %1 = load i32, i32* %c, align 4 %xor = xor i32 %0, %1 store i32 %xor, i32* %a, align 4 ret i32 0 diff --git a/test/CodeGen/Mips/mips16_32_8.ll b/test/CodeGen/Mips/mips16_32_8.ll index 2f5bc21..e79cda5 100644 --- a/test/CodeGen/Mips/mips16_32_8.ll +++ b/test/CodeGen/Mips/mips16_32_8.ll @@ -22,13 +22,13 @@ entry: define void @nofoo() #1 { entry: store i32 20, i32* @i, align 4 - %0 = load float* @x, align 4 - %1 = load float* @y, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @y, align 4 %add = fadd float %0, %1 store float %add, float* @f, align 4 - %2 = load float* @f, align 4 + %2 = load float, float* @f, align 4 %conv = fpext float %2 to double - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), double %conv) + %call = call i32 (i8*, ...)* 
@printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), double %conv) ret void } @@ -48,11 +48,11 @@ declare i32 @printf(i8*, ...) #2 define i32 @main() #3 { entry: call void @foo() - %0 = load i32* @i, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str1, i32 0, i32 0), i32 %0) + %0 = load i32, i32* @i, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str1, i32 0, i32 0), i32 %0) call void @nofoo() - %1 = load i32* @i, align 4 - %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str2, i32 0, i32 0), i32 %1) + %1 = load i32, i32* @i, align 4 + %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str2, i32 0, i32 0), i32 %1) ret i32 0 } diff --git a/test/CodeGen/Mips/mips16_fpret.ll b/test/CodeGen/Mips/mips16_fpret.ll index fe87604..0f09c41 100644 --- a/test/CodeGen/Mips/mips16_fpret.ll +++ b/test/CodeGen/Mips/mips16_fpret.ll @@ -11,7 +11,7 @@ define float @foox() { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 ret float %0 ; 1: .ent foox ; 1: lw $2, %lo(x)(${{[0-9]+}}) @@ -20,7 +20,7 @@ entry: define double @foodx() { entry: - %0 = load double* @dx, align 8 + %0 = load double, double* @dx, align 8 ret double %0 ; 1: .ent foodx ; 1: lw $2, %lo(dx)(${{[0-9]+}}) @@ -34,13 +34,13 @@ entry: define { float, float } @foocx() { entry: %retval = alloca { float, float }, align 4 - %cx.real = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 0) - %cx.imag = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 1) - %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0 - %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1 + %cx.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 0) + %cx.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 1) + %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0 + %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1 store float %cx.real, float* %real store float %cx.imag, float* %imag - %0 = load { float, float }* %retval + %0 = load { float, float }, { float, float }* %retval ret { float, float } %0 ; 1: .ent foocx ; 1: lw $2, %lo(cx)(${{[0-9]+}}) @@ -53,13 +53,13 @@ entry: define { double, double } @foodcx() { entry: %retval = alloca { double, double }, align 8 - %dcx.real = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 0) - %dcx.imag = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 1) - %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0 - %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1 + %dcx.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 0) + %dcx.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 1) + %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0 + %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1 store double %dcx.real, double* %real store double %dcx.imag, double* %imag - %0 = load { double, double }* %retval + %0 = load { double, double }, { double, double }* %retval ret { double, double } %0 ; 1: .ent foodcx ; 
1: lw ${{[0-9]}}, %lo(dcx)(${{[0-9]+}}) diff --git a/test/CodeGen/Mips/mips16ex.ll b/test/CodeGen/Mips/mips16ex.ll index a1a9919..25957fb 100644 --- a/test/CodeGen/Mips/mips16ex.ll +++ b/test/CodeGen/Mips/mips16ex.ll @@ -1,8 +1,10 @@ ; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 -;16: .cfi_personality +;16: main: ;16-NEXT: [[TMP:.*]]: -;16-NEXT: $eh_func_begin0 = ([[TMP]]) +;16-NEXT: $func_begin0 = ([[TMP]]) +;16-NEXT: .cfi_startproc +;16-NEXT: .cfi_personality @.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1 @_ZTIi = external constant i8* @.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1 @@ -14,7 +16,7 @@ entry: %ehselector.slot = alloca i32 %e = alloca i32, align 4 store i32 0, i32* %retval - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0)) %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind %0 = bitcast i8* %exception to i32* store i32 20, i32* %0 @@ -31,19 +33,19 @@ lpad: ; preds = %entry br label %catch.dispatch catch.dispatch: ; preds = %lpad - %sel = load i32* %ehselector.slot + %sel = load i32, i32* %ehselector.slot %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind %matches = icmp eq i32 %sel, %4 br i1 %matches, label %catch, label %eh.resume catch: ; preds = %catch.dispatch - %exn = load i8** %exn.slot + %exn = load i8*, i8** %exn.slot %5 = call i8* @__cxa_begin_catch(i8* %exn) nounwind %6 = bitcast i8* %5 to i32* - %exn.scalar = load i32* %6 + %exn.scalar = load i32, i32* %6 store i32 %exn.scalar, i32* %e, align 4 - %7 = load i32* %e, align 4 - %call2 = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8]* @.str1, i32 0, i32 0), i32 %7) + %7 = load i32, i32* %e, align 4 + %call2 = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str1, i32 0, i32 0), i32 %7) to label %invoke.cont unwind label %lpad1 invoke.cont: ; preds = %catch @@ -64,8 +66,8 @@ lpad1: ; preds = %catch br label %eh.resume eh.resume: ; preds = %lpad1, %catch.dispatch - %exn3 = load i8** %exn.slot - %sel4 = load i32* %ehselector.slot + %exn3 = load i8*, i8** %exn.slot + %sel4 = load i32, i32* %ehselector.slot %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0 %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1 resume { i8*, i32 } %lpad.val5 diff --git a/test/CodeGen/Mips/mips16fpe.ll b/test/CodeGen/Mips/mips16fpe.ll index 987980e..f8b916d 100644 --- a/test/CodeGen/Mips/mips16fpe.ll +++ b/test/CodeGen/Mips/mips16fpe.ll @@ -42,8 +42,8 @@ define void @test_addsf3() nounwind { entry: ;16hf-LABEL: test_addsf3: - %0 = load float* @x, align 4 - %1 = load float* @y, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @y, align 4 %add = fadd float %0, %1 store float %add, float* @addsf3_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}}) @@ -53,8 +53,8 @@ entry: define void @test_adddf3() nounwind { entry: ;16hf-LABEL: test_adddf3: - %0 = load double* @xd, align 8 - %1 = load double* @yd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @yd, align 8 %add = fadd double %0, %1 store double %add, double* @adddf3_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}}) @@ -64,8 +64,8 @@ entry: define void @test_subsf3() nounwind { entry: ;16hf-LABEL: test_subsf3: - %0 = 
load float* @x, align 4 - %1 = load float* @y, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @y, align 4 %sub = fsub float %0, %1 store float %sub, float* @subsf3_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}}) @@ -75,8 +75,8 @@ entry: define void @test_subdf3() nounwind { entry: ;16hf-LABEL: test_subdf3: - %0 = load double* @xd, align 8 - %1 = load double* @yd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @yd, align 8 %sub = fsub double %0, %1 store double %sub, double* @subdf3_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}}) @@ -86,8 +86,8 @@ entry: define void @test_mulsf3() nounwind { entry: ;16hf-LABEL: test_mulsf3: - %0 = load float* @x, align 4 - %1 = load float* @y, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @y, align 4 %mul = fmul float %0, %1 store float %mul, float* @mulsf3_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}}) @@ -97,8 +97,8 @@ entry: define void @test_muldf3() nounwind { entry: ;16hf-LABEL: test_muldf3: - %0 = load double* @xd, align 8 - %1 = load double* @yd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @yd, align 8 %mul = fmul double %0, %1 store double %mul, double* @muldf3_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}}) @@ -108,8 +108,8 @@ entry: define void @test_divsf3() nounwind { entry: ;16hf-LABEL: test_divsf3: - %0 = load float* @y, align 4 - %1 = load float* @x, align 4 + %0 = load float, float* @y, align 4 + %1 = load float, float* @x, align 4 %div = fdiv float %0, %1 store float %div, float* @divsf3_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}}) @@ -119,9 +119,9 @@ entry: define void @test_divdf3() nounwind { entry: ;16hf-LABEL: test_divdf3: - %0 = load double* @yd, align 8 + %0 = load double, double* @yd, align 8 %mul = fmul double %0, 2.000000e+00 - %1 = load double* @xd, align 8 + %1 = load double, double* @xd, align 8 %div = fdiv double %mul, %1 store double %div, double* @divdf3_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}}) @@ -131,7 +131,7 @@ entry: define void @test_extendsfdf2() nounwind { entry: ;16hf-LABEL: test_extendsfdf2: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %conv = fpext float %0 to double store double %conv, double* @extendsfdf2_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}}) @@ -141,7 +141,7 @@ entry: define void @test_truncdfsf2() nounwind { entry: ;16hf-LABEL: test_truncdfsf2: - %0 = load double* @xd2, align 8 + %0 = load double, double* @xd2, align 8 %conv = fptrunc double %0 to float store float %conv, float* @truncdfsf2_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}}) @@ -151,7 +151,7 @@ entry: define void @test_fix_truncsfsi() nounwind { entry: ;16hf-LABEL: test_fix_truncsfsi: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %conv = fptosi float %0 to i32 store i32 %conv, i32* @fix_truncsfsi_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}}) @@ -161,7 +161,7 @@ entry: define void @test_fix_truncdfsi() nounwind { entry: ;16hf-LABEL: test_fix_truncdfsi: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %conv = fptosi double %0 to i32 store i32 %conv, i32* @fix_truncdfsi_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}}) @@ -171,7 +171,7 @@ 
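; NOTE (editorial): the mips16fpe.ll hunks around this point pin the MIPS16
; hard-float call-stub path: each FP arithmetic op, conversion, and comparison
; is expected to lower to a __mips16_* runtime helper reached through %call16,
; as the ;16hf CHECK lines show. A hedged sketch of how the fcmp predicates in
; the following hunks map to helpers (names inferred from the file's visible
; naming pattern, not quoted from elided CHECK lines):
;   %cmp = fcmp oeq float %a, %b    ; -> __mips16_eqsf2
;   %cmp = fcmp une double %a, %b   ; -> __mips16_nedf2
;   %cmp = fcmp olt float %a, %b    ; -> __mips16_ltsf2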
entry: define void @test_floatsisf() nounwind { entry: ;16hf-LABEL: test_floatsisf: - %0 = load i32* @si, align 4 + %0 = load i32, i32* @si, align 4 %conv = sitofp i32 %0 to float store float %conv, float* @floatsisf_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}}) @@ -181,7 +181,7 @@ entry: define void @test_floatsidf() nounwind { entry: ;16hf-LABEL: test_floatsidf: - %0 = load i32* @si, align 4 + %0 = load i32, i32* @si, align 4 %conv = sitofp i32 %0 to double store double %conv, double* @floatsidf_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}}) @@ -191,7 +191,7 @@ entry: define void @test_floatunsisf() nounwind { entry: ;16hf-LABEL: test_floatunsisf: - %0 = load i32* @ui, align 4 + %0 = load i32, i32* @ui, align 4 %conv = uitofp i32 %0 to float store float %conv, float* @floatunsisf_result, align 4 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}}) @@ -201,7 +201,7 @@ entry: define void @test_floatunsidf() nounwind { entry: ;16hf-LABEL: test_floatunsidf: - %0 = load i32* @ui, align 4 + %0 = load i32, i32* @ui, align 4 %conv = uitofp i32 %0 to double store double %conv, double* @floatunsidf_result, align 8 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}}) @@ -211,8 +211,8 @@ entry: define void @test_eqsf2() nounwind { entry: ;16hf-LABEL: test_eqsf2: - %0 = load float* @x, align 4 - %1 = load float* @xx, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @xx, align 4 %cmp = fcmp oeq float %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @eqsf2_result, align 4 @@ -223,8 +223,8 @@ entry: define void @test_eqdf2() nounwind { entry: ;16hf-LABEL: test_eqdf2: - %0 = load double* @xd, align 8 - %1 = load double* @xxd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @xxd, align 8 %cmp = fcmp oeq double %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @eqdf2_result, align 4 @@ -235,8 +235,8 @@ entry: define void @test_nesf2() nounwind { entry: ;16hf-LABEL: test_nesf2: - %0 = load float* @x, align 4 - %1 = load float* @y, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @y, align 4 %cmp = fcmp une float %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @nesf2_result, align 4 @@ -247,8 +247,8 @@ entry: define void @test_nedf2() nounwind { entry: ;16hf-LABEL: test_nedf2: - %0 = load double* @xd, align 8 - %1 = load double* @yd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @yd, align 8 %cmp = fcmp une double %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @nedf2_result, align 4 @@ -259,10 +259,10 @@ entry: define void @test_gesf2() nounwind { entry: ;16hf-LABEL: test_gesf2: - %0 = load float* @x, align 4 - %1 = load float* @xx, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @xx, align 4 %cmp = fcmp oge float %0, %1 - %2 = load float* @y, align 4 + %2 = load float, float* @y, align 4 %cmp1 = fcmp oge float %2, %0 %and3 = and i1 %cmp, %cmp1 %and = zext i1 %and3 to i32 @@ -274,10 +274,10 @@ entry: define void @test_gedf2() nounwind { entry: ;16hf-LABEL: test_gedf2: - %0 = load double* @xd, align 8 - %1 = load double* @xxd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @xxd, align 8 %cmp = fcmp oge double %0, %1 - %2 = load double* @yd, align 8 + %2 = load double, double* @yd, align 8 %cmp1 = fcmp oge double %2, %0 %and3 = and i1 %cmp, %cmp1 %and = zext i1 %and3 to i32 @@ -289,10 +289,10 @@ entry: define void @test_ltsf2() 
nounwind { entry: ;16hf-LABEL: test_ltsf2: - %0 = load float* @x, align 4 - %1 = load float* @xx, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @xx, align 4 %lnot = fcmp uge float %0, %1 - %2 = load float* @y, align 4 + %2 = load float, float* @y, align 4 %cmp1 = fcmp olt float %0, %2 %and2 = and i1 %lnot, %cmp1 %and = zext i1 %and2 to i32 @@ -305,10 +305,10 @@ entry: define void @test_ltdf2() nounwind { entry: ;16hf-LABEL: test_ltdf2: - %0 = load double* @xd, align 8 - %1 = load double* @xxd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @xxd, align 8 %lnot = fcmp uge double %0, %1 - %2 = load double* @yd, align 8 + %2 = load double, double* @yd, align 8 %cmp1 = fcmp olt double %0, %2 %and2 = and i1 %lnot, %cmp1 %and = zext i1 %and2 to i32 @@ -321,10 +321,10 @@ entry: define void @test_lesf2() nounwind { entry: ;16hf-LABEL: test_lesf2: - %0 = load float* @x, align 4 - %1 = load float* @xx, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @xx, align 4 %cmp = fcmp ole float %0, %1 - %2 = load float* @y, align 4 + %2 = load float, float* @y, align 4 %cmp1 = fcmp ole float %0, %2 %and3 = and i1 %cmp, %cmp1 %and = zext i1 %and3 to i32 @@ -336,10 +336,10 @@ entry: define void @test_ledf2() nounwind { entry: ;16hf-LABEL: test_ledf2: - %0 = load double* @xd, align 8 - %1 = load double* @xxd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @xxd, align 8 %cmp = fcmp ole double %0, %1 - %2 = load double* @yd, align 8 + %2 = load double, double* @yd, align 8 %cmp1 = fcmp ole double %0, %2 %and3 = and i1 %cmp, %cmp1 %and = zext i1 %and3 to i32 @@ -351,10 +351,10 @@ entry: define void @test_gtsf2() nounwind { entry: ;16hf-LABEL: test_gtsf2: - %0 = load float* @x, align 4 - %1 = load float* @xx, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @xx, align 4 %lnot = fcmp ule float %0, %1 - %2 = load float* @y, align 4 + %2 = load float, float* @y, align 4 %cmp1 = fcmp ogt float %2, %0 %and2 = and i1 %lnot, %cmp1 %and = zext i1 %and2 to i32 @@ -366,10 +366,10 @@ entry: define void @test_gtdf2() nounwind { entry: ;16hf-LABEL: test_gtdf2: - %0 = load double* @xd, align 8 - %1 = load double* @xxd, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @xxd, align 8 %lnot = fcmp ule double %0, %1 - %2 = load double* @yd, align 8 + %2 = load double, double* @yd, align 8 %cmp1 = fcmp ogt double %2, %0 %and2 = and i1 %lnot, %cmp1 %and = zext i1 %and2 to i32 diff --git a/test/CodeGen/Mips/mips64-f128-call.ll b/test/CodeGen/Mips/mips64-f128-call.ll index 455e540..9a093e6 100644 --- a/test/CodeGen/Mips/mips64-f128-call.ll +++ b/test/CodeGen/Mips/mips64-f128-call.ll @@ -19,7 +19,7 @@ entry: define void @foo1() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 tail call void @foo2(fp128 %0) ret void } @@ -38,7 +38,7 @@ define fp128 @foo3() { entry: %call = tail call fp128 @foo4() store fp128 %call, fp128* @gld0, align 16 - %0 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld1, align 16 ret fp128 %0 } diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll index 6987d4a..0217c7c 100644 --- a/test/CodeGen/Mips/mips64-f128.ll +++ b/test/CodeGen/Mips/mips64-f128.ll @@ -18,8 +18,8 @@ define fp128 @addLD() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %add = fadd fp128 %0, %1 ret fp128 
%add } @@ -29,8 +29,8 @@ entry: define fp128 @subLD() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %sub = fsub fp128 %0, %1 ret fp128 %sub } @@ -40,8 +40,8 @@ entry: define fp128 @mulLD() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %mul = fmul fp128 %0, %1 ret fp128 %mul } @@ -51,8 +51,8 @@ entry: define fp128 @divLD() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %div = fdiv fp128 %0, %1 ret fp128 %div } @@ -247,7 +247,7 @@ entry: define fp128 @libcall1_fabsl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @fabsl(fp128 %0) nounwind readnone ret fp128 %call } @@ -259,7 +259,7 @@ declare fp128 @fabsl(fp128) #1 define fp128 @libcall1_ceill() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @ceill(fp128 %0) nounwind readnone ret fp128 %call } @@ -271,7 +271,7 @@ declare fp128 @ceill(fp128) #1 define fp128 @libcall1_sinl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @sinl(fp128 %0) nounwind ret fp128 %call } @@ -283,7 +283,7 @@ declare fp128 @sinl(fp128) #2 define fp128 @libcall1_cosl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @cosl(fp128 %0) nounwind ret fp128 %call } @@ -295,7 +295,7 @@ declare fp128 @cosl(fp128) #2 define fp128 @libcall1_expl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @expl(fp128 %0) nounwind ret fp128 %call } @@ -307,7 +307,7 @@ declare fp128 @expl(fp128) #2 define fp128 @libcall1_exp2l() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @exp2l(fp128 %0) nounwind ret fp128 %call } @@ -319,7 +319,7 @@ declare fp128 @exp2l(fp128) #2 define fp128 @libcall1_logl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @logl(fp128 %0) nounwind ret fp128 %call } @@ -331,7 +331,7 @@ declare fp128 @logl(fp128) #2 define fp128 @libcall1_log2l() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @log2l(fp128 %0) nounwind ret fp128 %call } @@ -343,7 +343,7 @@ declare fp128 @log2l(fp128) #2 define fp128 @libcall1_log10l() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @log10l(fp128 %0) nounwind ret fp128 %call } @@ -355,7 +355,7 @@ declare fp128 @log10l(fp128) #2 define fp128 @libcall1_nearbyintl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone ret fp128 %call } @@ -367,7 +367,7 @@ declare fp128 @nearbyintl(fp128) #1 define fp128 @libcall1_floorl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @floorl(fp128 %0) nounwind readnone ret fp128 %call } @@ -379,7 +379,7 @@ declare fp128 @floorl(fp128) #1 define fp128 @libcall1_sqrtl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call 
= tail call fp128 @sqrtl(fp128 %0) nounwind ret fp128 %call } @@ -391,7 +391,7 @@ declare fp128 @sqrtl(fp128) #2 define fp128 @libcall1_rintl() { entry: - %0 = load fp128* @gld0, align 16 + %0 = load fp128, fp128* @gld0, align 16 %call = tail call fp128 @rintl(fp128 %0) nounwind readnone ret fp128 %call } @@ -424,8 +424,8 @@ declare fp128 @llvm.powi.f128(fp128, i32) #3 define fp128 @libcall2_copysignl() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone ret fp128 %call } @@ -437,8 +437,8 @@ declare fp128 @copysignl(fp128, fp128) #1 define fp128 @libcall2_powl() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind ret fp128 %call } @@ -450,8 +450,8 @@ declare fp128 @powl(fp128, fp128) #2 define fp128 @libcall2_fmodl() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld1, align 16 %call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind ret fp128 %call } @@ -463,9 +463,9 @@ declare fp128 @fmodl(fp128, fp128) #2 define fp128 @libcall3_fmal() { entry: - %0 = load fp128* @gld0, align 16 - %1 = load fp128* @gld2, align 16 - %2 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld0, align 16 + %1 = load fp128, fp128* @gld2, align 16 + %2 = load fp128, fp128* @gld1, align 16 %3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1) ret fp128 %3 } @@ -539,19 +539,19 @@ entry: define fp128 @load_LD_LD() { entry: - %0 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld1, align 16 ret fp128 %0 } ; ALL-LABEL: load_LD_float: ; ALL: ld $[[R0:[0-9]+]], %got_disp(gf1) -; ALL: lwu $4, 0($[[R0]]) +; ALL: lw $4, 0($[[R0]]) ; ALL: ld $25, %call16(__extendsftf2) ; ALL: jalr $25 define fp128 @load_LD_float() { entry: - %0 = load float* @gf1, align 4 + %0 = load float, float* @gf1, align 4 %conv = fpext float %0 to fp128 ret fp128 %conv } @@ -564,7 +564,7 @@ entry: define fp128 @load_LD_double() { entry: - %0 = load double* @gd1, align 8 + %0 = load double, double* @gd1, align 8 %conv = fpext double %0 to fp128 ret fp128 %conv } @@ -579,7 +579,7 @@ entry: define void @store_LD_LD() { entry: - %0 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld1, align 16 store fp128 %0, fp128* @gld0, align 16 ret void } @@ -595,7 +595,7 @@ entry: define void @store_LD_float() { entry: - %0 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld1, align 16 %conv = fptrunc fp128 %0 to float store float %conv, float* @gf1, align 4 ret void @@ -612,7 +612,7 @@ entry: define void @store_LD_double() { entry: - %0 = load fp128* @gld1, align 16 + %0 = load fp128, fp128* @gld1, align 16 %conv = fptrunc fp128 %0 to double store double %conv, double* @gd1, align 8 ret void diff --git a/test/CodeGen/Mips/mips64directive.ll b/test/CodeGen/Mips/mips64directive.ll index c4ba534..b1052f7 100644 --- a/test/CodeGen/Mips/mips64directive.ll +++ b/test/CodeGen/Mips/mips64directive.ll @@ -6,7 +6,7 @@ ; CHECK: 8byte define i64 @foo1() nounwind readonly { entry: - %0 = load i64* @gl, align 8 + %0 = load i64, i64* @gl, align 8 ret i64 %0 } diff --git a/test/CodeGen/Mips/mips64fpldst.ll b/test/CodeGen/Mips/mips64fpldst.ll index 5d62156..55d5c77 100644 --- 
a/test/CodeGen/Mips/mips64fpldst.ll +++ b/test/CodeGen/Mips/mips64fpldst.ll @@ -16,7 +16,7 @@ entry: ; CHECK-N32: funcfl1 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0) ; CHECK-N32: lwc1 $f{{[0-9]+}}, 0($[[R0]]) - %0 = load float* @f0, align 4 + %0 = load float, float* @f0, align 4 ret float %0 } @@ -28,7 +28,7 @@ entry: ; CHECK-N32: funcfl2 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0) ; CHECK-N32: ldc1 $f{{[0-9]+}}, 0($[[R0]]) - %0 = load double* @d0, align 8 + %0 = load double, double* @d0, align 8 ret double %0 } @@ -40,7 +40,7 @@ entry: ; CHECK-N32: funcfs1 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0) ; CHECK-N32: swc1 $f{{[0-9]+}}, 0($[[R0]]) - %0 = load float* @f1, align 4 + %0 = load float, float* @f1, align 4 store float %0, float* @f0, align 4 ret void } @@ -53,7 +53,7 @@ entry: ; CHECK-N32: funcfs2 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0) ; CHECK-N32: sdc1 $f{{[0-9]+}}, 0($[[R0]]) - %0 = load double* @d1, align 8 + %0 = load double, double* @d1, align 8 store double %0, double* @d0, align 8 ret void } diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll index ed617be..d64cdce 100644 --- a/test/CodeGen/Mips/mips64instrs.ll +++ b/test/CodeGen/Mips/mips64instrs.ll @@ -123,8 +123,8 @@ entry: ; GPRMULDIV: ddiv $2, $[[T0]], $[[T1]] ; GPRMULDIV: teq $[[T1]], $zero, 7 - %0 = load i64* @gll0, align 8 - %1 = load i64* @gll1, align 8 + %0 = load i64, i64* @gll0, align 8 + %1 = load i64, i64* @gll1, align 8 %div = sdiv i64 %0, %1 ret i64 %div } @@ -144,8 +144,8 @@ entry: ; GPRMULDIV: ddivu $2, $[[T0]], $[[T1]] ; GPRMULDIV: teq $[[T1]], $zero, 7 - %0 = load i64* @gll0, align 8 - %1 = load i64* @gll1, align 8 + %0 = load i64, i64* @gll0, align 8 + %1 = load i64, i64* @gll1, align 8 %div = udiv i64 %0, %1 ret i64 %div } diff --git a/test/CodeGen/Mips/mips64intldst.ll b/test/CodeGen/Mips/mips64intldst.ll index 1ceafc1..658ab88 100644 --- a/test/CodeGen/Mips/mips64intldst.ll +++ b/test/CodeGen/Mips/mips64intldst.ll @@ -20,7 +20,7 @@ entry: ; CHECK-N32: func1 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c) ; CHECK-N32: lb ${{[0-9]+}}, 0($[[R0]]) - %0 = load i8* @c, align 4 + %0 = load i8, i8* @c, align 4 %conv = sext i8 %0 to i64 ret i64 %conv } @@ -33,7 +33,7 @@ entry: ; CHECK-N32: func2 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s) ; CHECK-N32: lh ${{[0-9]+}}, 0($[[R0]]) - %0 = load i16* @s, align 4 + %0 = load i16, i16* @s, align 4 %conv = sext i16 %0 to i64 ret i64 %conv } @@ -46,7 +46,7 @@ entry: ; CHECK-N32: func3 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i) ; CHECK-N32: lw ${{[0-9]+}}, 0($[[R0]]) - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %conv = sext i32 %0 to i64 ret i64 %conv } @@ -59,7 +59,7 @@ entry: ; CHECK-N32: func4 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l) ; CHECK-N32: ld ${{[0-9]+}}, 0($[[R0]]) - %0 = load i64* @l, align 8 + %0 = load i64, i64* @l, align 8 ret i64 %0 } @@ -71,7 +71,7 @@ entry: ; CHECK-N32: ufunc1 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(uc) ; CHECK-N32: lbu ${{[0-9]+}}, 0($[[R0]]) - %0 = load i8* @uc, align 4 + %0 = load i8, i8* @uc, align 4 %conv = zext i8 %0 to i64 ret i64 %conv } @@ -84,7 +84,7 @@ entry: ; CHECK-N32: ufunc2 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(us) ; CHECK-N32: lhu ${{[0-9]+}}, 0($[[R0]]) - %0 = load i16* @us, align 4 + %0 = load i16, i16* @us, align 4 %conv = zext i16 %0 to i64 ret i64 %conv } @@ -97,7 +97,7 @@ entry: ; CHECK-N32: ufunc3 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(ui) ; CHECK-N32: lwu ${{[0-9]+}}, 0($[[R0]]) - %0 = load i32* @ui, align 4 + %0 = load i32, i32* @ui, align 
4 %conv = zext i32 %0 to i64 ret i64 %conv } @@ -110,7 +110,7 @@ entry: ; CHECK-N32: sfunc1 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c) ; CHECK-N32: sb ${{[0-9]+}}, 0($[[R0]]) - %0 = load i64* @l1, align 8 + %0 = load i64, i64* @l1, align 8 %conv = trunc i64 %0 to i8 store i8 %conv, i8* @c, align 4 ret void @@ -124,7 +124,7 @@ entry: ; CHECK-N32: sfunc2 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s) ; CHECK-N32: sh ${{[0-9]+}}, 0($[[R0]]) - %0 = load i64* @l1, align 8 + %0 = load i64, i64* @l1, align 8 %conv = trunc i64 %0 to i16 store i16 %conv, i16* @s, align 4 ret void @@ -138,7 +138,7 @@ entry: ; CHECK-N32: sfunc3 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i) ; CHECK-N32: sw ${{[0-9]+}}, 0($[[R0]]) - %0 = load i64* @l1, align 8 + %0 = load i64, i64* @l1, align 8 %conv = trunc i64 %0 to i32 store i32 %conv, i32* @i, align 4 ret void @@ -152,7 +152,7 @@ entry: ; CHECK-N32: sfunc4 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l) ; CHECK-N32: sd ${{[0-9]+}}, 0($[[R0]]) - %0 = load i64* @l1, align 8 + %0 = load i64, i64* @l1, align 8 store i64 %0, i64* @l, align 8 ret void } diff --git a/test/CodeGen/Mips/mips64signextendsesf.ll b/test/CodeGen/Mips/mips64signextendsesf.ll new file mode 100644 index 0000000..a76388d --- /dev/null +++ b/test/CodeGen/Mips/mips64signextendsesf.ll @@ -0,0 +1,214 @@ +; RUN: llc -march=mips64 -mcpu=mips64r2 -soft-float -O2 < %s | FileCheck %s + +define void @foosf() #0 { +entry: + %in = alloca float, align 4 + %out = alloca float, align 4 + store volatile float 0xBFD59E1380000000, float* %in, align 4 + %in.0.in.0. = load volatile float, float* %in, align 4 + %rintf = tail call float @rintf(float %in.0.in.0.) #1 + store volatile float %rintf, float* %out, align 4 + ret void + +; CHECK-LABEL: foosf +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @rintf(float) + +define float @foosf1(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %call = tail call float @roundf(float %0) #2 + ret float %call + +; CHECK-LABEL: foosf1 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @roundf(float) #1 + +define float @foosf2(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %call = tail call float @truncf(float %0) #2 + ret float %call + +; CHECK-LABEL: foosf2 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @truncf(float) #1 + +define float @foosf3(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %call = tail call float @floorf(float %0) #2 + ret float %call + +; CHECK-LABEL: foosf3 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @floorf(float) #1 + +define float @foosf4(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %call = tail call float @nearbyintf(float %0) #2 + ret float %call + +; CHECK-LABEL: foosf4 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @nearbyintf(float) #1 + +define float @foosf5(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %mul = fmul float %0, undef + ret float %mul + +; CHECK-LABEL: foosf5 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +define float @foosf6(float* nocapture readonly %a) #0 { +entry: + %0 = load float, float* %a, align 4 + %sub = fsub float %0, undef + ret float %sub + +; CHECK-LABEL: foosf6 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +define float @foosf7(float* nocapture readonly %a) #0 { +entry: + %0 = load 
float, float* %a, align 4 + %add = fadd float %0, undef + ret float %add + +; CHECK-LABEL: foosf7 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +define float @foosf8(float* nocapture readonly %a) #0 { +entry: + %b = alloca float, align 4 + %b.0.b.0. = load volatile float, float* %b, align 4 + %0 = load float, float* %a, align 4 + %div = fdiv float %b.0.b.0., %0 + ret float %div + +; CHECK-LABEL: foosf8 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +define float @foosf9() #0 { +entry: + %b = alloca float, align 4 + %b.0.b.0. = load volatile float, float* %b, align 4 + %conv = fpext float %b.0.b.0. to double + %b.0.b.0.3 = load volatile float, float* %b, align 4 + %conv1 = fpext float %b.0.b.0.3 to double + %call = tail call double @pow(double %conv, double %conv1) #1 + %conv2 = fptrunc double %call to float + ret float %conv2 + +; CHECK-LABEL: foosf9 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare double @pow(double, double) #0 + +define float @foosf10() #0 { +entry: + %a = alloca float, align 4 + %a.0.a.0. = load volatile float, float* %a, align 4 + %conv = fpext float %a.0.a.0. to double + %call = tail call double @sin(double %conv) #1 + %conv1 = fptrunc double %call to float + ret float %conv1 + +; CHECK-LABEL: foosf10 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare double @sin(double) #0 + +define float @foosf11() #0 { +entry: + %b = alloca float, align 4 + %b.0.b.0. = load volatile float, float* %b, align 4 + %call = tail call float @ceilf(float %b.0.b.0.) #2 + ret float %call + +; CHECK-LABEL: foosf11 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @ceilf(float) #1 + +define float @foosf12() #0 { +entry: + %b = alloca float, align 4 + %a = alloca float, align 4 + %b.0.b.0. = load volatile float, float* %b, align 4 + %a.0.a.0. = load volatile float, float* %a, align 4 + %call = tail call float @fmaxf(float %b.0.b.0., float %a.0.a.0.) #2 + ret float %call + +; CHECK-LABEL: foosf12 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @fmaxf(float, float) #1 + +define float @foosf13() #0 { +entry: + %b = alloca float, align 4 + %a = alloca float, align 4 + %b.0.b.0. = load volatile float, float* %b, align 4 + %a.0.a.0. = load volatile float, float* %a, align 4 + %call = tail call float @fminf(float %b.0.b.0., float %a.0.a.0.) #2 + ret float %call + +; CHECK-LABEL: foosf13 +; CHECK-NOT: dsll +; CHECK-NOT: dsrl +; CHECK-NOT: lwu +} + +declare float @fminf(float, float) #1 + + +attributes #0 = { nounwind "use-soft-float"="true" } +attributes #1 = { nounwind readnone "use-soft-float"="true" }
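; The recurring change throughout this patch is mechanical: as of LLVM 3.7 the
; IR requires the pointee type as an explicit first operand on load and
; getelementptr. A minimal sketch of the new forms, assuming a hypothetical
; global @sketch_arr that is not part of this test suite:

@sketch_arr = global [4 x i32] zeroinitializer

define i32 @sketch() {
entry:
  ; new form: element type first, then the pointer operand
  %p = getelementptr inbounds [4 x i32], [4 x i32]* @sketch_arr, i32 0, i32 1
  ; new form: result type first, then the pointer operand
  %v = load i32, i32* %p
  ret i32 %v
}

; The corresponding pre-3.7 forms, as seen on the '-' lines of this patch:
;   %p = getelementptr inbounds [4 x i32]* @sketch_arr, i32 0, i32 1
;   %v = load i32* %p
; Qualifiers keep their positions in the new syntax, e.g.
; "load volatile float, float* %x".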
\ No newline at end of file diff --git a/test/CodeGen/Mips/mips64sinttofpsf.ll b/test/CodeGen/Mips/mips64sinttofpsf.ll index d3d4603..7bd75bb 100644 --- a/test/CodeGen/Mips/mips64sinttofpsf.ll +++ b/test/CodeGen/Mips/mips64sinttofpsf.ll @@ -5,7 +5,7 @@ define double @foo() #0 { entry: %x = alloca i32, align 4 store volatile i32 -32, i32* %x, align 4 - %0 = load volatile i32* %x, align 4 + %0 = load volatile i32, i32* %x, align 4 %conv = sitofp i32 %0 to double ret double %conv diff --git a/test/CodeGen/Mips/mipslopat.ll b/test/CodeGen/Mips/mipslopat.ll index 1f433b9..63b68c1 100644 --- a/test/CodeGen/Mips/mipslopat.ll +++ b/test/CodeGen/Mips/mipslopat.ll @@ -6,10 +6,10 @@ define void @simple_vol_file() nounwind { entry: - %tmp = load volatile i32** @stat_vol_ptr_int, align 4 + %tmp = load volatile i32*, i32** @stat_vol_ptr_int, align 4 %0 = bitcast i32* %tmp to i8* call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1) - %tmp1 = load i32** @stat_ptr_vol_int, align 4 + %tmp1 = load i32*, i32** @stat_ptr_vol_int, align 4 %1 = bitcast i32* %tmp1 to i8* call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1) ret void diff --git a/test/CodeGen/Mips/misha.ll b/test/CodeGen/Mips/misha.ll index 65d3b7b..23ad7f6 100644 --- a/test/CodeGen/Mips/misha.ll +++ b/test/CodeGen/Mips/misha.ll @@ -8,15 +8,15 @@ entry: br i1 %cmp8, label %for.end, label %for.body.lr.ph for.body.lr.ph: ; preds = %entry - %.pre = load i8* %to, align 1 + %.pre = load i8, i8* %to, align 1 br label %for.body for.body: ; preds = %for.body.lr.ph, %for.body %1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ] %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] %from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] - %incdec.ptr = getelementptr inbounds i8* %from.addr.09, i32 1 - %2 = load i8* %from.addr.09, align 1 + %incdec.ptr = getelementptr inbounds i8, i8* %from.addr.09, i32 1 + %2 = load i8, i8* %from.addr.09, align 1 %conv27 = zext i8 %2 to i32 %conv36 = zext i8 %1 to i32 %add = add nsw i32 %conv36, %conv27 @@ -44,15 +44,15 @@ entry: br i1 %cmp8, label %for.end, label %for.body.lr.ph for.body.lr.ph: ; preds = %entry - %.pre = load i16* %to, align 2 + %.pre = load i16, i16* %to, align 2 br label %for.body for.body: ; preds = %for.body.lr.ph, %for.body %1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ] %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] %from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] - %incdec.ptr = getelementptr inbounds i16* %from.addr.09, i32 1 - %2 = load i16* %from.addr.09, align 2 + %incdec.ptr = getelementptr inbounds i16, i16* %from.addr.09, i32 1 + %2 = load i16, i16* %from.addr.09, align 2 %conv27 = zext i16 %2 to i32 %conv36 = zext i16 %1 to i32 %add = add nsw i32 %conv36, %conv27 diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll index db653ea..c7eda33 100644 --- a/test/CodeGen/Mips/mno-ldc1-sdc1.ll +++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll @@ -111,7 +111,7 @@ define double @test_ldc1() { entry: - %0 = load double* @g0, align 8 + %0 = load double, double* @g0, align 8 ret double %0 } @@ -212,8 +212,8 @@ entry: define double @test_ldxc1(double* nocapture readonly %a, i32 %i) { entry: - %arrayidx = getelementptr inbounds double* %a, i32 %i - %0 = load double* %arrayidx, align 8 + %arrayidx = getelementptr inbounds double, double* %a, i32 %i + %0 = load double, double* %arrayidx, align 8 ret double %0 } @@ -243,7 +243,7 @@ entry: define void @test_sdxc1(double %b, double* 
nocapture %a, i32 %i) { entry: - %arrayidx = getelementptr inbounds double* %a, i32 %i + %arrayidx = getelementptr inbounds double, double* %a, i32 %i store double %b, double* %arrayidx, align 8 ret void } diff --git a/test/CodeGen/Mips/msa/2r.ll b/test/CodeGen/Mips/msa/2r.ll index da35ad8..501936c 100644 --- a/test/CodeGen/Mips/msa/2r.ll +++ b/test/CodeGen/Mips/msa/2r.ll @@ -8,7 +8,7 @@ define void @llvm_mips_nloc_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nloc_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0) store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES ret void @@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind define void @llvm_mips_nloc_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nloc_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0) store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES ret void @@ -50,7 +50,7 @@ declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind define void @llvm_mips_nloc_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nloc_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES ret void @@ -71,7 +71,7 @@ declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind define void @llvm_mips_nloc_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nloc_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES ret void @@ -92,7 +92,7 @@ declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind define void @llvm_mips_nlzc_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nlzc_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0) store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES ret void @@ -113,7 +113,7 @@ declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind define void @llvm_mips_nlzc_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nlzc_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0) store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES ret void @@ -134,7 +134,7 @@ declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind define void @llvm_mips_nlzc_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nlzc_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES ret void @@ -155,7 +155,7 @@ declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind define void @llvm_mips_nlzc_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nlzc_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES ret void @@ -176,7 +176,7 @@ declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind define void @llvm_mips_pcnt_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pcnt_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0) store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES ret void @@ -197,7 +197,7 @@ declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) 
nounwind define void @llvm_mips_pcnt_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pcnt_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0) store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES ret void @@ -218,7 +218,7 @@ declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind define void @llvm_mips_pcnt_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pcnt_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES ret void @@ -239,7 +239,7 @@ declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind define void @llvm_mips_pcnt_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pcnt_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll index 64e459e..ddcd3cf 100644 --- a/test/CodeGen/Mips/msa/2r_vector_scalar.ll +++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll @@ -15,7 +15,7 @@ define void @llvm_mips_fill_b_test() nounwind { entry: - %0 = load i32* @llvm_mips_fill_b_ARG1 + %0 = load i32, i32* @llvm_mips_fill_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0) store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES ret void @@ -35,7 +35,7 @@ declare <16 x i8> @llvm.mips.fill.b(i32) nounwind define void @llvm_mips_fill_h_test() nounwind { entry: - %0 = load i32* @llvm_mips_fill_h_ARG1 + %0 = load i32, i32* @llvm_mips_fill_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0) store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES ret void @@ -55,7 +55,7 @@ declare <8 x i16> @llvm.mips.fill.h(i32) nounwind define void @llvm_mips_fill_w_test() nounwind { entry: - %0 = load i32* @llvm_mips_fill_w_ARG1 + %0 = load i32, i32* @llvm_mips_fill_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES ret void @@ -75,7 +75,7 @@ declare <4 x i32> @llvm.mips.fill.w(i32) nounwind define void @llvm_mips_fill_d_test() nounwind { entry: - %0 = load i64* @llvm_mips_fill_d_ARG1 + %0 = load i64, i64* @llvm_mips_fill_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf.ll b/test/CodeGen/Mips/msa/2rf.ll index b361ef5..1dbfbda 100644 --- a/test/CodeGen/Mips/msa/2rf.ll +++ b/test/CodeGen/Mips/msa/2rf.ll @@ -8,7 +8,7 @@ define void @llvm_mips_flog2_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1 %1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES ret void @@ -29,7 +29,7 @@ declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind define void @llvm_mips_flog2_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1 %1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES ret void @@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind define void @flog2_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1 + %0 = load <4 x float>, <4 x float>* 
@llvm_mips_flog2_w_ARG1 %1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES ret void @@ -65,7 +65,7 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float> %val) define void @flog2_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1 %1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES ret void @@ -86,7 +86,7 @@ declare <2 x double> @llvm.log2.v2f64(<2 x double> %val) define void @llvm_mips_frint_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_frint_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1 %1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES ret void @@ -107,7 +107,7 @@ declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind define void @llvm_mips_frint_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_frint_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1 %1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES ret void @@ -125,7 +125,7 @@ declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind define void @frint_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_frint_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1 %1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES ret void @@ -143,7 +143,7 @@ declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind define void @frint_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_frint_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1 %1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES ret void @@ -164,7 +164,7 @@ declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind define void @llvm_mips_frcp_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_frcp_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_frcp_w_ARG1 %1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES ret void @@ -185,7 +185,7 @@ declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind define void @llvm_mips_frcp_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_frcp_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_frcp_d_ARG1 %1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES ret void @@ -206,7 +206,7 @@ declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind define void @llvm_mips_frsqrt_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_frsqrt_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_frsqrt_w_ARG1 %1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES ret void @@ -227,7 +227,7 @@ declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind define void @llvm_mips_frsqrt_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_frsqrt_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_frsqrt_d_ARG1 %1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES ret void @@ -248,7 +248,7 @@ 
declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind define void @llvm_mips_fsqrt_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1 %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES ret void @@ -269,7 +269,7 @@ declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind define void @llvm_mips_fsqrt_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1 %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES ret void @@ -287,7 +287,7 @@ declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind define void @fsqrt_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1 %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0) store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES ret void @@ -305,7 +305,7 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind define void @fsqrt_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1 %1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0) store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf_exup.ll b/test/CodeGen/Mips/msa/2rf_exup.ll index 8d7cc36..fd81ff6 100644 --- a/test/CodeGen/Mips/msa/2rf_exup.ll +++ b/test/CodeGen/Mips/msa/2rf_exup.ll @@ -9,7 +9,7 @@ define void @llvm_mips_fexupl_w_test() nounwind { entry: - %0 = load <8 x half>* @llvm_mips_fexupl_w_ARG1 + %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupl_w_ARG1 %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0) store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES ret void @@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind define void @llvm_mips_fexupl_d_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexupl_d_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupl_d_ARG1 %1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0) store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES ret void @@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind define void @llvm_mips_fexupr_w_test() nounwind { entry: - %0 = load <8 x half>* @llvm_mips_fexupr_w_ARG1 + %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupr_w_ARG1 %1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0) store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind define void @llvm_mips_fexupr_d_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexupr_d_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupr_d_ARG1 %1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0) store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf_float_int.ll b/test/CodeGen/Mips/msa/2rf_float_int.ll index 3b5dfda..3690158 100644 --- a/test/CodeGen/Mips/msa/2rf_float_int.ll +++ b/test/CodeGen/Mips/msa/2rf_float_int.ll @@ -9,7 +9,7 @@ define void @llvm_mips_ffint_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffint_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_s_w_ARG1 %1 = tail call <4 x float> 
@llvm.mips.ffint.s.w(<4 x i32> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES ret void @@ -30,7 +30,7 @@ declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind define void @llvm_mips_ffint_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ffint_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_s_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES ret void @@ -51,7 +51,7 @@ declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind define void @llvm_mips_ffint_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffint_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_u_w_ARG1 %1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES ret void @@ -72,7 +72,7 @@ declare <4 x float> @llvm.mips.ffint.u.w(<4 x i32>) nounwind define void @llvm_mips_ffint_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ffint_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_u_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf_fq.ll b/test/CodeGen/Mips/msa/2rf_fq.ll index 021dd93..05c649e 100644 --- a/test/CodeGen/Mips/msa/2rf_fq.ll +++ b/test/CodeGen/Mips/msa/2rf_fq.ll @@ -9,7 +9,7 @@ define void @llvm_mips_ffql_w_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ffql_w_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffql_w_ARG1 %1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES ret void @@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind define void @llvm_mips_ffql_d_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffql_d_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffql_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES ret void @@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind define void @llvm_mips_ffqr_w_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ffqr_w_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffqr_w_ARG1 %1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind define void @llvm_mips_ffqr_d_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffqr_d_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffqr_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf_int_float.ll b/test/CodeGen/Mips/msa/2rf_int_float.ll index 4665ae0..77d1404 100644 --- a/test/CodeGen/Mips/msa/2rf_int_float.ll +++ b/test/CodeGen/Mips/msa/2rf_int_float.ll @@ -10,7 +10,7 @@ define void @llvm_mips_fclass_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fclass_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES ret void @@ -31,7 +31,7 @@ declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind define void @llvm_mips_fclass_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1 + %0 = load <2 x double>, <2 x 
double>* @llvm_mips_fclass_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES ret void @@ -52,7 +52,7 @@ declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind define void @llvm_mips_ftrunc_s_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES ret void @@ -73,7 +73,7 @@ declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind define void @llvm_mips_ftrunc_s_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES ret void @@ -94,7 +94,7 @@ declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind define void @llvm_mips_ftrunc_u_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES ret void @@ -115,7 +115,7 @@ declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind define void @llvm_mips_ftrunc_u_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES ret void @@ -136,7 +136,7 @@ declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind define void @llvm_mips_ftint_s_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES ret void @@ -157,7 +157,7 @@ declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind define void @llvm_mips_ftint_s_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES ret void @@ -178,7 +178,7 @@ declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind define void @llvm_mips_ftint_u_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES ret void @@ -199,7 +199,7 @@ declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind define void @llvm_mips_ftint_u_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/2rf_tq.ll b/test/CodeGen/Mips/msa/2rf_tq.ll index 6f3c508..9b7f02a 100644 --- a/test/CodeGen/Mips/msa/2rf_tq.ll +++ b/test/CodeGen/Mips/msa/2rf_tq.ll @@ -10,8 +10,8 @@ define void @llvm_mips_ftq_h_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftq_h_ARG1 - %1 
= load <4 x float>* @llvm_mips_ftq_h_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES ret void @@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind define void @llvm_mips_ftq_w_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftq_w_ARG1 - %1 = load <2 x double>* @llvm_mips_ftq_w_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES ret void diff --git a/test/CodeGen/Mips/msa/3r-a.ll b/test/CodeGen/Mips/msa/3r-a.ll index dab15b6..db772f9 100644 --- a/test/CodeGen/Mips/msa/3r-a.ll +++ b/test/CodeGen/Mips/msa/3r-a.ll @@ -15,8 +15,8 @@ define void @llvm_mips_add_a_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES ret void @@ -40,8 +40,8 @@ declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_add_a_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES ret void @@ -65,8 +65,8 @@ declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_add_a_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES ret void @@ -90,8 +90,8 @@ declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_add_a_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES ret void @@ -115,8 +115,8 @@ declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_a_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES ret void @@ -140,8 +140,8 @@ declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_a_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1 - %1 = load 
<8 x i16>* @llvm_mips_adds_a_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES ret void @@ -165,8 +165,8 @@ declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_a_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES ret void @@ -190,8 +190,8 @@ declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_a_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES ret void @@ -215,8 +215,8 @@ declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES ret void @@ -240,8 +240,8 @@ declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES ret void @@ -265,8 +265,8 @@ declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES ret void @@ -290,8 +290,8 @@ declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES ret void @@ -315,8 +315,8 @@ declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* 
@llvm_mips_adds_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES ret void @@ -340,8 +340,8 @@ declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES ret void @@ -365,8 +365,8 @@ declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES ret void @@ -390,8 +390,8 @@ declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES ret void @@ -415,8 +415,8 @@ declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_addv_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES ret void @@ -440,8 +440,8 @@ declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_addv_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES ret void @@ -465,8 +465,8 @@ declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_addv_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES ret void @@ -490,8 +490,8 @@ declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_addv_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x 
i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES ret void @@ -512,8 +512,8 @@ declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind define void @addv_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2 %2 = add <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES ret void @@ -532,8 +532,8 @@ entry: define void @addv_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2 %2 = add <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES ret void @@ -552,8 +552,8 @@ entry: define void @addv_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2 %2 = add <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES ret void @@ -572,8 +572,8 @@ entry: define void @addv_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2 %2 = add <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES ret void @@ -595,8 +595,8 @@ entry: define void @llvm_mips_asub_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES ret void @@ -620,8 +620,8 @@ declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_asub_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES ret void @@ -645,8 +645,8 @@ declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_asub_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES ret void @@ -670,8 +670,8 @@ declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_asub_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES ret void @@ -695,8 +695,8 @@ 
declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_asub_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES ret void @@ -720,8 +720,8 @@ declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_asub_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES ret void @@ -745,8 +745,8 @@ declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_asub_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES ret void @@ -770,8 +770,8 @@ declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_asub_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES ret void @@ -795,8 +795,8 @@ declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_ave_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES ret void @@ -820,8 +820,8 @@ declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_ave_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES ret void @@ -845,8 +845,8 @@ declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_ave_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES ret void @@ -870,8 +870,8 @@ declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind define void 
@llvm_mips_ave_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES ret void @@ -895,8 +895,8 @@ declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_ave_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES ret void @@ -920,8 +920,8 @@ declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_ave_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES ret void @@ -945,8 +945,8 @@ declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_ave_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES ret void @@ -970,8 +970,8 @@ declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_ave_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES ret void @@ -995,8 +995,8 @@ declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_aver_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES ret void @@ -1020,8 +1020,8 @@ declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_aver_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES ret void @@ -1045,8 +1045,8 @@ declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_aver_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1 - %1 = load <4 x 
i32>* @llvm_mips_aver_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES ret void @@ -1070,8 +1070,8 @@ declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_aver_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES ret void @@ -1095,8 +1095,8 @@ declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_aver_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES ret void @@ -1120,8 +1120,8 @@ declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_aver_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES ret void @@ -1145,8 +1145,8 @@ declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_aver_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES ret void @@ -1170,8 +1170,8 @@ declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_aver_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/3r-b.ll b/test/CodeGen/Mips/msa/3r-b.ll index a05d19b..2ecdc42 100644 --- a/test/CodeGen/Mips/msa/3r-b.ll +++ b/test/CodeGen/Mips/msa/3r-b.ll @@ -10,8 +10,8 @@ define void @llvm_mips_bclr_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bclr_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bclr_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES ret void @@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_bclr_h_test() nounwind { entry: - %0 = load <8 x i16>* 
@llvm_mips_bclr_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bclr_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES ret void @@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_bclr_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bclr_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bclr_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES ret void @@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_bclr_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bclr_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bclr_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES ret void @@ -99,9 +99,9 @@ declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_binsl_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsl_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsl_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_binsl_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG3 %3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES ret void @@ -127,9 +127,9 @@ declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind define void @llvm_mips_binsl_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsl_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsl_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_binsl_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES ret void @@ -155,9 +155,9 @@ declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_binsl_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsl_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsl_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_binsl_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES ret void @@ -183,9 +183,9 @@ declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_binsl_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsl_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsl_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_binsl_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG2 + %2 = load <2 x i64>, <2 
x i64>* @llvm_mips_binsl_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
 ret void
@@ -211,9 +211,9 @@ declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_binsr_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_binsr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_binsr_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_binsr_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG3
 %3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
 store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
 ret void
@@ -239,9 +239,9 @@ declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_binsr_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_binsr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_binsr_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_binsr_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
 ret void
@@ -267,9 +267,9 @@ declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_binsr_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_binsr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_binsr_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_binsr_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
 ret void
@@ -295,9 +295,9 @@ declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_binsr_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_binsr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_binsr_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_binsr_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
 ret void
@@ -322,8 +322,8 @@ declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_bneg_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bneg_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
 ret void
@@ -344,8 +344,8 @@ declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_bneg_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_bneg_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
 ret void
@@ -366,8 +366,8 @@ declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_bneg_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_bneg_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
 ret void
@@ -388,8 +388,8 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_bneg_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_bneg_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
 ret void
@@ -410,8 +410,8 @@ declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_bset_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bset_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bset_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
 ret void
@@ -432,8 +432,8 @@ declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_bset_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_bset_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bset_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
 ret void
@@ -454,8 +454,8 @@ declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_bset_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_bset_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bset_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
 ret void
@@ -476,8 +476,8 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_bset_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_bset_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bset_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-c.ll b/test/CodeGen/Mips/msa/3r-c.ll
index 6ec92c2..a3913e0 100644
--- a/test/CodeGen/Mips/msa/3r-c.ll
+++ b/test/CodeGen/Mips/msa/3r-c.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_ceq_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
 ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_ceq_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
 ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_ceq_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_ceq_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
 ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_cle_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
 ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_cle_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
 ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_cle_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
 ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_cle_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
 ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_cle_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
 ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_cle_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
 ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_cle_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
 ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_cle_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
 ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_clt_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
 ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_clt_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
 ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_clt_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
 ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_clt_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
 ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_clt_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
 ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_clt_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
 ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_clt_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
 ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_clt_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-d.ll b/test/CodeGen/Mips/msa/3r-d.ll
index 0099554..4fc32b7 100644
--- a/test/CodeGen/Mips/msa/3r-d.ll
+++ b/test/CodeGen/Mips/msa/3r-d.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_div_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
 ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_div_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
 ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_div_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_div_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
 ret void
@@ -95,8 +95,8 @@ declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @div_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
 %2 = sdiv <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
 ret void
@@ -111,8 +111,8 @@ entry:
 define void @div_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
 %2 = sdiv <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
 ret void
@@ -127,8 +127,8 @@ entry:
 define void @div_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
 %2 = sdiv <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
 ret void
@@ -143,8 +143,8 @@ entry:
 define void @div_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
 %2 = sdiv <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
 ret void
@@ -163,8 +163,8 @@ entry:
 define void @llvm_mips_div_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
 ret void
@@ -185,8 +185,8 @@ declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_div_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
 ret void
@@ -207,8 +207,8 @@ declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_div_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
 ret void
@@ -229,8 +229,8 @@ declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_div_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
 ret void
@@ -248,8 +248,8 @@ declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
 define void @div_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
 %2 = udiv <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
 ret void
@@ -264,8 +264,8 @@ entry:
 define void @div_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
 %2 = udiv <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
 ret void
@@ -280,8 +280,8 @@ entry:
 define void @div_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
 %2 = udiv <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
 ret void
@@ -296,8 +296,8 @@ entry:
 define void @div_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
 %2 = udiv <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
 ret void
@@ -326,8 +326,8 @@ entry:
 define void @llvm_mips_dotp_s_h_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
 ret void
@@ -353,8 +353,8 @@ declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dotp_s_w_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
 ret void
@@ -377,8 +377,8 @@ declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dotp_s_d_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
 ret void
@@ -409,8 +409,8 @@ declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_dotp_u_h_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
 ret void
@@ -436,8 +436,8 @@ declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dotp_u_w_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
 ret void
@@ -460,8 +460,8 @@ declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dotp_u_d_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-i.ll b/test/CodeGen/Mips/msa/3r-i.ll
index 2ef3047..7147b75 100644
--- a/test/CodeGen/Mips/msa/3r-i.ll
+++ b/test/CodeGen/Mips/msa/3r-i.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_ilvev_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_ilvev_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvev_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
 ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_ilvev_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_ilvev_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvev_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
 ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_ilvev_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_ilvev_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvev_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_ilvev_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_ilvev_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvev_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
 ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_ilvl_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_ilvl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvl_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
 ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_ilvl_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_ilvl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvl_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
 ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_ilvl_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_ilvl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvl_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
 ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_ilvl_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_ilvl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvl_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
 ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_ilvod_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_ilvod_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvod_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
 ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_ilvod_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_ilvod_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvod_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
 ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_ilvod_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_ilvod_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvod_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
 ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_ilvod_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_ilvod_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvod_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
 ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_ilvr_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_ilvr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvr_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
 ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_ilvr_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_ilvr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvr_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
 ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_ilvr_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_ilvr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvr_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
 ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_ilvr_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_ilvr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvr_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-m.ll b/test/CodeGen/Mips/msa/3r-m.ll
index ddfd720..39b4f7d 100644
--- a/test/CodeGen/Mips/msa/3r-m.ll
+++ b/test/CodeGen/Mips/msa/3r-m.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_max_a_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
 ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_max_a_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
 ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_max_a_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_max_a_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
 ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_max_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
 ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_max_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
 ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_max_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
 ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_max_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
 ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_max_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
 ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_max_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
 ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_max_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
 ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_max_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
 ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_min_a_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
 ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_min_a_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
 ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_min_a_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
 ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_min_a_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
 ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_min_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
 ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_min_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
 ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_min_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
 ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_min_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
 ret void
@@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_min_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
 ret void
@@ -472,8 +472,8 @@ declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_min_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
 ret void
@@ -494,8 +494,8 @@ declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_min_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
 ret void
@@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_min_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
 ret void
@@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_mod_s_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
 ret void
@@ -560,8 +560,8 @@ declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_mod_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
 ret void
@@ -582,8 +582,8 @@ declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mod_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
 ret void
@@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_mod_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
 ret void
@@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_mod_u_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
 ret void
@@ -648,8 +648,8 @@ declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_mod_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
 ret void
@@ -670,8 +670,8 @@ declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mod_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
 ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_mod_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
 ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_mulv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
 ret void
@@ -736,8 +736,8 @@ declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_mulv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
 ret void
@@ -758,8 +758,8 @@ declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mulv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
 ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_mulv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
 ret void
@@ -798,8 +798,8 @@ declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
 define void @mulv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
 %2 = mul <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
 ret void
@@ -814,8 +814,8 @@ entry:
 define void @mulv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
 %2 = mul <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
 ret void
@@ -830,8 +830,8 @@ entry:
 define void @mulv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
 %2 = mul <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
 ret void
@@ -846,8 +846,8 @@ entry:
 define void @mulv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
 %2 = mul <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-p.ll b/test/CodeGen/Mips/msa/3r-p.ll
index 852023b..70b98aa 100644
--- a/test/CodeGen/Mips/msa/3r-p.ll
+++ b/test/CodeGen/Mips/msa/3r-p.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_pckev_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
 ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_pckev_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
 ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_pckev_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_pckev_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
 ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_pckod_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
 ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_pckod_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
 ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_pckod_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
 ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_pckod_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll
index 581c3bf..d04c5ff 100644
--- a/test/CodeGen/Mips/msa/3r-s.ll
+++ b/test/CodeGen/Mips/msa/3r-s.ll
@@ -11,9 +11,9 @@
 define void @llvm_mips_sld_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2
- %2 = load i32* @llvm_mips_sld_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_b_ARG3
 %3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
 store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
 ret void
@@ -39,9 +39,9 @@ declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind
 define void @llvm_mips_sld_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2
- %2 = load i32* @llvm_mips_sld_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
 ret void
@@ -67,9 +67,9 @@ declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind
 define void @llvm_mips_sld_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2
- %2 = load i32* @llvm_mips_sld_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
 ret void
@@ -95,9 +95,9 @@ declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind
 define void @llvm_mips_sld_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2
- %2 = load i32* @llvm_mips_sld_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
 ret void
@@ -122,8 +122,8 @@ declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind
 define void @llvm_mips_sll_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
 ret void
@@ -146,8 +146,8 @@ declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_sll_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
 ret void
@@ -170,8 +170,8 @@ declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_sll_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
 ret void
@@ -194,8 +194,8 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_sll_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
 ret void
@@ -214,8 +214,8 @@ declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
 define void @sll_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
 %2 = shl <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
 ret void
@@ -232,8 +232,8 @@ entry:
 define void @sll_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
 %2 = shl <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
 ret void
@@ -250,8 +250,8 @@ entry:
 define void @sll_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
 %2 = shl <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
 ret void
@@ -268,8 +268,8 @@ entry:
 define void @sll_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
 %2 = shl <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
 ret void
@@ -290,8 +290,8 @@ entry:
 define void @llvm_mips_sra_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
 ret void
@@ -314,8 +314,8 @@ declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_sra_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
 ret void
@@ -338,8 +338,8 @@ declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_sra_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
 ret void
@@ -362,8 +362,8 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_sra_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
 ret void
@@ -383,8 +383,8 @@ declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
 define void @sra_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
 %2 = ashr <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
 ret void
@@ -401,8 +401,8 @@ entry:
 define void @sra_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
 %2 = ashr <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
 ret void
@@ -419,8 +419,8 @@ entry:
 define void @sra_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
 %2 = ashr <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
 ret void
@@ -437,8 +437,8 @@ entry:
 define void @sra_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
 %2 = ashr <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
 ret void
@@ -459,8 +459,8 @@ entry:
 define void @llvm_mips_srar_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srar_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srar_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
 ret void
@@ -483,8 +483,8 @@ declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_srar_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srar_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srar_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
 ret void
@@ -507,8 +507,8 @@ declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_srar_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srar_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srar_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
 ret void
@@ -531,8 +531,8 @@ declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_srar_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srar_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srar_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
 ret void
@@ -555,8 +555,8 @@ declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_srl_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
 ret void
@@ -579,8 +579,8 @@ declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_srl_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
 ret void
@@ -603,8 +603,8 @@ declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_srl_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
 ret void
@@ -627,8 +627,8 @@ declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_srl_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
 ret void
@@ -651,8 +651,8 @@ declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_srlr_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srlr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srlr_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
 ret void
@@ -675,8 +675,8 @@ declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_srlr_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srlr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srlr_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG1
+ %1 =
load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES ret void @@ -699,8 +699,8 @@ declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_srlr_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_srlr_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_srlr_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES ret void @@ -723,8 +723,8 @@ declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_srlr_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_srlr_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_srlr_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES ret void @@ -744,8 +744,8 @@ declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind define void @srl_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2 %2 = lshr <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES ret void @@ -762,8 +762,8 @@ entry: define void @srl_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2 %2 = lshr <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES ret void @@ -780,8 +780,8 @@ entry: define void @srl_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2 %2 = lshr <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES ret void @@ -798,8 +798,8 @@ entry: define void @srl_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2 %2 = lshr <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES ret void @@ -820,8 +820,8 @@ entry: define void @llvm_mips_subs_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES ret void @@ -844,8 +844,8 @@ declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_subs_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* 
@llvm_mips_subs_s_h_RES ret void @@ -868,8 +868,8 @@ declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_subs_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES ret void @@ -892,8 +892,8 @@ declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_subs_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES ret void @@ -916,8 +916,8 @@ declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_subs_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES ret void @@ -940,8 +940,8 @@ declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_subs_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES ret void @@ -964,8 +964,8 @@ declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_subs_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES ret void @@ -988,8 +988,8 @@ declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_subs_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES ret void @@ -1012,8 +1012,8 @@ declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_subsus_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES ret void @@ -1036,8 +1036,8 @@ 
declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_subsus_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES ret void @@ -1060,8 +1060,8 @@ declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_subsus_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES ret void @@ -1084,8 +1084,8 @@ declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_subsus_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES ret void @@ -1108,8 +1108,8 @@ declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_subsuu_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES ret void @@ -1132,8 +1132,8 @@ declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_subsuu_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES ret void @@ -1156,8 +1156,8 @@ declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_subsuu_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES ret void @@ -1180,8 +1180,8 @@ declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_subsuu_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* 
 ret void
@@ -1204,8 +1204,8 @@ declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_subv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
 %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
 ret void
@@ -1228,8 +1228,8 @@ declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_subv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
 ret void
@@ -1252,8 +1252,8 @@ declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_subv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
 ret void
@@ -1276,8 +1276,8 @@ declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_subv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
 ret void
@@ -1297,8 +1297,8 @@ declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
 define void @subv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
 %2 = sub <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
 ret void
@@ -1315,8 +1315,8 @@ entry:
 define void @subv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
 %2 = sub <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
 ret void
@@ -1333,8 +1333,8 @@ entry:
 define void @subv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
 %2 = sub <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
 ret void
@@ -1351,8 +1351,8 @@ entry:
 define void @subv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
 %2 = sub <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r-v.ll b/test/CodeGen/Mips/msa/3r-v.ll
index c9693f9..2d36da4 100644
--- a/test/CodeGen/Mips/msa/3r-v.ll
+++ b/test/CodeGen/Mips/msa/3r-v.ll
@@ -11,9 +11,9 @@
 define void @llvm_mips_vshf_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG3
 %3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
 store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
 ret void
@@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_vshf_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
 ret void
@@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_vshf_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
 ret void
@@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_vshf_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r_4r.ll b/test/CodeGen/Mips/msa/3r_4r.ll
index b7fd728..73d104c 100644
--- a/test/CodeGen/Mips/msa/3r_4r.ll
+++ b/test/CodeGen/Mips/msa/3r_4r.ll
@@ -11,9 +11,9 @@
 define void @llvm_mips_maddv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG3
 %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
 store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
 ret void
@@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_maddv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
 ret void
@@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_maddv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
 ret void
@@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_maddv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
 ret void
@@ -111,9 +111,9 @@ declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 define void @llvm_mips_msubv_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG3
 %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
 store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
 ret void
@@ -136,9 +136,9 @@ declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_msubv_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
 ret void
@@ -161,9 +161,9 @@ declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_msubv_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
 ret void
@@ -186,9 +186,9 @@ declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_msubv_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r_4r_widen.ll b/test/CodeGen/Mips/msa/3r_4r_widen.ll
index 7063e45..fe248ee 100644
--- a/test/CodeGen/Mips/msa/3r_4r_widen.ll
+++ b/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -12,9 +12,9 @@
 define void @llvm_mips_dpadd_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
 ret void
@@ -37,9 +37,9 @@ declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dpadd_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
 ret void
@@ -62,9 +62,9 @@ declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dpadd_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
 ret void
@@ -87,9 +87,9 @@ declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_dpadd_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
 ret void
@@ -112,9 +112,9 @@ declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dpadd_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
 ret void
@@ -137,9 +137,9 @@ declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dpadd_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
 ret void
@@ -162,9 +162,9 @@ declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_dpsub_s_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
 ret void
@@ -187,9 +187,9 @@ declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dpsub_s_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
 ret void
@@ -212,9 +212,9 @@ declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dpsub_s_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
 ret void
@@ -237,9 +237,9 @@ declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_dpsub_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
 %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
 store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
 ret void
@@ -262,9 +262,9 @@ declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 define void @llvm_mips_dpsub_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
 %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
 store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
 ret void
@@ -287,9 +287,9 @@ declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_dpsub_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
 %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
 store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3r_splat.ll b/test/CodeGen/Mips/msa/3r_splat.ll
index 6b0cb26..56d26b0 100644
--- a/test/CodeGen/Mips/msa/3r_splat.ll
+++ b/test/CodeGen/Mips/msa/3r_splat.ll
@@ -11,7 +11,7 @@
 define void @llvm_mips_splat_b_test(i32 %a) nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splat_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
 ret void
@@ -32,7 +32,7 @@ declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_splat_h_test(i32 %a) nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splat_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
 ret void
@@ -53,7 +53,7 @@ declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_splat_w_test(i32 %a) nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splat_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
 ret void
@@ -74,7 +74,7 @@ declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_splat_d_test(i32 %a) nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splat_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3rf.ll b/test/CodeGen/Mips/msa/3rf.ll
index ae665af..dce0c27 100644
--- a/test/CodeGen/Mips/msa/3rf.ll
+++ b/test/CodeGen/Mips/msa/3rf.ll
@@ -9,8 +9,8 @@
 define void @llvm_mips_fadd_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
 %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
 store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
 ret void
@@ -31,8 +31,8 @@ declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fadd_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
 %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
 store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
 ret void
@@ -49,8 +49,8 @@ declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
 define void @fadd_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
 %2 = fadd <4 x float> %0, %1
 store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
 ret void
@@ -65,8 +65,8 @@ entry:
 define void @fadd_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
 %2 = fadd <2 x double> %0, %1
 store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
 ret void
@@ -85,8 +85,8 @@ entry:
 define void @llvm_mips_fdiv_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
 %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
 store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
 ret void
@@ -107,8 +107,8 @@ declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fdiv_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
 %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
 store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
 ret void
@@ -125,8 +125,8 @@ declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
 define void @fdiv_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
 %2 = fdiv <4 x float> %0, %1
 store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
 ret void
@@ -141,8 +141,8 @@ entry:
 define void @fdiv_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
 %2 = fdiv <2 x double> %0, %1
 store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
 ret void
@@ -161,8 +161,8 @@ entry:
 define void @llvm_mips_fmin_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fmin_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmin_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG2
 %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
 store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
 ret void
@@ -183,8 +183,8 @@ declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fmin_d_test() nounwind {
nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmin_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmin_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES ret void @@ -205,8 +205,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmin_a_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmin_a_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmin_a_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES ret void @@ -227,8 +227,8 @@ declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmin_a_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmin_a_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmin_a_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES ret void @@ -249,8 +249,8 @@ declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmax_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmax_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmax_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES ret void @@ -271,8 +271,8 @@ declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmax_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmax_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmax_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES ret void @@ -293,8 +293,8 @@ declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmax_a_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmax_a_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmax_a_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES ret void @@ -315,8 +315,8 @@ declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmax_a_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmax_a_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmax_a_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES ret void @@ 
-337,8 +337,8 @@ declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmul_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES ret void @@ -359,8 +359,8 @@ declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmul_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES ret void @@ -377,8 +377,8 @@ declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind define void @fmul_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2 %2 = fmul <4 x float> %0, %1 store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES ret void @@ -393,8 +393,8 @@ entry: define void @fmul_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2 %2 = fmul <2 x double> %0, %1 store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES ret void @@ -413,8 +413,8 @@ entry: define void @llvm_mips_fsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES ret void @@ -435,8 +435,8 @@ declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES ret void @@ -454,8 +454,8 @@ declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind define void @fsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2 %2 = fsub <4 x float> %0, %1 store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES ret void @@ -470,8 +470,8 @@ entry: define void @fsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1 + %1 = load <2 x double>, 
<2 x double>* @llvm_mips_fsub_d_ARG2 %2 = fsub <2 x double> %0, %1 store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES ret void diff --git a/test/CodeGen/Mips/msa/3rf_4rf.ll b/test/CodeGen/Mips/msa/3rf_4rf.ll index 67ef7fd..f1a3002 100644 --- a/test/CodeGen/Mips/msa/3rf_4rf.ll +++ b/test/CodeGen/Mips/msa/3rf_4rf.ll @@ -11,9 +11,9 @@ define void @llvm_mips_fmadd_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmadd_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmadd_w_ARG2 - %2 = load <4 x float>* @llvm_mips_fmadd_w_ARG3 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG2 + %2 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG3 %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2) store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES ret void @@ -36,9 +36,9 @@ declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) no define void @llvm_mips_fmadd_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmadd_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmadd_d_ARG2 - %2 = load <2 x double>* @llvm_mips_fmadd_d_ARG3 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG2 + %2 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG3 %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2) store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES ret void @@ -61,9 +61,9 @@ declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double> define void @llvm_mips_fmsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmsub_w_ARG2 - %2 = load <4 x float>* @llvm_mips_fmsub_w_ARG3 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG2 + %2 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG3 %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2) store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES ret void @@ -86,9 +86,9 @@ declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) no define void @llvm_mips_fmsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmsub_d_ARG2 - %2 = load <2 x double>* @llvm_mips_fmsub_d_ARG3 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG2 + %2 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG3 %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2) store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES ret void diff --git a/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/test/CodeGen/Mips/msa/3rf_4rf_q.ll index de28be0..704c4b7 100644 --- a/test/CodeGen/Mips/msa/3rf_4rf_q.ll +++ b/test/CodeGen/Mips/msa/3rf_4rf_q.ll @@ -11,9 +11,9 @@ define void @llvm_mips_madd_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store 
<8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES ret void @@ -36,9 +36,9 @@ declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_madd_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_madd_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES ret void @@ -61,9 +61,9 @@ declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_maddr_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES ret void @@ -86,9 +86,9 @@ declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_maddr_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES ret void @@ -111,9 +111,9 @@ declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_msub_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES ret void @@ -136,9 +136,9 @@ declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_msub_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES ret void @@ -161,9 +161,9 @@ declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_msubr_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3 + %0 = load <8 x i16>, <8 x 
i16>* @llvm_mips_msubr_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES ret void @@ -186,9 +186,9 @@ declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_msubr_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES ret void diff --git a/test/CodeGen/Mips/msa/3rf_exdo.ll b/test/CodeGen/Mips/msa/3rf_exdo.ll index 8a7f268..1b1b2e9 100644 --- a/test/CodeGen/Mips/msa/3rf_exdo.ll +++ b/test/CodeGen/Mips/msa/3rf_exdo.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fexdo_h_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexdo_h_ARG1 - %1 = load <4 x float>* @llvm_mips_fexdo_h_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG2 %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1) store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES ret void @@ -32,8 +32,8 @@ declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fexdo_w_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fexdo_w_ARG1 - %1 = load <2 x double>* @llvm_mips_fexdo_w_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES ret void diff --git a/test/CodeGen/Mips/msa/3rf_float_int.ll b/test/CodeGen/Mips/msa/3rf_float_int.ll index 7b01e17..2bd056d 100644 --- a/test/CodeGen/Mips/msa/3rf_float_int.ll +++ b/test/CodeGen/Mips/msa/3rf_float_int.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fexp2_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexp2_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_fexp2_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexp2_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_fexp2_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES ret void @@ -32,8 +32,8 @@ declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind define void @llvm_mips_fexp2_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fexp2_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_fexp2_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fexp2_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_fexp2_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES ret void diff --git a/test/CodeGen/Mips/msa/3rf_int_float.ll b/test/CodeGen/Mips/msa/3rf_int_float.ll index 5624771..545e543 100644 --- a/test/CodeGen/Mips/msa/3rf_int_float.ll +++ b/test/CodeGen/Mips/msa/3rf_int_float.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fcaf_w_test() nounwind { entry: - %0 = load 
- %1 = load <4 x float>* @llvm_mips_fcaf_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES
 ret void
@@ -32,8 +32,8 @@ declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcaf_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcaf_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcaf_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES
 ret void
@@ -54,8 +54,8 @@ declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fceq_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fceq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fceq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES
 ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fceq_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fceq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fceq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES
 ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcle_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcle_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcle_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES
 ret void
@@ -120,8 +120,8 @@ declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcle_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcle_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcle_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES
 ret void
@@ -142,8 +142,8 @@ declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fclt_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fclt_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fclt_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES
 ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fclt_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fclt_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fclt_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES
 ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcor_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcor_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcor_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES
 ret void
@@ -208,8 +208,8 @@ declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcor_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcor_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcor_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES
 ret void
@@ -230,8 +230,8 @@ declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcne_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcne_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcne_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES
 ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcne_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcne_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcne_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES
 ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcueq_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcueq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcueq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES
 ret void
@@ -296,8 +296,8 @@ declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcueq_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcueq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcueq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES
 ret void
@@ -318,8 +318,8 @@ declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcult_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcult_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcult_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES
 ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcult_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcult_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcult_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES
 ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcule_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcule_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcule_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES
 ret void
@@ -384,8 +384,8 @@ declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcule_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcule_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcule_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES
 ret void
@@ -406,8 +406,8 @@ declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcun_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcun_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcun_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES
 ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcun_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcun_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcun_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES
 ret void
@@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fcune_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fcune_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcune_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES
 ret void
@@ -472,8 +472,8 @@ declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fcune_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fcune_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcune_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES
 ret void
@@ -494,8 +494,8 @@ declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsaf_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsaf_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsaf_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES
 ret void
@@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsaf_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsaf_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsaf_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES
 ret void
@@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fseq_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fseq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fseq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES
 ret void
@@ -560,8 +560,8 @@ declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fseq_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fseq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fseq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES
 ret void
@@ -582,8 +582,8 @@ declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsle_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsle_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsle_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES
 ret void
@@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsle_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsle_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsle_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES
 ret void
@@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fslt_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fslt_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fslt_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
 ret void
@@ -648,8 +648,8 @@ declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fslt_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fslt_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fslt_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
 ret void
@@ -670,8 +670,8 @@ declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsor_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsor_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsor_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
 ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsor_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsor_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsor_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
 ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsne_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsne_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsne_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
 ret void
@@ -736,8 +736,8 @@ declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsne_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsne_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsne_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
 ret void
@@ -758,8 +758,8 @@ declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsueq_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsueq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsueq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
 ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsueq_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsueq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsueq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
 ret void
@@ -802,8 +802,8 @@ declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsult_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsult_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsult_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
 ret void
@@ -824,8 +824,8 @@ declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsult_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsult_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsult_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
 ret void
@@ -846,8 +846,8 @@ declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsule_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsule_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsule_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
 ret void
@@ -868,8 +868,8 @@ declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsule_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsule_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsule_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
 ret void
@@ -890,8 +890,8 @@ declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsun_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsun_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsun_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
 ret void
@@ -912,8 +912,8 @@ declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsun_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsun_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsun_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
 ret void
@@ -934,8 +934,8 @@ declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsune_w_test() nounwind {
 entry:
- %0 = load <4 x float>* @llvm_mips_fsune_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsune_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
 ret void
@@ -956,8 +956,8 @@ declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsune_d_test() nounwind {
 entry:
- %0 = load <2 x double>* @llvm_mips_fsune_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsune_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG2
 %2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
 store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/3rf_q.ll b/test/CodeGen/Mips/msa/3rf_q.ll
index f7000ee..c8b0a50 100644
--- a/test/CodeGen/Mips/msa/3rf_q.ll
+++ b/test/CodeGen/Mips/msa/3rf_q.ll
@@ -10,8 +10,8 @@ define void @llvm_mips_mul_q_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
 ret void
@@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mul_q_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
 ret void
@@ -54,8 +54,8 @@ declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_mulr_q_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG2
 %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
 ret void
@@ -76,8 +76,8 @@ declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mulr_q_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG2
 %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/arithmetic.ll b/test/CodeGen/Mips/msa/arithmetic.ll
index 09ee502..3ecd0e4 100644
--- a/test/CodeGen/Mips/msa/arithmetic.ll
+++ b/test/CodeGen/Mips/msa/arithmetic.ll
@@ -4,9 +4,9 @@ define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: add_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = add <16 x i8> %1, %2
 ; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: add_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = add <8 x i16> %1, %2
 ; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: add_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = add <4 x i32> %1, %2
 ; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: add_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = add <2 x i64> %1, %2
 ; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,7 +68,7 @@ define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: add_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -83,7 +83,7 @@ define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: add_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -98,7 +98,7 @@ define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: add_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
 ; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -112,7 +112,7 @@ define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: add_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = add <2 x i64> %1, <i64 1, i64 1>
 ; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -126,9 +126,9 @@ define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: sub_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = sub <16 x i8> %1, %2
 ; CHECK-DAG: subv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -142,9 +142,9 @@ define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: sub_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = sub <8 x i16> %1, %2
 ; CHECK-DAG: subv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -158,9 +158,9 @@ define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: sub_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = sub <4 x i32> %1, %2
 ; CHECK-DAG: subv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -174,9 +174,9 @@ define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: sub_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = sub <2 x i64> %1, %2
 ; CHECK-DAG: subv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -190,7 +190,7 @@ define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: sub_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -205,7 +205,7 @@ define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: sub_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -220,7 +220,7 @@ define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: sub_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
 ; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -234,7 +234,7 @@ define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: sub_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = sub <2 x i64> %1, <i64 1, i64 1>
 ; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -248,9 +248,9 @@ define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: mul_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = mul <16 x i8> %1, %2
 ; CHECK-DAG: mulv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -264,9 +264,9 @@ define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: mul_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = mul <8 x i16> %1, %2
 ; CHECK-DAG: mulv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -280,9 +280,9 @@ define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: mul_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = mul <4 x i32> %1, %2
 ; CHECK-DAG: mulv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -296,9 +296,9 @@ define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: mul_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = mul <2 x i64> %1, %2
 ; CHECK-DAG: mulv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -313,11 +313,11 @@ define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
 <16 x i8>* %c) nounwind {
 ; CHECK: maddv_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <16 x i8> %2, %3
 %5 = add <16 x i8> %4, %1
@@ -333,11 +333,11 @@ define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
 <8 x i16>* %c) nounwind {
 ; CHECK: maddv_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
 ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <8 x i16> %2, %3
 %5 = add <8 x i16> %4, %1
@@ -353,11 +353,11 @@ define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
 <4 x i32>* %c) nounwind {
 ; CHECK: maddv_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <4 x i32> %2, %3
 %5 = add <4 x i32> %4, %1
@@ -373,11 +373,11 @@ define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 <2 x i64>* %c) nounwind {
 ; CHECK: maddv_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <2 x i64> %2, %3
 %5 = add <2 x i64> %4, %1
@@ -393,11 +393,11 @@ define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
 <16 x i8>* %c) nounwind {
 ; CHECK: msubv_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <16 x i8> %2, %3
 %5 = sub <16 x i8> %1, %4
@@ -413,11 +413,11 @@ define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
 <8 x i16>* %c) nounwind {
 ; CHECK: msubv_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
 ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <8 x i16> %2, %3
 %5 = sub <8 x i16> %1, %4
@@ -433,11 +433,11 @@ define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
 <4 x i32>* %c) nounwind {
 ; CHECK: msubv_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <4 x i32> %2, %3
 %5 = sub <4 x i32> %1, %4
@@ -453,11 +453,11 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 <2 x i64>* %c) nounwind {
 ; CHECK: msubv_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
 %4 = mul <2 x i64> %2, %3
 %5 = sub <2 x i64> %1, %4
@@ -472,9 +472,9 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: div_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = sdiv <16 x i8> %1, %2
 ; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -488,9 +488,9 @@ define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: div_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = sdiv <8 x i16> %1, %2
 ; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -504,9 +504,9 @@ define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: div_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = sdiv <4 x i32> %1, %2
 ; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -520,9 +520,9 @@ define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: div_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = sdiv <2 x i64> %1, %2
 ; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -536,9 +536,9 @@ define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: div_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = udiv <16 x i8> %1, %2
 ; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: div_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = udiv <8 x i16> %1, %2
 ; CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,9 +568,9 @@ define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: div_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = udiv <4 x i32> %1, %2
 ; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -584,9 +584,9 @@ define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: div_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = udiv <2 x i64> %1, %2
 ; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -600,9 +600,9 @@ define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: mod_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = srem <16 x i8> %1, %2
 ; CHECK-DAG: mod_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -616,9 +616,9 @@ define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: mod_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = srem <8 x i16> %1, %2
 ; CHECK-DAG: mod_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -632,9 +632,9 @@ define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: mod_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = srem <4 x i32> %1, %2
 ; CHECK-DAG: mod_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -648,9 +648,9 @@ define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: mod_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = srem <2 x i64> %1, %2
 ; CHECK-DAG: mod_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -664,9 +664,9 @@ define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: mod_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = urem <16 x i8> %1, %2
 ; CHECK-DAG: mod_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -680,9 +680,9 @@ define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: mod_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = urem <8 x i16> %1, %2
 ; CHECK-DAG: mod_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -696,9 +696,9 @@ define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: mod_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = urem <4 x i32> %1, %2
 ; CHECK-DAG: mod_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -712,9 +712,9 @@ define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: mod_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = urem <2 x i64> %1, %2
 ; CHECK-DAG: mod_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll b/test/CodeGen/Mips/msa/arithmetic_float.ll
index 9aae284..d2ead53 100644
--- a/test/CodeGen/Mips/msa/arithmetic_float.ll
+++ b/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -4,9 +4,9 @@ define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: add_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = fadd <4 x float> %1, %2
 ; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
 ; CHECK: add_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = fadd <2 x double> %1, %2
 ; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
 define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: sub_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = fsub <4 x float> %1, %2
 ; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
 ; CHECK: sub_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = fsub <2 x double> %1, %2
 ; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,9 +68,9 @@ define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
 define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: mul_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = fmul <4 x float> %1, %2
 ; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -84,9 +84,9 @@ define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
 ; CHECK: mul_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = fmul <2 x double> %1, %2
 ; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -101,11 +101,11 @@ define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
 <4 x float>* %c) nounwind {
 ; CHECK: fma_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>* %c
+ %3 = load <4 x float>, <4 x float>* %c
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
 %4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2, <4 x float> %3)
@@ -121,11 +121,11 @@ define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
 <2 x double>* %c) nounwind {
 ; CHECK: fma_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>* %c
+ %3 = load <2 x double>, <2 x double>* %c
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
 %4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2, <2 x double> %3)
@@ -141,11 +141,11 @@ define void @fmsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
 <4 x float>* %c) nounwind {
 ; CHECK: fmsub_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>* %c
+ %3 = load <4 x float>, <4 x float>* %c
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
 %4 = fmul <4 x float> %2, %3
 %5 = fsub <4 x float> %1, %4
@@ -161,11 +161,11 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
 <2 x double>* %c) nounwind {
 ; CHECK: fmsub_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>* %c
+ %3 = load <2 x double>, <2 x double>* %c
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
 %4 = fmul <2 x double> %2, %3
 %5 = fsub <2 x double> %1, %4
@@ -180,9 +180,9 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
 define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: fdiv_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = fdiv <4 x float> %1, %2
 ; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -196,9 +196,9 @@ define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounw
 define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
 ; CHECK: fdiv_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = fdiv <2 x double> %1, %2
 ; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -212,7 +212,7 @@ define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) no
 define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 ; CHECK: fabs_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
 ; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -226,7 +226,7 @@ define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 ; CHECK: fabs_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
 ; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -240,7 +240,7 @@ define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 ; CHECK: fexp2_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
@@ -256,7 +256,7 @@ define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 ; CHECK: fexp2_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
@@ -272,7 +272,7 @@ define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
 ; CHECK: fexp2_v4f32_2:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
 %3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
@@ -289,7 +289,7 @@ define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
 ; CHECK: fexp2_v2f64_2:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
 %3 = fmul <2 x double> <double 2.0, double 2.0>, %2
@@ -306,7 +306,7 @@ define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 ; CHECK: fsqrt_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
 ; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -320,7 +320,7 @@ define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 ; CHECK: fsqrt_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
 ; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -334,7 +334,7 @@ define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: ffint_u_v4f32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = uitofp <4 x i32> %1 to <4 x float>
 ; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -348,7 +348,7 @@ define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: ffint_u_v2f64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = uitofp <2 x i64> %1 to <2 x double>
 ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -362,7 +362,7 @@ define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: ffint_s_v4f32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = sitofp <4 x i32> %1 to <4 x float>
 ; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -376,7 +376,7 @@ define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: ffint_s_v2f64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = sitofp <2 x i64> %1 to <2 x double>
 ; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
@@ -390,7 +390,7 @@ define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 ; CHECK: ftrunc_u_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = fptoui <4 x float> %1 to <4 x i32>
 ; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -404,7 +404,7 @@ define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
 ; CHECK: ftrunc_u_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = fptoui <2 x double> %1 to <2 x i64>
 ; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -418,7 +418,7 @@ define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
 define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 ; CHECK: ftrunc_s_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = fptosi <4 x float> %1 to <4 x i32>
 ; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -432,7 +432,7 @@ define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
 ; CHECK: ftrunc_s_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = fptosi <2 x double> %1 to <2 x i64>
 ; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
index dbdf42b..97525be 100644
--- a/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -258,7 +258,7 @@ define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
 define i32 @extract_sext_v16i8() nounwind {
 ; MIPS32-AE-LABEL: extract_sext_v16i8:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
 ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
 %2 = add <16 x i8> %1, %1
@@ -277,7 +277,7 @@ define i32 @extract_sext_v16i8() nounwind {
 define i32 @extract_sext_v8i16() nounwind {
 ; MIPS32-AE-LABEL: extract_sext_v8i16:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
 ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
 %2 = add <8 x i16> %1, %1
@@ -296,7 +296,7 @@ define i32 @extract_sext_v8i16() nounwind {
 define i32 @extract_sext_v4i32() nounwind {
 ; MIPS32-AE-LABEL: extract_sext_v4i32:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
 %2 = add <4 x i32> %1, %1
@@ -312,7 +312,7 @@ define i32 @extract_sext_v4i32() nounwind {
 define i64 @extract_sext_v2i64() nounwind {
 ; MIPS32-AE-LABEL: extract_sext_v2i64:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
 ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
 %2 = add <2 x i64> %1, %1
@@ -331,7 +331,7 @@ define i64 @extract_sext_v2i64() nounwind {
 define i32 @extract_zext_v16i8() nounwind {
 ; MIPS32-AE-LABEL: extract_zext_v16i8:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
 ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
 %2 = add <16 x i8> %1, %1
@@ -349,7 +349,7 @@ define i32 @extract_zext_v16i8() nounwind {
 define i32 @extract_zext_v8i16() nounwind {
 ; MIPS32-AE-LABEL: extract_zext_v8i16:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
 ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
 %2 = add <8 x i16> %1, %1
@@ -367,7 +367,7 @@ define i32 @extract_zext_v8i16() nounwind {
 define i32 @extract_zext_v4i32() nounwind {
 ; MIPS32-AE-LABEL: extract_zext_v4i32:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
 %2 = add <4 x i32> %1, %1
@@ -383,7 +383,7 @@ define i32 @extract_zext_v4i32() nounwind {
define i64 @extract_zext_v2i64() nounwind { ; MIPS32-AE-LABEL: extract_zext_v2i64: - %1 = load <2 x i64>* @v2i64 + %1 = load <2 x i64>, <2 x i64>* @v2i64 ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], %2 = add <2 x i64> %1, %1 @@ -401,14 +401,14 @@ define i64 @extract_zext_v2i64() nounwind { define i32 @extract_sext_v16i8_vidx() nounwind { ; MIPS32-AE-LABEL: extract_sext_v16i8_vidx: - %1 = load <16 x i8>* @v16i8 + %1 = load <16 x i8>, <16 x i8>* @v16i8 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)( ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <16 x i8> %1, %1 ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -425,14 +425,14 @@ define i32 @extract_sext_v16i8_vidx() nounwind { define i32 @extract_sext_v8i16_vidx() nounwind { ; MIPS32-AE-LABEL: extract_sext_v8i16_vidx: - %1 = load <8 x i16>* @v8i16 + %1 = load <8 x i16>, <8 x i16>* @v8i16 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)( ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <8 x i16> %1, %1 ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -449,14 +449,14 @@ define i32 @extract_sext_v8i16_vidx() nounwind { define i32 @extract_sext_v4i32_vidx() nounwind { ; MIPS32-AE-LABEL: extract_sext_v4i32_vidx: - %1 = load <4 x i32>* @v4i32 + %1 = load <4 x i32>, <4 x i32>* @v4i32 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)( ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <4 x i32> %1, %1 ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -472,14 +472,14 @@ define i32 @extract_sext_v4i32_vidx() nounwind { define i64 @extract_sext_v2i64_vidx() nounwind { ; MIPS32-AE-LABEL: extract_sext_v2i64_vidx: - %1 = load <2 x i64>* @v2i64 + %1 = load <2 x i64>, <2 x i64>* @v2i64 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)( ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <2 x i64> %1, %1 ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -497,14 +497,14 @@ define i64 @extract_sext_v2i64_vidx() nounwind { define i32 @extract_zext_v16i8_vidx() nounwind { ; MIPS32-AE-LABEL: extract_zext_v16i8_vidx: - %1 = load <16 x i8>* @v16i8 + %1 = load <16 x i8>, <16 x i8>* @v16i8 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)( ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <16 x i8> %1, %1 ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -521,14 +521,14 @@ define i32 @extract_zext_v16i8_vidx() nounwind { define i32 @extract_zext_v8i16_vidx() nounwind { ; MIPS32-AE-LABEL: extract_zext_v8i16_vidx: - %1 = load <8 x i16>* @v8i16 + %1 = load <8 x i16>, <8 x i16>* @v8i16 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)( ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <8 x i16> %1, %1 ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, 
i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -545,14 +545,14 @@ define i32 @extract_zext_v8i16_vidx() nounwind { define i32 @extract_zext_v4i32_vidx() nounwind { ; MIPS32-AE-LABEL: extract_zext_v4i32_vidx: - %1 = load <4 x i32>* @v4i32 + %1 = load <4 x i32>, <4 x i32>* @v4i32 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)( ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <4 x i32> %1, %1 ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -568,14 +568,14 @@ define i32 @extract_zext_v4i32_vidx() nounwind { define i64 @extract_zext_v2i64_vidx() nounwind { ; MIPS32-AE-LABEL: extract_zext_v2i64_vidx: - %1 = load <2 x i64>* @v2i64 + %1 = load <2 x i64>, <2 x i64>* @v2i64 ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)( ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = add <2 x i64> %1, %1 ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -593,7 +593,7 @@ define i64 @extract_zext_v2i64_vidx() nounwind { define void @insert_v16i8(i32 %a) nounwind { ; MIPS32-AE-LABEL: insert_v16i8: - %1 = load <16 x i8>* @v16i8 + %1 = load <16 x i8>, <16 x i8>* @v16i8 ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], %a2 = trunc i32 %a to i8 @@ -615,7 +615,7 @@ define void @insert_v16i8(i32 %a) nounwind { define void @insert_v8i16(i32 %a) nounwind { ; MIPS32-AE-LABEL: insert_v8i16: - %1 = load <8 x i16>* @v8i16 + %1 = load <8 x i16>, <8 x i16>* @v8i16 ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], %a2 = trunc i32 %a to i16 @@ -637,7 +637,7 @@ define void @insert_v8i16(i32 %a) nounwind { define void @insert_v4i32(i32 %a) nounwind { ; MIPS32-AE-LABEL: insert_v4i32: - %1 = load <4 x i32>* @v4i32 + %1 = load <4 x i32>, <4 x i32>* @v4i32 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], ; MIPS32-AE-NOT: andi @@ -656,7 +656,7 @@ define void @insert_v4i32(i32 %a) nounwind { define void @insert_v2i64(i64 %a) nounwind { ; MIPS32-AE-LABEL: insert_v2i64: - %1 = load <2 x i64>* @v2i64 + %1 = load <2 x i64>, <2 x i64>* @v2i64 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], ; MIPS32-AE-NOT: andi @@ -676,10 +676,10 @@ define void @insert_v2i64(i64 %a) nounwind { define void @insert_v16i8_vidx(i32 %a) nounwind { ; MIPS32-AE: insert_v16i8_vidx: - %1 = load <16 x i8>* @v16i8 + %1 = load <16 x i8>, <16 x i8>* @v16i8 ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -705,10 +705,10 @@ define void @insert_v16i8_vidx(i32 %a) nounwind { define void @insert_v8i16_vidx(i32 %a) nounwind { ; MIPS32-AE: insert_v8i16_vidx: - %1 = load <8 x i16>* @v8i16 + %1 = load <8 x i16>, <8 x i16>* @v8i16 ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -735,10 +735,10 @@ define void @insert_v8i16_vidx(i32 %a) nounwind { define void @insert_v4i32_vidx(i32 %a) nounwind { ; MIPS32-AE: insert_v4i32_vidx: - %1 = load <4 x i32>* @v4i32 + %1 = load <4 x i32>, <4 x i32>* @v4i32 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw 
[[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -762,10 +762,10 @@ define void @insert_v4i32_vidx(i32 %a) nounwind { define void @insert_v2i64_vidx(i64 %a) nounwind { ; MIPS32-AE: insert_v2i64_vidx: - %1 = load <2 x i64>* @v2i64 + %1 = load <2 x i64>, <2 x i64>* @v2i64 ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll index a0c9d29..53c1f11 100644 --- a/test/CodeGen/Mips/msa/basic_operations_float.ll +++ b/test/CodeGen/Mips/msa/basic_operations_float.ll @@ -75,7 +75,7 @@ define void @const_v2f64() nounwind { define void @nonconst_v4f32() nounwind { ; MIPS32-LABEL: nonconst_v4f32: - %1 = load float *@f32 + %1 = load float , float *@f32 %2 = insertelement <4 x float> undef, float %1, i32 0 %3 = insertelement <4 x float> %2, float %1, i32 1 %4 = insertelement <4 x float> %3, float %1, i32 2 @@ -91,7 +91,7 @@ define void @nonconst_v4f32() nounwind { define void @nonconst_v2f64() nounwind { ; MIPS32-LABEL: nonconst_v2f64: - %1 = load double *@f64 + %1 = load double , double *@f64 %2 = insertelement <2 x double> undef, double %1, i32 0 %3 = insertelement <2 x double> %2, double %1, i32 1 store volatile <2 x double> %3, <2 x double>*@v2f64 @@ -105,7 +105,7 @@ define void @nonconst_v2f64() nounwind { define float @extract_v4f32() nounwind { ; MIPS32-LABEL: extract_v4f32: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], %2 = fadd <4 x float> %1, %1 @@ -123,7 +123,7 @@ define float @extract_v4f32() nounwind { define float @extract_v4f32_elt0() nounwind { ; MIPS32-LABEL: extract_v4f32_elt0: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], %2 = fadd <4 x float> %1, %1 @@ -141,7 +141,7 @@ define float @extract_v4f32_elt0() nounwind { define float @extract_v4f32_elt2() nounwind { ; MIPS32-LABEL: extract_v4f32_elt2: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], %2 = fadd <4 x float> %1, %1 @@ -159,14 +159,14 @@ define float @extract_v4f32_elt2() nounwind { define float @extract_v4f32_vidx() nounwind { ; MIPS32-LABEL: extract_v4f32_vidx: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)( ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = fadd <4 x float> %1, %1 ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -180,7 +180,7 @@ define float @extract_v4f32_vidx() nounwind { define double @extract_v2f64() nounwind { ; MIPS32-LABEL: extract_v2f64: - %1 = load <2 x double>* @v2f64 + %1 = load <2 x double>, <2 x double>* @v2f64 ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], %2 = fadd <2 x double> %1, %1 @@ -203,7 +203,7 @@ define double @extract_v2f64() nounwind { define double @extract_v2f64_elt0() nounwind { ; MIPS32-LABEL: extract_v2f64_elt0: - %1 = load <2 x double>* @v2f64 + %1 = load <2 x double>, <2 x double>* @v2f64 ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], %2 = fadd <2 x double> %1, %1 @@ -224,14 +224,14 @@ define double @extract_v2f64_elt0() nounwind { define double @extract_v2f64_vidx() nounwind { ; 
MIPS32-LABEL: extract_v2f64_vidx: - %1 = load <2 x double>* @v2f64 + %1 = load <2 x double>, <2 x double>* @v2f64 ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)( ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]]) %2 = fadd <2 x double> %1, %1 ; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]] - %3 = load i32* @i32 + %3 = load i32, i32* @i32 ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -245,7 +245,7 @@ define double @extract_v2f64_vidx() nounwind { define void @insert_v4f32(float %a) nounwind { ; MIPS32-LABEL: insert_v4f32: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], %2 = insertelement <4 x float> %1, float %a, i32 1 @@ -262,7 +262,7 @@ define void @insert_v4f32(float %a) nounwind { define void @insert_v2f64(double %a) nounwind { ; MIPS32-LABEL: insert_v2f64: - %1 = load <2 x double>* @v2f64 + %1 = load <2 x double>, <2 x double>* @v2f64 ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], %2 = insertelement <2 x double> %1, double %a, i32 1 @@ -279,11 +279,11 @@ define void @insert_v2f64(double %a) nounwind { define void @insert_v4f32_vidx(float %a) nounwind { ; MIPS32-LABEL: insert_v4f32_vidx: - %1 = load <4 x float>* @v4f32 + %1 = load <4 x float>, <4 x float>* @v4f32 ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)( ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]]) - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) @@ -305,11 +305,11 @@ define void @insert_v4f32_vidx(float %a) nounwind { define void @insert_v2f64_vidx(double %a) nounwind { ; MIPS32-LABEL: insert_v2f64_vidx: - %1 = load <2 x double>* @v2f64 + %1 = load <2 x double>, <2 x double>* @v2f64 ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)( ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]]) - %2 = load i32* @i32 + %2 = load i32, i32* @i32 ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)( ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]]) diff --git a/test/CodeGen/Mips/msa/bit.ll b/test/CodeGen/Mips/msa/bit.ll index 59ddbe1..f005730 100644 --- a/test/CodeGen/Mips/msa/bit.ll +++ b/test/CodeGen/Mips/msa/bit.ll @@ -8,7 +8,7 @@ define void @llvm_mips_sat_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES ret void @@ -27,7 +27,7 @@ declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_sat_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES ret void @@ -46,7 +46,7 @@ declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_sat_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES ret void @@ -65,7 +65,7 @@ declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_sat_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7) 
store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
ret void
@@ -84,7 +84,7 @@ declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_sat_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
ret void
@@ -103,7 +103,7 @@ declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_sat_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
ret void
@@ -122,7 +122,7 @@ declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_sat_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
ret void
@@ -141,7 +141,7 @@ declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_sat_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
ret void
@@ -160,7 +160,7 @@ declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
define void @llvm_mips_slli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_slli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
ret void
@@ -179,7 +179,7 @@ declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
define void @llvm_mips_slli_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_slli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
ret void
@@ -198,7 +198,7 @@ declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
define void @llvm_mips_slli_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_slli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
ret void
@@ -217,7 +217,7 @@ declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
define void @llvm_mips_slli_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_slli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
ret void
@@ -236,7 +236,7 @@ declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srai_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srai_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
ret void
@@ -255,7 +255,7 @@ declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srai_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srai_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
ret void
@@ -274,7 +274,7 @@ declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srai_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srai_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
ret void
@@ -293,7 +293,7 @@ declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srai_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srai_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
ret void
@@ -312,7 +312,7 @@ declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srari_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srari_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
ret void
@@ -331,7 +331,7 @@ declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srari_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srari_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
ret void
@@ -350,7 +350,7 @@ declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srari_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srari_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
ret void
@@ -369,7 +369,7 @@ declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srari_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srari_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
ret void
@@ -388,7 +388,7 @@ declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
ret void
@@ -407,7 +407,7 @@ declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srli_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
ret void
@@ -426,7 +426,7 @@ declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srli_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
ret void
@@ -445,7 +445,7 @@ declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srli_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
ret void
@@ -464,7 +464,7 @@ declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srlri_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlri_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
ret void
@@ -483,7 +483,7 @@ declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srlri_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlri_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
ret void
@@ -502,7 +502,7 @@ declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srlri_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlri_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
ret void
@@ -521,7 +521,7 @@ declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srlri_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlri_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/bitcast.ll b/test/CodeGen/Mips/msa/bitcast.ll
index 8e880ec..837cc28 100644
--- a/test/CodeGen/Mips/msa/bitcast.ll
+++ b/test/CodeGen/Mips/msa/bitcast.ll
@@ -5,7 +5,7 @@
define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -29,7 +29,7 @@ entry:
define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -56,7 +56,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -77,7 +77,7 @@ entry:
define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -102,7 +102,7 @@ entry:
define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -127,7 +127,7 @@ entry:
define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -153,7 +153,7 @@ entry:
define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -179,7 +179,7 @@ entry:
define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -204,7 +204,7 @@ entry:
define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -230,7 +230,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -251,7 +251,7 @@ entry:
define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -276,7 +276,7 @@ entry:
define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -301,7 +301,7 @@ entry:
define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -326,7 +326,7 @@ entry:
define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -354,7 +354,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <16 x i8>
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* %dst
@@ -378,7 +378,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <8 x i16>
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* %dst
@@ -403,7 +403,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <8 x half>
store <8 x half> %1, <8 x half>* %dst
ret void
@@ -423,7 +423,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <4 x i32>
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* %dst
@@ -447,7 +447,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <4 x float>
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
store <4 x float> %2, <4 x float>* %dst
@@ -471,7 +471,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <2 x i64>
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* %dst
@@ -495,7 +495,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <2 x double>
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
store <2 x double> %2, <2 x double>* %dst
@@ -518,7 +518,7 @@ entry:
define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -543,7 +543,7 @@ entry:
define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -570,7 +570,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -591,7 +591,7 @@ entry:
define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -615,7 +615,7 @@ entry:
define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -639,7 +639,7 @@ entry:
define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -664,7 +664,7 @@ entry:
define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -689,7 +689,7 @@ entry:
define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -714,7 +714,7 @@ entry:
define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -741,7 +741,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -762,7 +762,7 @@ entry:
define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -786,7 +786,7 @@ entry:
define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -810,7 +810,7 @@ entry:
define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -835,7 +835,7 @@ entry:
define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -860,7 +860,7 @@ entry:
define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -886,7 +886,7 @@ entry:
define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -913,7 +913,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -934,7 +934,7 @@ entry:
define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -959,7 +959,7 @@ entry:
define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -984,7 +984,7 @@ entry:
define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -1008,7 +1008,7 @@ entry:
define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -1032,7 +1032,7 @@ entry:
define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -1058,7 +1058,7 @@ entry:
define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -1085,7 +1085,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind { entry: - %0 = load volatile <2 x double>* %src + %0 = load volatile <2 x double>, <2 x double>* %src %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0) %2 = bitcast <2 x double> %1 to <8 x half> store <8 x half> %2, <8 x half>* %dst @@ -1106,7 +1106,7 @@ entry: define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind { entry: - %0 = load volatile <2 x double>* %src + %0 = load volatile <2 x double>, <2 x double>* %src %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0) %2 = bitcast <2 x double> %1 to <4 x i32> %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2) @@ -1131,7 +1131,7 @@ entry: define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind { entry: - %0 = load volatile <2 x double>* %src + %0 = load volatile <2 x double>, <2 x double>* %src %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0) %2 = bitcast <2 x double> %1 to <4 x float> %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2) @@ -1156,7 +1156,7 @@ entry: define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind { entry: - %0 = load volatile <2 x double>* %src + %0 = load volatile <2 x double>, <2 x double>* %src %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0) %2 = bitcast <2 x double> %1 to <2 x i64> %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2) @@ -1180,7 +1180,7 @@ entry: define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind { entry: - %0 = load volatile <2 x double>* %src + %0 = load volatile <2 x double>, <2 x double>* %src %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0) %2 = bitcast <2 x double> %1 to <2 x double> %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2) diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll index 5d57198..2a260b2 100644 --- a/test/CodeGen/Mips/msa/bitwise.ll +++ b/test/CodeGen/Mips/msa/bitwise.ll @@ -4,9 +4,9 @@ define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: and_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = and <16 x i8> %1, %2 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -20,9 +20,9 @@ define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: and_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = and <8 x i16> %1, %2 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -36,9 +36,9 @@ define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: and_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = and <4 x i32> %1, %2 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -52,9 +52,9 @@ define void 
@and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: and_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = and <2 x i64> %1, %2 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -68,7 +68,7 @@ define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: and_v16i8_i: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> ; CHECK-DAG: andi.b [[R4:\$w[0-9]+]], [[R1]], 1 @@ -82,7 +82,7 @@ define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: and_v8i16_i: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1 @@ -97,7 +97,7 @@ define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: and_v4i32_i: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1 @@ -112,7 +112,7 @@ define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: and_v2i64_i: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = and <2 x i64> %1, <i64 1, i64 1> ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1 @@ -127,9 +127,9 @@ define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: or_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = or <16 x i8> %1, %2 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -143,9 +143,9 @@ define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: or_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = or <8 x i16> %1, %2 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -159,9 +159,9 @@ define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: or_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = or <4 x i32> %1, %2 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -175,9 +175,9 @@ define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) 
nounwind { define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: or_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = or <2 x i64> %1, %2 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -191,7 +191,7 @@ define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: or_v16i8_i: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3> ; CHECK-DAG: ori.b [[R4:\$w[0-9]+]], [[R1]], 3 @@ -205,7 +205,7 @@ define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: or_v8i16_i: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3> ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3 @@ -220,7 +220,7 @@ define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: or_v4i32_i: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3> ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3 @@ -235,7 +235,7 @@ define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: or_v2i64_i: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = or <2 x i64> %1, <i64 3, i64 3> ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3 @@ -250,9 +250,9 @@ define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: nor_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = or <16 x i8> %1, %2 %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -267,9 +267,9 @@ define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: nor_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = or <8 x i16> %1, %2 %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> @@ -284,9 +284,9 @@ define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: nor_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = or <4 x i32> %1, %2 %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1> @@ -301,9 +301,9 @@ define 
void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: nor_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = or <2 x i64> %1, %2 %4 = xor <2 x i64> %3, <i64 -1, i64 -1> @@ -318,7 +318,7 @@ define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: nor_v16i8_i: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -333,7 +333,7 @@ define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: nor_v8i16_i: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> @@ -349,7 +349,7 @@ define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: nor_v4i32_i: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1> @@ -365,7 +365,7 @@ define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: nor_v2i64_i: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = or <2 x i64> %1, <i64 1, i64 1> %3 = xor <2 x i64> %2, <i64 -1, i64 -1> @@ -381,9 +381,9 @@ define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: xor_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = xor <16 x i8> %1, %2 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -397,9 +397,9 @@ define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: xor_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = xor <8 x i16> %1, %2 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -413,9 +413,9 @@ define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: xor_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = 
xor <4 x i32> %1, %2 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -429,9 +429,9 @@ define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: xor_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = xor <2 x i64> %1, %2 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -445,7 +445,7 @@ define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: xor_v16i8_i: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3> ; CHECK-DAG: xori.b [[R4:\$w[0-9]+]], [[R1]], 3 @@ -459,7 +459,7 @@ define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: xor_v8i16_i: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3> ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3 @@ -474,7 +474,7 @@ define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: xor_v4i32_i: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3> ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3 @@ -489,7 +489,7 @@ define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: xor_v2i64_i: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = xor <2 x i64> %1, <i64 3, i64 3> ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3 @@ -504,9 +504,9 @@ define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: sll_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shl <16 x i8> %1, %2 ; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -520,9 +520,9 @@ define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: sll_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shl <8 x i16> %1, %2 ; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -536,9 +536,9 @@ define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: sll_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shl <4 x i32> %1, %2 ; CHECK-DAG: sll.w 
; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: sll_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> %1, %2
; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,7 +568,7 @@ define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: sll_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: slli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -582,7 +582,7 @@ define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: sll_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: slli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -596,7 +596,7 @@ define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: sll_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: slli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -610,7 +610,7 @@ define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: sll_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shl <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: slli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -624,9 +624,9 @@ define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: sra_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <16 x i8> %1, %2
; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -640,9 +640,9 @@ define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: sra_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <8 x i16> %1, %2
; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -656,9 +656,9 @@ define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: sra_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <4 x i32> %1, %2
; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -672,9 +672,9 @@ define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: sra_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <2 x i64> %1, %2
; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -688,7 +688,7 @@ define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: sra_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: srai.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -702,7 +702,7 @@ define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: sra_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: srai.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -716,7 +716,7 @@ define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: sra_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: srai.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -730,7 +730,7 @@ define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: sra_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -744,9 +744,9 @@ define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: srl_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <16 x i8> %1, %2
; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -760,9 +760,9 @@ define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: srl_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <8 x i16> %1, %2
; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -776,9 +776,9 @@ define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: srl_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <4 x i32> %1, %2
; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -792,9 +792,9 @@ define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: srl_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <2 x i64> %1, %2
; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -808,7 +808,7 @@ define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: srl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: srli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -822,7 +822,7 @@ define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: srl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: srli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -836,7 +836,7 @@ define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: srl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: srli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -850,7 +850,7 @@ define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: srl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: srli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -864,7 +864,7 @@ define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ctpop_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <16 x i8> @llvm.ctpop.v16i8 (<16 x i8> %1)
; CHECK-DAG: pcnt.b [[R3:\$w[0-9]+]], [[R1]]
@@ -878,7 +878,7 @@ define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ctpop_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <8 x i16> @llvm.ctpop.v8i16 (<8 x i16> %1)
; CHECK-DAG: pcnt.h [[R3:\$w[0-9]+]], [[R1]]
@@ -892,7 +892,7 @@ define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ctpop_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x i32> @llvm.ctpop.v4i32 (<4 x i32> %1)
; CHECK-DAG: pcnt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -906,7 +906,7 @@ define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ctpop_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x i64> @llvm.ctpop.v2i64 (<2 x i64> %1)
; CHECK-DAG: pcnt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -920,7 +920,7 @@ define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ctlz_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
@@ -934,7 +934,7 @@ define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ctlz_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
@@ -948,7 +948,7 @@ define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ctlz_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
@@ -962,7 +962,7 @@ define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ctlz_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
@@ -976,11 +976,11 @@ define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
; CHECK: bsel_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %m
+ %3 = load <16 x i8>, <16 x i8>* %m
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -1002,9 +1002,9 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
; CHECK: bsel_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %m
+ %2 = load <16 x i8>, <16 x i8>* %m
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($6)
%3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -1027,9 +1027,9 @@ define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind
define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bsel_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>
@@ -1048,9 +1048,9 @@ define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bsel_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
%4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
@@ -1067,9 +1067,9 @@ define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bsel_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 6, i64 6>
%4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
@@ -1086,9 +1086,9 @@ define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: binsl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192, i8 192, i8 192, i8 192, i8 192,
@@ -1110,9 +1110,9 @@ define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: binsl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152, i16 49152, i16 49152, i16 49152, i16 49152>
@@ -1130,9 +1130,9 @@ define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: binsl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
%4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
@@ -1148,9 +1148,9 @@ define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: binsl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
%4 = and <2 x i64> %2, <i64 7, i64 7>
@@ -1170,9 +1170,9 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: binsr_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
@@ -1192,9 +1192,9 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: binsr_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1212,9 +1212,9 @@ define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: binsr_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
%4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
@@ -1230,9 +1230,9 @@ define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: binsr_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 3, i64 3>
%4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
@@ -1248,9 +1248,9 @@ define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bclr_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -1266,9 +1266,9 @@ define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bclr_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -1284,9 +1284,9 @@ define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bclr_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -1302,9 +1302,9 @@ define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bclr_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %3, <i64 -1, i64 -1>
@@ -1320,9 +1320,9 @@ define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bset_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = or <16 x i8> %1, %3
@@ -1337,9 +1337,9 @@ define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bset_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = or <8 x i16> %1, %3
@@ -1354,9 +1354,9 @@ define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bset_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = or <4 x i32> %1, %3
@@ -1371,9 +1371,9 @@ define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bset_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = or <2 x i64> %1, %3
@@ -1388,9 +1388,9 @@ define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bneg_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %1, %3
@@ -1405,9 +1405,9 @@ define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bneg_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %1, %3
@@ -1422,9 +1422,9 @@ define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bneg_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %1, %3
@@ -1439,9 +1439,9 @@ define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bneg_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %1, %3
@@ -1456,7 +1456,7 @@ define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bclri_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = xor <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -1473,7 +1473,7 @@ define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bclri_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = xor <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -1489,7 +1489,7 @@ define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bclri_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = xor <4 x i32> <i32 8, i32 8, i32 8, i32 8>, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -1505,7 +1505,7 @@ define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bclri_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = xor <2 x i64> <i64 8, i64 8>, <i64 -1, i64 -1>
@@ -1521,7 +1521,7 @@ define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bseti_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
; CHECK-DAG: bseti.b [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1535,7 +1535,7 @@ define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bseti_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
; CHECK-DAG: bseti.h [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1549,7 +1549,7 @@ define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bseti_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
; CHECK-DAG: bseti.w [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1563,7 +1563,7 @@ define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bseti_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = or <2 x i64> %1, <i64 8, i64 8>
; CHECK-DAG: bseti.d [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1577,7 +1577,7 @@ define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bnegi_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
; CHECK-DAG: bnegi.b [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1591,7 +1591,7 @@ define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bnegi_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
; CHECK-DAG: bnegi.h [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1605,7 +1605,7 @@ define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bnegi_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
; CHECK-DAG: bnegi.w [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1619,7 +1619,7 @@ define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bnegi_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = xor <2 x i64> %1, <i64 8, i64 8>
; CHECK-DAG: bnegi.d [[R3:\$w[0-9]+]], [[R1]], 3
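The rewrite applied throughout these test updates is purely syntactic: load now spells out the loaded type explicitly, ahead of the pointer operand, rather than deriving it from the pointer's type. A minimal sketch of the old and new forms in LLVM IR, using a hypothetical function @example that is not part of this patch:

define <2 x i64> @example(<2 x i64>* %p) {
  ; old form, accepted before this change:  %v = load <2 x i64>* %p
  ; new form: the loaded type is written first, then the typed pointer operand
  %v = load <2 x i64>, <2 x i64>* %p
  ret <2 x i64> %v
}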
diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll
index 87ca148..bc4f6e7 100644
--- a/test/CodeGen/Mips/msa/compare.ll
+++ b/test/CodeGen/Mips/msa/compare.ll
@@ -4,9 +4,9 @@
define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ceq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -21,9 +21,9 @@ define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ceq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -38,9 +38,9 @@ define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ceq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -55,9 +55,9 @@ define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ceq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -72,9 +72,9 @@ define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cle_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -89,9 +89,9 @@ define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cle_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -106,9 +106,9 @@ define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cle_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -123,9 +123,9 @@ define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cle_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -140,9 +140,9 @@ define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cle_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -157,9 +157,9 @@ define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cle_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -174,9 +174,9 @@ define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cle_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -191,9 +191,9 @@ define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cle_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -208,9 +208,9 @@ define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: clt_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -225,9 +225,9 @@ define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: clt_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -242,9 +242,9 @@ define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: clt_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -259,9 +259,9 @@ define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: clt_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -276,9 +276,9 @@ define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: clt_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -293,9 +293,9 @@ define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: clt_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -310,9 +310,9 @@ define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: clt_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -327,9 +327,9 @@ define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: clt_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -345,9 +345,9 @@ define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; issues in this area.
define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cne_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -365,9 +365,9 @@ define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cne_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -387,9 +387,9 @@ define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cne_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -409,9 +409,9 @@ define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cne_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -429,7 +429,7 @@ define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ceqi_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -444,7 +444,7 @@ define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ceqi_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -459,7 +459,7 @@ define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ceqi_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -474,7 +474,7 @@ define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ceqi_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -489,7 +489,7 @@ define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clei_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -504,7 +504,7 @@ define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clei_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -519,7 +519,7 @@ define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clei_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -534,7 +534,7 @@ define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clei_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -549,7 +549,7 @@ define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clei_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -564,7 +564,7 @@ define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clei_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -579,7 +579,7 @@ define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clei_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -594,7 +594,7 @@ define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clei_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -609,7 +609,7 @@ define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clti_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -624,7 +624,7 @@ define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clti_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -639,7 +639,7 @@ define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clti_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -654,7 +654,7 @@ define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clti_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -669,7 +669,7 @@ define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clti_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -684,7 +684,7 @@ define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clti_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -699,7 +699,7 @@ define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clti_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -714,7 +714,7 @@ define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clti_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -730,11 +730,11 @@
define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind {
; CHECK: bsel_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -752,11 +752,11 @@
define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind {
; CHECK: bsel_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -774,11 +774,11 @@
define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind {
; CHECK: bsel_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -796,11 +796,11 @@
define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind {
; CHECK: bsel_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -818,11 +818,11 @@
define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind {
; CHECK: bsel_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -840,11 +840,11 @@
define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind {
; CHECK: bsel_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -862,11 +862,11 @@
define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind {
; CHECK: bsel_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -884,11 +884,11 @@
define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind {
; CHECK: bsel_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -906,9 +906,9 @@
define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind {
; CHECK: bseli_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -925,9 +925,9 @@
define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind {
; CHECK: bseli_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -945,9 +945,9 @@
define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind {
; CHECK: bseli_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -965,9 +965,9 @@
define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind {
; CHECK: bseli_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -985,9 +985,9 @@
define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind {
; CHECK: bseli_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1004,9 +1004,9 @@
define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind {
; CHECK: bseli_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1024,9 +1024,9 @@
define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind {
; CHECK: bseli_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1044,9 +1044,9 @@
define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind {
; CHECK: bseli_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1063,9 +1063,9 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1080,9 +1080,9 @@ define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: max_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1097,9 +1097,9 @@ define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: max_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1114,9 +1114,9 @@ define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: max_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1131,9 +1131,9 @@ define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1148,9 +1148,9 @@ define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1165,9 +1165,9 @@ define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1182,9 +1182,9 @@ define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1199,9 +1199,9 @@ define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: max_s_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1216,9 +1216,9 @@ define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_s_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1233,9 +1233,9 @@ define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_s_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1250,9 +1250,9 @@ define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_s_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1267,9 +1267,9 @@ define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x 
i8>* %b) nounwind { ; CHECK: max_u_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1284,9 +1284,9 @@ define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_u_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1301,9 +1301,9 @@ define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_u_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1318,9 +1318,9 @@ define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_u_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1335,7 +1335,7 @@ define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: maxi_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1350,7 +1350,7 @@ define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: maxi_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1365,7 +1365,7 @@ define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: maxi_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1380,7 +1380,7 @@ define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: maxi_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x 
i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp sgt <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1395,7 +1395,7 @@ define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: maxi_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1410,7 +1410,7 @@ define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: maxi_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1425,7 +1425,7 @@ define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: maxi_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1440,7 +1440,7 @@ define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: maxi_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp ugt <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1455,7 +1455,7 @@ define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: maxi_s_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1470,7 +1470,7 @@ define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: maxi_s_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1485,7 +1485,7 @@ define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: maxi_s_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1500,7 +1500,7 @@ define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) 
nounwind { define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: maxi_s_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp sge <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1515,7 +1515,7 @@ define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: maxi_u_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1530,7 +1530,7 @@ define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: maxi_u_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1545,7 +1545,7 @@ define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: maxi_u_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1560,7 +1560,7 @@ define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: maxi_u_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp uge <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1575,9 +1575,9 @@ define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: min_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1592,9 +1592,9 @@ define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: min_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1609,9 +1609,9 @@ define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: min_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* 
%b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1626,9 +1626,9 @@ define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: min_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1643,9 +1643,9 @@ define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: min_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1660,9 +1660,9 @@ define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: min_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1677,9 +1677,9 @@ define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: min_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1694,9 +1694,9 @@ define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: min_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1711,9 +1711,9 @@ define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: min_s_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1728,9 +1728,9 @@ define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: min_s_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 
= icmp sle <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1745,9 +1745,9 @@ define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: min_s_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1762,9 +1762,9 @@ define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: min_s_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1779,9 +1779,9 @@ define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: min_u_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1796,9 +1796,9 @@ define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: min_u_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1813,9 +1813,9 @@ define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: min_u_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1830,9 +1830,9 @@ define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: min_u_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1847,7 +1847,7 @@ define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: mini_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x 
i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1862,7 +1862,7 @@ define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: mini_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1877,7 +1877,7 @@ define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: mini_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1892,7 +1892,7 @@ define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: mini_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1907,7 +1907,7 @@ define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: mini_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1922,7 +1922,7 @@ define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: mini_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1937,7 +1937,7 @@ define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: mini_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -1952,7 +1952,7 @@ define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: mini_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -1967,7 +1967,7 @@ define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: mini_s_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp 
sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -1982,7 +1982,7 @@ define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: mini_s_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1997,7 +1997,7 @@ define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: mini_s_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -2012,7 +2012,7 @@ define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: mini_s_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> @@ -2027,7 +2027,7 @@ define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: mini_u_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> @@ -2042,7 +2042,7 @@ define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: mini_u_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -2057,7 +2057,7 @@ define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: mini_u_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1> %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -2072,7 +2072,7 @@ define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: mini_u_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <2 x i64> %1, <i64 1, i64 1> %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1> diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll index 
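The hunks above all make one mechanical change: load now spells out the loaded type ahead of the pointer operand instead of leaving it implied by the pointer's type. The test bodies are otherwise untouched; each integer min/max test still pairs an icmp with a select, which the CHECK lines expect to fold into a single MSA instruction (max_s.w, min_u.b, and so on). A minimal sketch of both points, as a hypothetical function that is not taken from the suite:

; Hypothetical sketch, not part of this patch: the new typed-load syntax plus
; the icmp/select idiom the integer min/max tests above are built around.
define void @sketch_max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
  ; Old form:  %1 = load <4 x i32>* %a
  %1 = load <4 x i32>, <4 x i32>* %a
  %2 = load <4 x i32>, <4 x i32>* %b
  ; Signed greater-than plus select is the shape the backend is expected to
  ; match into one max_s.w.
  %3 = icmp sgt <4 x i32> %1, %2
  %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
  store <4 x i32> %4, <4 x i32>* %c
  ret void
}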
e93221b..3229d02 100644 --- a/test/CodeGen/Mips/msa/compare_float.ll +++ b/test/CodeGen/Mips/msa/compare_float.ll @@ -9,8 +9,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: false_v4f32: - %1 = load <4 x float>* %a - %2 = load <4 x float>* %b + %1 = load <4 x float>, <4 x float>* %a + %2 = load <4 x float>, <4 x float>* %b %3 = fcmp false <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> store <4 x i32> %4, <4 x i32>* %c @@ -25,8 +25,8 @@ define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwi define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: false_v2f64: - %1 = load <2 x double>* %a - %2 = load <2 x double>* %b + %1 = load <2 x double>, <2 x double>* %a + %2 = load <2 x double>, <2 x double>* %b %3 = fcmp false <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> store <2 x i64> %4, <2 x i64>* %c @@ -41,9 +41,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: oeq_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oeq <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -58,9 +58,9 @@ define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: oeq_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oeq <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -75,9 +75,9 @@ define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: oge_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oge <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -92,9 +92,9 @@ define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: oge_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oge <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -109,9 +109,9 @@ define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ogt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -126,9 +126,9 @@ define void @ogt_v4f32(<4 x i32>* %c, <4 x 
float>* %a, <4 x float>* %b) nounwind define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ogt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -143,9 +143,9 @@ define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ole_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ole <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -160,9 +160,9 @@ define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ole_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ole <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -177,9 +177,9 @@ define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: olt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp olt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -194,9 +194,9 @@ define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: olt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp olt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -211,9 +211,9 @@ define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: one_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp one <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -228,9 +228,9 @@ define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: one_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp one <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -245,9 +245,9 @@ define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void 
@ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ord_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ord <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -262,9 +262,9 @@ define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ord_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ord <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -279,9 +279,9 @@ define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ueq_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ueq <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -296,9 +296,9 @@ define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ueq_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ueq <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -313,9 +313,9 @@ define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: uge_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uge <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -330,9 +330,9 @@ define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: uge_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uge <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -347,9 +347,9 @@ define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ugt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ugt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -364,9 +364,9 @@ define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) 
nounwind { ; CHECK: ugt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ugt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -381,9 +381,9 @@ define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ule_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ule <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -398,9 +398,9 @@ define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ule_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ule <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -415,9 +415,9 @@ define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ult_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ult <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -432,9 +432,9 @@ define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ult_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ult <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -449,9 +449,9 @@ define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: uno_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uno <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -466,9 +466,9 @@ define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: uno_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uno <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -483,8 +483,8 @@ define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: true_v4f32: - %1 = load <4 x float>* %a - 
%2 = load <4 x float>* %b + %1 = load <4 x float>, <4 x float>* %a + %2 = load <4 x float>, <4 x float>* %b %3 = fcmp true <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> store <4 x i32> %4, <4 x i32>* %c @@ -499,8 +499,8 @@ define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwin define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: true_v2f64: - %1 = load <2 x double>* %a - %2 = load <2 x double>* %b + %1 = load <2 x double>, <2 x double>* %a + %2 = load <2 x double>, <2 x double>* %b %3 = fcmp true <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> store <2 x i64> %4, <2 x i64>* %c @@ -516,11 +516,11 @@ define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c) nounwind { ; CHECK: bsel_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = load <4 x float>* %c + %3 = load <4 x float>, <4 x float>* %c ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7) %4 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -538,11 +538,11 @@ define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, <2 x double>* %c) nounwind { ; CHECK: bsel_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 = load <2 x double>* %c + %3 = load <2 x double>, <2 x double>* %c ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7) %4 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -560,9 +560,9 @@ define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c) nounwind { ; CHECK: bseli_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -580,9 +580,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, <2 x double>* %c) nounwind { ; CHECK: bseli_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -599,9 +599,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: max_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2) ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -615,9 +615,9 @@ define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: max_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x 
double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2) ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -631,9 +631,9 @@ define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: min_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2) ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -647,9 +647,9 @@ define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: min_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2) ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll index 0dd75fa..2a0d74f 100644 --- a/test/CodeGen/Mips/msa/elm_copy.ll +++ b/test/CodeGen/Mips/msa/elm_copy.ll @@ -15,7 +15,7 @@ define void @llvm_mips_copy_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_copy_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_s_b_ARG1 %1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_b_RES ret void @@ -38,7 +38,7 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_copy_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_copy_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_s_h_ARG1 %1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_h_RES ret void @@ -61,7 +61,7 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_copy_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_copy_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_s_w_ARG1 %1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_w_RES ret void @@ -84,7 +84,7 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_copy_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_s_d_ARG1 %1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1) store i64 %1, i64* @llvm_mips_copy_s_d_RES ret void @@ -112,7 +112,7 @@ declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_copy_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_copy_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_u_b_ARG1 %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_b_RES ret void @@ -135,7 +135,7 @@ declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_copy_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_copy_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* 
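The compare_float.ll hunks above follow the same recipe: only the load syntax changes, and each test still computes an fcmp and sign-extends the <N x i1> mask to a full-width integer vector. The CHECK-DAG lines also record that the greater-than predicates are matched by swapping the operands of the less-than instructions, e.g. fclt.w [[R4]], [[R2]], [[R1]] for fcmp ogt. A small sketch of that shape (hypothetical function, not from the suite):

; Hypothetical sketch, not part of this patch: the fcmp-plus-sext shape used
; throughout compare_float.ll, written with the new load syntax.
define void @sketch_ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
  %1 = load <4 x float>, <4 x float>* %a
  %2 = load <4 x float>, <4 x float>* %b
  ; ogt is expected to lower to fclt.w with the operand order reversed.
  %3 = fcmp ogt <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, <4 x i32>* %c
  ret void
}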
@llvm_mips_copy_u_h_ARG1 %1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_h_RES ret void @@ -158,7 +158,7 @@ declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_copy_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_copy_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_u_w_ARG1 %1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_w_RES ret void @@ -181,7 +181,7 @@ declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_copy_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_u_d_ARG1 %1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1) store i64 %1, i64* @llvm_mips_copy_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll index c746e52..46e6289 100644 --- a/test/CodeGen/Mips/msa/elm_insv.ll +++ b/test/CodeGen/Mips/msa/elm_insv.ll @@ -16,8 +16,8 @@ define void @llvm_mips_insert_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_insert_b_ARG1 - %1 = load i32* @llvm_mips_insert_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insert_b_ARG1 + %1 = load i32, i32* @llvm_mips_insert_b_ARG3 %2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES ret void @@ -38,8 +38,8 @@ declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind define void @llvm_mips_insert_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_insert_h_ARG1 - %1 = load i32* @llvm_mips_insert_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insert_h_ARG1 + %1 = load i32, i32* @llvm_mips_insert_h_ARG3 %2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES ret void @@ -60,8 +60,8 @@ declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind define void @llvm_mips_insert_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_insert_w_ARG1 - %1 = load i32* @llvm_mips_insert_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insert_w_ARG1 + %1 = load i32, i32* @llvm_mips_insert_w_ARG3 %2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES ret void @@ -82,8 +82,8 @@ declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind define void @llvm_mips_insert_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_insert_d_ARG1 - %1 = load i64* @llvm_mips_insert_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insert_d_ARG1 + %1 = load i64, i64* @llvm_mips_insert_d_ARG3 %2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES ret void @@ -110,8 +110,8 @@ declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind define void @llvm_mips_insve_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_insve_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_insve_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG3 %2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES ret void @@ -136,8 +136,8 @@ declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind define void @llvm_mips_insve_h_test() nounwind { entry: - %0 = load <8 x i16>* 
@llvm_mips_insve_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_insve_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG3 %2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES ret void @@ -162,8 +162,8 @@ declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind define void @llvm_mips_insve_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_insve_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_insve_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG3 %2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES ret void @@ -188,8 +188,8 @@ declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind define void @llvm_mips_insve_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_insve_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_insve_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG3 %2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES ret void diff --git a/test/CodeGen/Mips/msa/elm_move.ll b/test/CodeGen/Mips/msa/elm_move.ll index 98c06c7..9665b6d 100644 --- a/test/CodeGen/Mips/msa/elm_move.ll +++ b/test/CodeGen/Mips/msa/elm_move.ll @@ -9,7 +9,7 @@ define void @llvm_mips_move_vb_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_move_vb_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_move_vb_ARG1 %1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0) store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES ret void diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll index 00a6544..87f15f1 100644 --- a/test/CodeGen/Mips/msa/elm_shift_slide.ll +++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll @@ -10,8 +10,8 @@ define void @llvm_mips_sldi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_sldi_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1) store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES ret void @@ -31,8 +31,8 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_sldi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_sldi_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1) store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES ret void @@ -52,8 +52,8 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind define void @llvm_mips_sldi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_sldi_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1) store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES ret void @@ -73,8 +73,8 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind define void 
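The element-manipulation tests above (copy, insert, insve, move, sldi, splati) change in exactly the same way: each load of a global vector argument gains its explicit result type, while the MSA intrinsic calls themselves are untouched. For reference, their shared test shape reads like this (hypothetical globals and values, not copied from the suite):

; Hypothetical sketch, not part of this patch: the global-argument test shape
; shared by the elm_*.ll files, with the new load syntax.
@sketch_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@sketch_RES = global <4 x i32> zeroinitializer

define void @sketch_insert_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @sketch_ARG1
  ; Insert the scalar 42 into element 1 of %0.
  %1 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 42)
  store <4 x i32> %1, <4 x i32>* @sketch_RES
  ret void
}

declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind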
@llvm_mips_sldi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_sldi_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1) store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES ret void @@ -93,7 +93,7 @@ declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind define void @llvm_mips_splati_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splati_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1) store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES ret void @@ -112,7 +112,7 @@ declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind define void @llvm_mips_splati_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splati_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1) store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES ret void @@ -131,7 +131,7 @@ declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind define void @llvm_mips_splati_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splati_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1) store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES ret void @@ -150,7 +150,7 @@ declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind define void @llvm_mips_splati_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splati_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1) store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES ret void diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll index ebec465..afd28ae 100644 --- a/test/CodeGen/Mips/msa/frameindex.ll +++ b/test/CodeGen/Mips/msa/frameindex.ll @@ -5,7 +5,7 @@ define void @loadstore_v16i8_near() nounwind { ; MIPS32-AE: loadstore_v16i8_near: %1 = alloca <16 x i8> - %2 = load volatile <16 x i8>* %1 + %2 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0($sp) store volatile <16 x i8> %2, <16 x i8>* %1 ; MIPS32-AE: st.b [[R1]], 0($sp) @@ -20,7 +20,7 @@ define void @loadstore_v16i8_just_under_simm10() nounwind { %1 = alloca <16 x i8> %2 = alloca [496 x i8] ; Push the frame right up to 512 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 496($sp) store volatile <16 x i8> %3, <16 x i8>* %1 ; MIPS32-AE: st.b [[R1]], 496($sp) @@ -35,7 +35,7 @@ define void @loadstore_v16i8_just_over_simm10() nounwind { %1 = alloca <16 x i8> %2 = alloca [497 x i8] ; Push the frame just over 512 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <16 x i8> %3, <16 x i8>* %1 @@ -52,7 +52,7 @@ define void @loadstore_v16i8_just_under_simm16() nounwind { %1 = alloca <16 x i8> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.b 
[[R1:\$w[0-9]+]], 0([[BASE]]) @@ -71,7 +71,7 @@ define void @loadstore_v16i8_just_over_simm16() nounwind { %1 = alloca <16 x i8> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -88,7 +88,7 @@ define void @loadstore_v8i16_near() nounwind { ; MIPS32-AE: loadstore_v8i16_near: %1 = alloca <8 x i16> - %2 = load volatile <8 x i16>* %1 + %2 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0($sp) store volatile <8 x i16> %2, <8 x i16>* %1 ; MIPS32-AE: st.h [[R1]], 0($sp) @@ -102,11 +102,11 @@ define void @loadstore_v8i16_unaligned() nounwind { %1 = alloca [2 x <8 x i16>] %2 = bitcast [2 x <8 x i16>]* %1 to i8* - %3 = getelementptr i8* %2, i32 1 + %3 = getelementptr i8, i8* %2, i32 1 %4 = bitcast i8* %3 to [2 x <8 x i16>]* - %5 = getelementptr [2 x <8 x i16>]* %4, i32 0, i32 0 + %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0 - %6 = load volatile <8 x i16>* %5 + %6 = load volatile <8 x i16>, <8 x i16>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <8 x i16> %6, <8 x i16>* %5 @@ -123,7 +123,7 @@ define void @loadstore_v8i16_just_under_simm10() nounwind { %1 = alloca <8 x i16> %2 = alloca [1008 x i8] ; Push the frame right up to 1024 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 1008($sp) store volatile <8 x i16> %3, <8 x i16>* %1 ; MIPS32-AE: st.h [[R1]], 1008($sp) @@ -138,7 +138,7 @@ define void @loadstore_v8i16_just_over_simm10() nounwind { %1 = alloca <8 x i16> %2 = alloca [1009 x i8] ; Push the frame just over 1024 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <8 x i16> %3, <8 x i16>* %1 @@ -155,7 +155,7 @@ define void @loadstore_v8i16_just_under_simm16() nounwind { %1 = alloca <8 x i16> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -174,7 +174,7 @@ define void @loadstore_v8i16_just_over_simm16() nounwind { %1 = alloca <8 x i16> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -191,7 +191,7 @@ define void @loadstore_v4i32_near() nounwind { ; MIPS32-AE: loadstore_v4i32_near: %1 = alloca <4 x i32> - %2 = load volatile <4 x i32>* %1 + %2 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0($sp) store volatile <4 x i32> %2, <4 x i32>* %1 ; MIPS32-AE: st.w [[R1]], 0($sp) @@ -205,11 +205,11 @@ define void @loadstore_v4i32_unaligned() nounwind { %1 = alloca [2 x <4 x i32>] %2 = bitcast [2 x <4 x i32>]* %1 to i8* - %3 = getelementptr i8* %2, i32 1 + %3 = getelementptr i8, i8* %2, i32 1 %4 = bitcast i8* %3 to [2 x <4 x i32>]* - %5 = getelementptr [2 x <4 x i32>]* %4, i32 0, i32 0 + 
%5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0 - %6 = load volatile <4 x i32>* %5 + %6 = load volatile <4 x i32>, <4 x i32>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <4 x i32> %6, <4 x i32>* %5 @@ -226,7 +226,7 @@ define void @loadstore_v4i32_just_under_simm10() nounwind { %1 = alloca <4 x i32> %2 = alloca [2032 x i8] ; Push the frame right up to 2048 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 2032($sp) store volatile <4 x i32> %3, <4 x i32>* %1 ; MIPS32-AE: st.w [[R1]], 2032($sp) @@ -241,7 +241,7 @@ define void @loadstore_v4i32_just_over_simm10() nounwind { %1 = alloca <4 x i32> %2 = alloca [2033 x i8] ; Push the frame just over 2048 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <4 x i32> %3, <4 x i32>* %1 @@ -258,7 +258,7 @@ define void @loadstore_v4i32_just_under_simm16() nounwind { %1 = alloca <4 x i32> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -277,7 +277,7 @@ define void @loadstore_v4i32_just_over_simm16() nounwind { %1 = alloca <4 x i32> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -294,7 +294,7 @@ define void @loadstore_v2i64_near() nounwind { ; MIPS32-AE: loadstore_v2i64_near: %1 = alloca <2 x i64> - %2 = load volatile <2 x i64>* %1 + %2 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0($sp) store volatile <2 x i64> %2, <2 x i64>* %1 ; MIPS32-AE: st.d [[R1]], 0($sp) @@ -308,11 +308,11 @@ define void @loadstore_v2i64_unaligned() nounwind { %1 = alloca [2 x <2 x i64>] %2 = bitcast [2 x <2 x i64>]* %1 to i8* - %3 = getelementptr i8* %2, i32 1 + %3 = getelementptr i8, i8* %2, i32 1 %4 = bitcast i8* %3 to [2 x <2 x i64>]* - %5 = getelementptr [2 x <2 x i64>]* %4, i32 0, i32 0 + %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0 - %6 = load volatile <2 x i64>* %5 + %6 = load volatile <2 x i64>, <2 x i64>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <2 x i64> %6, <2 x i64>* %5 @@ -329,7 +329,7 @@ define void @loadstore_v2i64_just_under_simm10() nounwind { %1 = alloca <2 x i64> %2 = alloca [4080 x i8] ; Push the frame right up to 4096 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 4080($sp) store volatile <2 x i64> %3, <2 x i64>* %1 ; MIPS32-AE: st.d [[R1]], 4080($sp) @@ -344,7 +344,7 @@ define void @loadstore_v2i64_just_over_simm10() nounwind { %1 = alloca <2 x i64> %2 = alloca [4081 x i8] ; Push the frame just over 4096 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <2 x i64> %3, <2 x i64>* %1 @@ -361,7 
+361,7 @@ define void @loadstore_v2i64_just_under_simm16() nounwind { %1 = alloca <2 x i64> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -380,7 +380,7 @@ define void @loadstore_v2i64_just_over_simm16() nounwind { %1 = alloca <2 x i64> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) diff --git a/test/CodeGen/Mips/msa/i10.ll b/test/CodeGen/Mips/msa/i10.ll index c5a9617..204884b 100644 --- a/test/CodeGen/Mips/msa/i10.ll +++ b/test/CodeGen/Mips/msa/i10.ll @@ -7,7 +7,7 @@ define i32 @llvm_mips_bnz_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bnz_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1 %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind define i32 @llvm_mips_bnz_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bnz_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnz_h_ARG1 %1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -49,7 +49,7 @@ declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind define i32 @llvm_mips_bnz_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bnz_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnz_w_ARG1 %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -70,7 +70,7 @@ declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind define i32 @llvm_mips_bnz_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bnz_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnz_d_ARG1 %1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false diff --git a/test/CodeGen/Mips/msa/i5-a.ll b/test/CodeGen/Mips/msa/i5-a.ll index 0b50720..f9486b1 100644 --- a/test/CodeGen/Mips/msa/i5-a.ll +++ b/test/CodeGen/Mips/msa/i5-a.ll @@ -9,7 +9,7 @@ define void @llvm_mips_addvi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addvi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addvi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind define void @llvm_mips_addvi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addvi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addvi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind define void @llvm_mips_addvi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addvi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addvi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind define void 
@llvm_mips_addvi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addvi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addvi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES ret void diff --git a/test/CodeGen/Mips/msa/i5-b.ll b/test/CodeGen/Mips/msa/i5-b.ll index da6be66..40ab095 100644 --- a/test/CodeGen/Mips/msa/i5-b.ll +++ b/test/CodeGen/Mips/msa/i5-b.ll @@ -9,7 +9,7 @@ define void @llvm_mips_bclri_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bclri_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclri_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES ret void @@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind define void @llvm_mips_bclri_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bclri_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclri_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES ret void @@ -48,7 +48,7 @@ declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind define void @llvm_mips_bclri_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bclri_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclri_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES ret void @@ -67,7 +67,7 @@ declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind define void @llvm_mips_bclri_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bclri_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclri_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES ret void @@ -87,8 +87,8 @@ declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind define void @llvm_mips_binsli_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsli_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsli_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7) store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES ret void @@ -112,8 +112,8 @@ declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_binsli_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsli_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsli_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7) store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES ret void @@ -137,8 +137,8 @@ declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind define void @llvm_mips_binsli_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsli_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsli_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7) store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES ret void @@ -162,8 +162,8 @@ declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind define void @llvm_mips_binsli_d_test() nounwind { entry: - %0 = load <2 x i64>* 
@llvm_mips_binsli_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsli_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG2 ; TODO: We use a particularly wide mask here to work around a legalization ; issue. If the mask doesn't fit within a 10-bit immediate, it gets ; legalized into a constant pool. We should add a test to cover the @@ -191,8 +191,8 @@ declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind define void @llvm_mips_binsri_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsri_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsri_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7) store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES ret void @@ -216,8 +216,8 @@ declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_binsri_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsri_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsri_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7) store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES ret void @@ -241,8 +241,8 @@ declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind define void @llvm_mips_binsri_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsri_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsri_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7) store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES ret void @@ -266,8 +266,8 @@ declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind define void @llvm_mips_binsri_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsri_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsri_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7) store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES ret void @@ -290,7 +290,7 @@ declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind define void @llvm_mips_bnegi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bnegi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnegi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES ret void @@ -309,7 +309,7 @@ declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind define void @llvm_mips_bnegi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bnegi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnegi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES ret void @@ -328,7 +328,7 @@ declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind define void @llvm_mips_bnegi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bnegi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnegi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES 
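; Every hunk in this patch is the same mechanical rewrite: `load` now names
; its result type explicitly instead of deriving it from the pointer operand
; (presumably groundwork for opaque pointer types). A minimal sketch of the
; rewrite against a hypothetical global @g, not taken from any test here:
-  %v = load <16 x i8>* @g
+  %v = load <16 x i8>, <16 x i8>* @g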
ret void @@ -347,7 +347,7 @@ declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind define void @llvm_mips_bnegi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bnegi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnegi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES ret void @@ -366,7 +366,7 @@ declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind define void @llvm_mips_bseti_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bseti_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseti_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES ret void @@ -385,7 +385,7 @@ declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind define void @llvm_mips_bseti_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bseti_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bseti_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES ret void @@ -404,7 +404,7 @@ declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind define void @llvm_mips_bseti_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bseti_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bseti_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES ret void @@ -423,7 +423,7 @@ declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind define void @llvm_mips_bseti_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bseti_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bseti_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES ret void diff --git a/test/CodeGen/Mips/msa/i5-c.ll b/test/CodeGen/Mips/msa/i5-c.ll index bf1578f..8158250 100644 --- a/test/CodeGen/Mips/msa/i5-c.ll +++ b/test/CodeGen/Mips/msa/i5-c.ll @@ -9,7 +9,7 @@ define void @llvm_mips_ceqi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ceqi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceqi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind define void @llvm_mips_ceqi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ceqi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceqi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind define void @llvm_mips_ceqi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ceqi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceqi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind define void @llvm_mips_ceqi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ceqi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceqi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES ret void @@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind define void @llvm_mips_clei_s_b_test() 
nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clei_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES ret void @@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_clei_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clei_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES ret void @@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_clei_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clei_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES ret void @@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_clei_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clei_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES ret void @@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_clei_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clei_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES ret void @@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_clei_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clei_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES ret void @@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_clei_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clei_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES ret void @@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_clei_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clei_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES ret void @@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind define void @llvm_mips_clti_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clti_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES ret void @@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_clti_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clti_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_s_h_ARG1 %1 = tail call <8 x i16> 
@llvm.mips.clti.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES ret void @@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_clti_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clti_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES ret void @@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_clti_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clti_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES ret void @@ -313,7 +313,7 @@ declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_clti_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clti_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES ret void @@ -332,7 +332,7 @@ declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_clti_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clti_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES ret void @@ -351,7 +351,7 @@ declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_clti_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clti_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES ret void @@ -370,7 +370,7 @@ declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_clti_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clti_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/i5-m.ll b/test/CodeGen/Mips/msa/i5-m.ll index 2766349..ba6e9d2 100644 --- a/test/CodeGen/Mips/msa/i5-m.ll +++ b/test/CodeGen/Mips/msa/i5-m.ll @@ -9,7 +9,7 @@ define void @llvm_mips_maxi_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_maxi_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_maxi_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14) 
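; The companion rewrite, seen in the frameindex.ll hunks above, applies to
; `getelementptr`: the source element type becomes a separate leading operand
; rather than being read off the pointer type. A sketch with a hypothetical
; %base pointer, not taken from the patch:
-  %p = getelementptr i8* %base, i32 1
+  %p = getelementptr i8, i8* %base, i32 1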
store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_maxi_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES ret void @@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_maxi_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES ret void @@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_maxi_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES ret void @@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_maxi_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES ret void @@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_maxi_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES ret void @@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind define void @llvm_mips_mini_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES ret void @@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_mini_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES ret void @@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_mini_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES ret void @@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_mini_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES ret void @@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind define 
void @llvm_mips_mini_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES ret void @@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_mini_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES ret void @@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_mini_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES ret void @@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_mini_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES ret void diff --git a/test/CodeGen/Mips/msa/i5-s.ll b/test/CodeGen/Mips/msa/i5-s.ll index 184172f..db331b1 100644 --- a/test/CodeGen/Mips/msa/i5-s.ll +++ b/test/CodeGen/Mips/msa/i5-s.ll @@ -9,7 +9,7 @@ define void @llvm_mips_subvi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind define void @llvm_mips_subvi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind define void @llvm_mips_subvi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind define void @llvm_mips_subvi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES ret void diff --git a/test/CodeGen/Mips/msa/i5_ld_st.ll b/test/CodeGen/Mips/msa/i5_ld_st.ll index 7cc55f2..991bb84 100644 --- a/test/CodeGen/Mips/msa/i5_ld_st.ll +++ b/test/CodeGen/Mips/msa/i5_ld_st.ll @@ -81,7 +81,7 @@ declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind define void @llvm_mips_st_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_st_b_ARG + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8* tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16) ret void @@ 
-99,7 +99,7 @@ declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind define void @llvm_mips_st_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_st_h_ARG + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8* tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16) ret void @@ -117,7 +117,7 @@ declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind define void @llvm_mips_st_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_st_w_ARG + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8* tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16) ret void @@ -135,7 +135,7 @@ declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind define void @llvm_mips_st_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_st_d_ARG + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8* tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16) ret void diff --git a/test/CodeGen/Mips/msa/i8.ll b/test/CodeGen/Mips/msa/i8.ll index d2931a7..4af9c58 100644 --- a/test/CodeGen/Mips/msa/i8.ll +++ b/test/CodeGen/Mips/msa/i8.ll @@ -8,7 +8,7 @@ define void @llvm_mips_andi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_andi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_andi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES ret void @@ -28,8 +28,8 @@ declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind define void @llvm_mips_bmnzi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES ret void @@ -52,8 +52,8 @@ declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_bmzi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmzi_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmzi_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES ret void @@ -77,8 +77,8 @@ declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_bseli_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bseli_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bseli_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES ret void @@ -100,7 +100,7 @@ declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_nori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES ret void @@ -119,7 +119,7 @@ declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind define void @llvm_mips_ori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1 + 
%0 = load <16 x i8>, <16 x i8>* @llvm_mips_ori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES ret void @@ -138,7 +138,7 @@ declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind define void @llvm_mips_shf_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_shf_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES ret void @@ -157,7 +157,7 @@ declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind define void @llvm_mips_shf_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_shf_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25) store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES ret void @@ -176,7 +176,7 @@ declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind define void @llvm_mips_shf_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_shf_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25) store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES ret void @@ -195,7 +195,7 @@ declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind define void @llvm_mips_xori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES ret void diff --git a/test/CodeGen/Mips/msa/inline-asm.ll b/test/CodeGen/Mips/msa/inline-asm.ll index 4a34273..85da87b 100644 --- a/test/CodeGen/Mips/msa/inline-asm.ll +++ b/test/CodeGen/Mips/msa/inline-asm.ll @@ -16,7 +16,7 @@ entry: define void @test2() nounwind { entry: ; CHECK-LABEL: test2: - %0 = load <4 x i32>* @v4i32_r + %0 = load <4 x i32>, <4 x i32>* @v4i32_r %1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0) ; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1 store <4 x i32> %1, <4 x i32>* @v4i32_r @@ -26,7 +26,7 @@ entry: define void @test3() nounwind { entry: ; CHECK-LABEL: test3: - %0 = load <4 x i32>* @v4i32_r + %0 = load <4 x i32>, <4 x i32>* @v4i32_r %1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0) ; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1 store <4 x i32> %1, <4 x i32>* @v4i32_r diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll index 4beaaa9..beb361b 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca <1 x double> %A1 = alloca double %A = alloca i32 - %L = load i8* %0 + %L = load i8, i8* %0 store i8 77, i8* %0 %E = extractelement <8 x i64> zeroinitializer, i32 2 %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3> @@ -24,7 +24,7 @@ BB: br label %CF CF: ; preds = %CF, %CF78, %BB - %L5 = load i8* %Sl + %L5 = load i8, i8* %Sl store i8 %L, i8* %Sl %E6 = extractelement <8 x i32> zeroinitializer, i32 2 %Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef> @@ -33,7 +33,7 @@ CF: ; preds = %CF, %CF78, %BB %FC = sitofp <8 x i64> zeroinitializer to <8 x 
float> %Sl9 = select i1 %Cmp, i8 77, i8 77 %Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %Sl9, i8* %0 %E12 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7> @@ -42,7 +42,7 @@ CF: ; preds = %CF, %CF78, %BB %Tr = trunc <8 x i64> %Shuff to <8 x i32> %Sl16 = select i1 %Cmp, i8 77, i8 %5 %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10 - %L18 = load i8* %Sl + %L18 = load i8, i8* %Sl store i8 -1, i8* %Sl %E19 = extractelement <8 x i32> zeroinitializer, i32 3 %Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef> @@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF78, %BB br i1 %Cmp25, label %CF, label %CF78 CF78: ; preds = %CF - %L26 = load i8* %Sl + %L26 = load i8, i8* %Sl store i32 50347, i32* %A %E27 = extractelement <8 x i1> %Cmp10, i32 2 br i1 %E27, label %CF, label %CF77 @@ -65,7 +65,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 %B30 = urem <8 x i32> %Tr, zeroinitializer %Tr31 = trunc i32 0 to i16 %Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer - %L33 = load i8* %Sl + %L33 = load i8, i8* %Sl store i8 %L26, i8* %Sl %E34 = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef @@ -73,7 +73,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 %B37 = srem <1 x i16> %I29, zeroinitializer %FC38 = sitofp <8 x i32> %B30 to <8 x double> %Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24 - %L40 = load i8* %Sl + %L40 = load i8, i8* %Sl store i8 %Sl16, i8* %Sl %E41 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12> @@ -85,7 +85,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 br i1 %Cmp46, label %CF77, label %CF80 CF80: ; preds = %CF80, %CF77 - %L47 = load i64* %PC + %L47 = load i64, i64* %PC store i8 77, i8* %Sl %E48 = extractelement <8 x i64> zeroinitializer, i32 2 %Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3> @@ -97,7 +97,7 @@ CF80: ; preds = %CF80, %CF77 br i1 %Cmp54, label %CF80, label %CF81 CF81: ; preds = %CF80 - %L55 = load i8* %Sl + %L55 = load i8, i8* %Sl store i8 %Sl16, i8* %Sl %E56 = extractelement <1 x i16> %B, i32 0 %Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1> @@ -105,7 +105,7 @@ CF81: ; preds = %CF80 %B59 = srem i32 %E19, %E19 %Sl60 = select i1 %Cmp, i8 77, i8 77 %Cmp61 = icmp ult <1 x i16> zeroinitializer, %B - %L62 = load i8* %Sl + %L62 = load i8, i8* %Sl store i64 %L47, i64* %PC52 %E63 = extractelement <4 x i32> %I43, i32 2 %Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3> @@ -117,7 +117,7 @@ CF81: ; preds = %CF80 br i1 %Cmp69, label %CF77, label %CF79 CF79: ; preds = %CF81 - %L70 = load i32* %A + %L70 = load i32, i32* %A store i64 %4, i64* %PC %E71 = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9> diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll index f9cab03..bdf6eaf 
100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca i64 %A1 = alloca i32 %A = alloca <2 x i64> - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -1, i8* %0 %E = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3> @@ -22,7 +22,7 @@ BB: %B = lshr i8 %L, -69 %ZE = fpext float 0xBF2AA5FE80000000 to double %Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1> - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 -69, i8* %0 %E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14 %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3> @@ -31,7 +31,7 @@ BB: %FC = uitofp i32 %3 to double %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer %Cmp = icmp ne <1 x i64> %I, <i64 -1> - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %L11, i8* %0 %E12 = extractelement <1 x i64> <i64 -1>, i32 0 %Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1> @@ -42,7 +42,7 @@ BB: br label %CF74 CF74: ; preds = %CF74, %CF80, %CF76, %BB - %L18 = load i8* %0 + %L18 = load i8, i8* %0 store i8 -69, i8* %0 %E19 = extractelement <1 x i64> %Sl, i32 0 %Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10> @@ -50,7 +50,7 @@ CF74: ; preds = %CF74, %CF80, %CF76, %B22 = urem i32 135673, %3 %FC23 = sitofp i8 %L to float %Sl24 = select i1 true, i8 %B, i8 %L18 - %L25 = load i8* %0 + %L25 = load i8, i8* %0 store i8 %L, i8* %0 %E26 = extractelement <2 x i32> %Shuff, i32 1 %Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0> @@ -62,7 +62,7 @@ CF74: ; preds = %CF74, %CF80, %CF76, br i1 %Cmp31, label %CF74, label %CF80 CF80: ; preds = %CF74 - %L32 = load i8* %0 + %L32 = load i8, i8* %0 store i8 -1, i8* %0 %E33 = extractelement <2 x i32> zeroinitializer, i32 1 %Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer @@ -70,7 +70,7 @@ CF80: ; preds = %CF74 %FC36 = sitofp <1 x i1> %Cmp to <1 x float> %Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> %Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27 - %L39 = load i8* %0 + %L39 = load i8, i8* %0 store i8 %Sl24, i8* %0 %E40 = extractelement <8 x i64> zeroinitializer, i32 1 %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2> @@ -81,7 +81,7 @@ CF80: ; preds = %CF74 br i1 %Cmp45, label %CF74, label %CF76 CF76: ; preds = %CF80 - %L46 = load i8* %0 + %L46 = load i8, i8* %0 store i8 %L39, i8* %0 %E47 = extractelement <2 x i32> %Shuff27, i32 0 %Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1> @@ -92,7 +92,7 @@ CF76: ; preds = %CF80 br i1 %Cmp52, label %CF74, label %CF75 CF75: ; preds = %CF75, %CF76 - %L53 = load i8* %0 + %L53 = load i8, i8* %0 store i8 %L18, i8* %0 %E54 = extractelement <8 x i8> %Shuff20, i32 5 %Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2> @@ -103,7 +103,7 @@ CF75: ; preds = %CF75, %CF76 br i1 %Cmp59, label %CF75, label %CF78 CF78: ; preds = %CF75 - %L60 = 
load i8* %0 + %L60 = load i8, i8* %0 store i8 -69, i8* %0 %E61 = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3> @@ -115,7 +115,7 @@ CF78: ; preds = %CF75 br label %CF CF: ; preds = %CF, %CF78 - %L68 = load i8* %0 + %L68 = load i8, i8* %0 store i64 %B57, i64* %2 %E69 = extractelement <2 x i1> %Shuff41, i32 1 br i1 %E69, label %CF, label %CF77 diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll index e14f405..8f23a8c 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll @@ -13,7 +13,7 @@ BB: %A2 = alloca i8 %A1 = alloca i32 %A = alloca i8 - %L = load i8* %0 + %L = load i8, i8* %0 store i8 %5, i8* %0 %E = extractelement <2 x i16> zeroinitializer, i32 0 %Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef @@ -25,7 +25,7 @@ BB: br label %CF83 CF83: ; preds = %BB - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 85, i8* %0 %E6 = extractelement <1 x i8> <i8 -1>, i32 0 %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3> @@ -37,7 +37,7 @@ CF83: ; preds = %BB br label %CF CF: ; preds = %CF, %CF81, %CF83 - %L13 = load i8* %0 + %L13 = load i8, i8* %0 store i8 0, i8* %0 %E14 = extractelement <2 x i64> zeroinitializer, i32 0 %Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef> @@ -52,7 +52,7 @@ CF80: ; preds = %CF80, %CF br i1 %Cmp19, label %CF80, label %CF81 CF81: ; preds = %CF80 - %L20 = load i8* %0 + %L20 = load i8, i8* %0 store i8 85, i8* %0 %E21 = extractelement <1 x i8> <i8 -1>, i32 0 %Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer @@ -60,7 +60,7 @@ CF81: ; preds = %CF80 %FC24 = fptoui <4 x float> %FC to <4 x i16> %Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1> %Cmp26 = icmp ult <4 x i64> %I16, %Shuff15 - %L27 = load i8* %0 + %L27 = load i8, i8* %0 store i8 %L, i8* %0 %E28 = extractelement <1 x i8> <i8 -1>, i32 0 %Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9> @@ -68,7 +68,7 @@ CF81: ; preds = %CF80 %B31 = mul i8 %E28, 85 %PC = bitcast i32* %A3 to i32* %Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000 - %L33 = load i32* %PC + %L33 = load i32, i32* %PC store i32 %L33, i32* %PC %E34 = extractelement <2 x i16> zeroinitializer, i32 1 %Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer @@ -79,7 +79,7 @@ CF81: ; preds = %CF80 br i1 %Cmp39, label %CF, label %CF77 CF77: ; preds = %CF77, %CF81 - %L40 = load i32* %PC + %L40 = load i32, i32* %PC store i32 %3, i32* %PC %E41 = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3> @@ -88,7 +88,7 @@ CF77: ; preds = %CF77, %CF81 %Se = sext i32 %3 to i64 %Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43 %Cmp46 = icmp sge <1 x i8> %I36, %Shuff - %L47 = load i32* %PC + %L47 = load i32, i32* %PC store i32 %L33, i32* %PC %E48 = extractelement <2 x i16> zeroinitializer, i32 0 %Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1> @@ -100,7 +100,7 @@ CF77: ; preds = %CF77, %CF81 br i1 %Cmp54, label %CF77, label %CF78 CF78: ; preds = %CF78, 
%CF77 - %L55 = load i32* %PC + %L55 = load i32, i32* %PC store i32 %L33, i32* %PC %E56 = extractelement <8 x i16> %Shuff29, i32 4 %Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1> @@ -111,7 +111,7 @@ CF78: ; preds = %CF78, %CF77 br i1 %Cmp60, label %CF78, label %CF79 CF79: ; preds = %CF79, %CF78 - %L61 = load i32* %PC + %L61 = load i32, i32* %PC store i32 %L33, i32* %A3 %E62 = extractelement <4 x i64> %Shuff15, i32 1 %Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6> @@ -123,7 +123,7 @@ CF79: ; preds = %CF79, %CF78 br i1 %Cmp68, label %CF79, label %CF82 CF82: ; preds = %CF79 - %L69 = load i32* %PC + %L69 = load i32, i32* %PC store i32 %L33, i32* %PC %E70 = extractelement <8 x i16> zeroinitializer, i32 3 %Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4> diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll index 1a03e55..e3cf796 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca i64 %A1 = alloca i64 %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -101, i8* %0 %E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0 %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1> @@ -22,7 +22,7 @@ BB: %B = and i64 116376, 57247 %FC = uitofp i8 7 to double %Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %L, i8* %0 %E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3 %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0> @@ -33,7 +33,7 @@ BB: br label %CF CF: ; preds = %CF, %BB - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 -87, i8* %0 %E12 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5> @@ -45,7 +45,7 @@ CF: ; preds = %CF, %BB br i1 %Cmp18, label %CF, label %CF80 CF80: ; preds = %CF80, %CF88, %CF - %L19 = load i8* %0 + %L19 = load i8, i8* %0 store i8 -101, i8* %0 %E20 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5> @@ -56,7 +56,7 @@ CF80: ; preds = %CF80, %CF88, %CF br i1 %Cmp25, label %CF80, label %CF83 CF83: ; preds = %CF83, %CF80 - %L26 = load i8* %0 + %L26 = load i8, i8* %0 store i8 -87, i8* %0 %E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0 %Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5> @@ -68,7 +68,7 @@ CF83: ; preds = %CF83, %CF80 br i1 %Cmp33, label %CF83, label %CF88 CF88: ; preds = %CF83 - %L34 = load i8* %0 + %L34 = load i8, i8* %0 store i8 -87, i8* %0 %E35 = extractelement <8 x i64> %Shuff, i32 7 %Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0> @@ -80,7 +80,7 @@ CF88: ; preds = %CF83 
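; The llvm-stress-generated tests below change only in this same syntactic
; way. Note that `store` lines stay untouched throughout: the value operand
; of a store already fixes the type, so only `load` and `getelementptr`
; needed the extra operand. Illustrative pair with a hypothetical %ptr:
-  %L = load i8* %ptr
+  %L = load i8, i8* %ptr
   store i8 %L, i8* %ptr        ; unchanged by this migration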
br i1 %Cmp40, label %CF80, label %CF81 CF81: ; preds = %CF81, %CF85, %CF87, %CF88 - %L41 = load i8* %0 + %L41 = load i8, i8* %0 store i8 %L34, i8* %0 %E42 = extractelement <8 x i64> %Shuff13, i32 6 %Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7> @@ -92,7 +92,7 @@ CF81: ; preds = %CF81, %CF85, %CF87, br i1 %Cmp47, label %CF81, label %CF85 CF85: ; preds = %CF81 - %L48 = load i8* %0 + %L48 = load i8, i8* %0 store i8 -101, i8* %0 %E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2 %Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3> @@ -101,7 +101,7 @@ CF85: ; preds = %CF81 %FC53 = uitofp i8 %L48 to double %Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24 %Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer - %L56 = load i8* %0 + %L56 = load i8, i8* %0 store i8 %L11, i8* %0 %E57 = extractelement <4 x i64> %Shuff21, i32 1 %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2> @@ -113,7 +113,7 @@ CF85: ; preds = %CF81 CF84: ; preds = %CF84, %CF85 %Sl62 = select i1 false, i8 %L, i8 %L48 %Cmp63 = icmp ne <8 x i64> %I, zeroinitializer - %L64 = load i8* %0 + %L64 = load i8, i8* %0 store i8 %5, i8* %0 %E65 = extractelement <8 x i1> %Cmp55, i32 0 br i1 %E65, label %CF84, label %CF87 @@ -125,7 +125,7 @@ CF87: ; preds = %CF84 %ZE69 = zext <8 x i8> %Sl32 to <8 x i64> %Sl70 = select i1 %Tr61, i64 %E20, i64 %E12 %Cmp71 = icmp slt <8 x i64> %I, %Shuff - %L72 = load i8* %0 + %L72 = load i8, i8* %0 store i8 %L72, i8* %0 %E73 = extractelement <8 x i1> %Cmp55, i32 6 br i1 %E73, label %CF81, label %CF82 diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll index 96547d9..6f33810 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca double %A1 = alloca float %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -123, i8* %0 %E = extractelement <4 x i64> zeroinitializer, i32 1 %Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6> @@ -22,7 +22,7 @@ BB: %BC = bitcast i64 181325 to double %Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer %Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %L, i8* %0 %E6 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0> @@ -33,7 +33,7 @@ BB: br label %CF80 CF80: ; preds = %BB - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 -123, i8* %0 %E12 = extractelement <2 x i16> zeroinitializer, i32 1 %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6> @@ -42,7 +42,7 @@ CF80: ; preds = %BB %PC = bitcast i1* %A4 to i64* %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> %Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16 - %L18 = load double* %A2 + %L18 = load double, double* %A2 store i64 498254, i64* %PC %E19 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1> @@ -51,7 +51,7 
+51,7 @@ CF80: ; preds = %BB %ZE = zext <2 x i1> %Shuff20 to <2 x i32> %Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer %Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer - %L25 = load i8* %0 + %L25 = load i8, i8* %0 store i8 %L25, i8* %0 %E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3 %Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4> @@ -63,7 +63,7 @@ CF80: ; preds = %BB CF79: ; preds = %CF80 %Sl30 = select i1 false, i8 %B29, i8 -123 %Cmp31 = icmp sge <2 x i1> %I, %I - %L32 = load i64* %PC + %L32 = load i64, i64* %PC store i8 -123, i8* %0 %E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2 %Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3> @@ -75,7 +75,7 @@ CF79: ; preds = %CF80 br label %CF CF: ; preds = %CF, %CF79 - %L40 = load double* %A + %L40 = load double, double* %A store i1 %Cmp39, i1* %PC37 %E41 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef> @@ -90,7 +90,7 @@ CF77: ; preds = %CF77, %CF br i1 %Cmp46, label %CF77, label %CF78 CF78: ; preds = %CF78, %CF83, %CF82, %CF77 - %L47 = load i64* %PC + %L47 = load i64, i64* %PC store i8 -123, i8* %0 %E48 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef> @@ -105,7 +105,7 @@ CF83: ; preds = %CF78 br i1 %Cmp54, label %CF78, label %CF82 CF82: ; preds = %CF83 - %L55 = load i64* %PC + %L55 = load i64, i64* %PC store i64 %L32, i64* %PC %E56 = extractelement <2 x i16> %Shuff7, i32 1 %Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0> @@ -114,7 +114,7 @@ CF82: ; preds = %CF83 %FC = sitofp i64 498254 to double %Sl60 = select i1 false, i64 %E6, i64 -1 %Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43 - %L62 = load i64* %PC + %L62 = load i64, i64* %PC store i64 %Sl9, i64* %PC %E63 = extractelement <2 x i32> %ZE, i32 0 %Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7> @@ -126,7 +126,7 @@ CF82: ; preds = %CF83 CF81: ; preds = %CF82 %Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36 - %L70 = load i8* %0 + %L70 = load i8, i8* %0 store i64 %L55, i64* %PC %E71 = extractelement <4 x i32> %Shuff49, i32 1 %Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6> diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll index bef75f3..181f72a 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca float %A1 = alloca double %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 97, i8* %0 %E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14 %Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3> @@ -22,7 +22,7 @@ BB: %Tr = trunc <1 x i64> zeroinitializer to <1 x i8> %Sl = select i1 false, double* %A1, double* %A %Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer - %L5 = load
double* %Sl + %L5 = load double, double* %Sl store float -4.374162e+06, float* %A2 %E6 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef> @@ -34,7 +34,7 @@ BB: br label %CF72 CF72: ; preds = %CF72, %CF80, %CF78, %BB - %L11 = load double* %Sl + %L11 = load double, double* %Sl store double 0.000000e+00, double* %Sl %E12 = extractelement <2 x i1> zeroinitializer, i32 0 br i1 %E12, label %CF72, label %CF80 @@ -49,7 +49,7 @@ CF80: ; preds = %CF72 br i1 %Cmp17, label %CF72, label %CF77 CF77: ; preds = %CF77, %CF80 - %L18 = load double* %Sl + %L18 = load double, double* %Sl store double 0.000000e+00, double* %Sl %E19 = extractelement <2 x i1> zeroinitializer, i32 0 br i1 %E19, label %CF77, label %CF78 @@ -60,7 +60,7 @@ CF78: ; preds = %CF77 %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer %FC = uitofp i8 97 to double %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer - %L24 = load double* %Sl + %L24 = load double, double* %Sl store float %Sl16, float* %PC %E25 = extractelement <2 x i1> %Shuff, i32 1 br i1 %E25, label %CF72, label %CF76 @@ -71,7 +71,7 @@ CF76: ; preds = %CF78 %B28 = mul <4 x i64> %I27, zeroinitializer %ZE = zext <8 x i1> zeroinitializer to <8 x i64> %Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06 - %L30 = load i8* %0 + %L30 = load i8, i8* %0 store double %L5, double* %Sl %E31 = extractelement <8 x i1> zeroinitializer, i32 5 br label %CF @@ -85,7 +85,7 @@ CF: ; preds = %CF, %CF81, %CF76 br i1 %Cmp36, label %CF, label %CF74 CF74: ; preds = %CF74, %CF - %L37 = load float* %PC + %L37 = load float, float* %PC store double 0.000000e+00, double* %Sl %E38 = extractelement <2 x i1> %Sl23, i32 1 br i1 %E38, label %CF74, label %CF75 @@ -95,7 +95,7 @@ CF75: ; preds = %CF75, %CF82, %CF74 %I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2 %Sl41 = select i1 %Cmp10, i32 0, i32 %3 %Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer - %L43 = load double* %Sl + %L43 = load double, double* %Sl store i64 %4, i64* %2 %E44 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E44, label %CF75, label %CF82 @@ -109,7 +109,7 @@ CF82: ; preds = %CF75 br i1 %Cmp49, label %CF75, label %CF81 CF81: ; preds = %CF82 - %L50 = load i8* %0 + %L50 = load i8, i8* %0 store double %L43, double* %Sl %E51 = extractelement <4 x i64> %Shuff7, i32 3 %Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0> @@ -117,7 +117,7 @@ CF81: ; preds = %CF82 %B54 = fdiv double %L24, %L43 %BC55 = bitcast <4 x i64> zeroinitializer to <4 x double> %Sl56 = select i1 false, i8 %5, i8 97 - %L57 = load i8* %0 + %L57 = load i8, i8* %0 store i8 %L50, i8* %0 %E58 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E58, label %CF, label %CF73 @@ -129,7 +129,7 @@ CF73: ; preds = %CF73, %CF81 %PC62 = bitcast double* %A3 to float* %Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer %Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff - %L65 = load double* %A1 + %L65 = load double, double* %A1 store float -4.374162e+06, float* %PC62 %E66 = extractelement <8 x i1> %I21, i32 3 br i1 %E66, label %CF73, label %CF79 diff --git a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll index 697871d..c0bc905 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca <1 x double> %A1 = alloca <8 x double> %A = 
alloca i64 - %L = load i8* %0 + %L = load i8, i8* %0 store i64 33695, i64* %A %E = extractelement <4 x i32> zeroinitializer, i32 3 %Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0> @@ -22,7 +22,7 @@ BB: %B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> %ZE = fpext float 0x3B64A2B880000000 to double %Sl = select i1 true, i16 -1, i16 -11642 - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 0, i8* %0 %E6 = extractelement <4 x i32> zeroinitializer, i32 2 %Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef> @@ -31,7 +31,7 @@ BB: %BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float> %Sl10 = select i1 true, i32* %1, i32* %1 %Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer - %L11 = load i32* %Sl10 + %L11 = load i32, i32* %Sl10 store <1 x double> zeroinitializer, <1 x double>* %A2 %E12 = extractelement <4 x i16> zeroinitializer, i32 0 %Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef @@ -43,7 +43,7 @@ BB: br label %CF75 CF75: ; preds = %CF75, %BB - %L19 = load i32* %Sl10 + %L19 = load i32, i32* %Sl10 store i32 %L11, i32* %Sl10 %E20 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6> @@ -55,7 +55,7 @@ CF75: ; preds = %CF75, %BB br i1 %Cmp26, label %CF75, label %CF76 CF76: ; preds = %CF75 - %L27 = load i32* %Sl10 + %L27 = load i32, i32* %Sl10 store i32 439732, i32* %Sl10 %E28 = extractelement <4 x i32> %Shuff21, i32 3 %Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0> @@ -65,7 +65,7 @@ CF76: ; preds = %CF75 br label %CF74 CF74: ; preds = %CF74, %CF80, %CF78, %CF76 - %L33 = load i64* %2 + %L33 = load i64, i64* %2 store i32 71140, i32* %Sl10 %E34 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef @@ -76,7 +76,7 @@ CF74: ; preds = %CF74, %CF80, %CF78, br i1 %Cmp39, label %CF74, label %CF80 CF80: ; preds = %CF74 - %L40 = load i8* %0 + %L40 = load i8, i8* %0 store i32 0, i32* %Sl10 %E41 = extractelement <8 x i64> zeroinitializer, i32 1 %Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef @@ -86,7 +86,7 @@ CF80: ; preds = %CF74 br i1 %Sl44, label %CF74, label %CF78 CF78: ; preds = %CF80 - %L45 = load i32* %Sl10 + %L45 = load i32, i32* %Sl10 store i8 %L5, i8* %0 %E46 = extractelement <8 x i1> %Shuff7, i32 2 br i1 %E46, label %CF74, label %CF77 @@ -101,7 +101,7 @@ CF77: ; preds = %CF77, %CF78 br i1 %Cmp52, label %CF77, label %CF79 CF79: ; preds = %CF77 - %L53 = load i32* %Sl10 + %L53 = load i32, i32* %Sl10 store i8 %L40, i8* %0 %E54 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2> @@ -109,7 +109,7 @@ CF79: ; preds = %CF77 %Tr = trunc <1 x i64> %Shuff13 to <1 x i16> %Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1> %Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56 - %L59 = load i8* %0 + %L59 = load i8, i8* %0 store <1 x double> 
zeroinitializer, <1 x double>* %A2 %E60 = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef> @@ -121,7 +121,7 @@ CF79: ; preds = %CF77 br label %CF CF: ; preds = %CF79 - %L66 = load i32* %Sl10 + %L66 = load i32, i32* %Sl10 store i32 %E6, i32* %PC %E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2 %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef> diff --git a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll index dc4200a..a3150e9 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll @@ -14,14 +14,14 @@ BB: %A2 = alloca <4 x i1> %A1 = alloca <4 x i16> %A = alloca <2 x i32> - %L = load i8* %0 + %L = load i8, i8* %0 store i8 %L, i8* %0 %E = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5> %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1 %FC = sitofp <4 x i32> zeroinitializer to <4 x double> %Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %5, i8* %0 %E6 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef> @@ -30,7 +30,7 @@ BB: %FC9 = fptoui float 0x406DB70180000000 to i64 %Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> %Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %L, i8* %0 %E12 = extractelement <4 x i64> zeroinitializer, i32 2 %Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3> @@ -42,7 +42,7 @@ BB: br label %CF CF: ; preds = %CF, %CF79, %CF84, %BB - %L18 = load i8* %0 + %L18 = load i8, i8* %0 store i8 %L, i8* %0 %E19 = extractelement <4 x i64> %Sl, i32 3 %Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0> @@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF79, %CF84, % br i1 %Cmp25, label %CF, label %CF79 CF79: ; preds = %CF - %L26 = load i8* %0 + %L26 = load i8, i8* %0 store i8 %L26, i8* %0 %E27 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11> @@ -65,7 +65,7 @@ CF79: ; preds = %CF br i1 %Cmp32, label %CF, label %CF78 CF78: ; preds = %CF78, %CF79 - %L33 = load i8* %0 + %L33 = load i8, i8* %0 store i8 %L, i8* %0 %E34 = extractelement <16 x i32> %Shuff28, i32 1 %Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2> @@ -76,7 +76,7 @@ CF78: ; preds = %CF78, %CF79 br i1 %Cmp38, label %CF78, label %CF80 CF80: ; preds = %CF80, %CF82, %CF78 - %L39 = load i8* %0 + %L39 = load i8, i8* %0 store i8 %L, i8* %0 %E40 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E40, label %CF80, label 
%CF82 @@ -87,7 +87,7 @@ CF82: ; preds = %CF80 %B43 = sub i32 %E, 0 %Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28 %Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21 - %L46 = load i8* %0 + %L46 = load i8, i8* %0 store i8 %L11, i8* %0 %E47 = extractelement <8 x i32> %Sl16, i32 4 %Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1> @@ -99,7 +99,7 @@ CF82: ; preds = %CF80 CF81: ; preds = %CF81, %CF82 %Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000 %Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1> - %L54 = load i8* %0 + %L54 = load i8, i8* %0 store i8 %L5, i8* %0 %E55 = extractelement <8 x i32> zeroinitializer, i32 7 %Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0> @@ -108,7 +108,7 @@ CF81: ; preds = %CF81, %CF82 %FC59 = fptoui <4 x double> %I36 to <4 x i16> %Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57 %Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> - %L62 = load i8* %0 + %L62 = load i8, i8* %0 store i8 %L33, i8* %0 %E63 = extractelement <4 x i64> %Shuff, i32 2 %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef> @@ -126,7 +126,7 @@ CF84: ; preds = %CF83 br i1 %Cmp69, label %CF, label %CF77 CF77: ; preds = %CF84 - %L70 = load i8* %0 + %L70 = load i8, i8* %0 store i8 %L, i8* %0 %E71 = extractelement <4 x i64> %Shuff, i32 0 %Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1> diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll index faeec5d..7feed92 100644 --- a/test/CodeGen/Mips/msa/shuffle.ll +++ b/test/CodeGen/Mips/msa/shuffle.ll @@ -4,7 +4,7 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -20,7 +20,7 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_1: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1] @@ -34,8 +34,8 @@ define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_2: - %1 = load <16 x i8>* %a - %2 = load <16 x i8>* %b + %1 = load <16 x i8>, <16 x i8>* %a + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -51,9 +51,9 @@ define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* 
%a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_3: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -71,7 +71,7 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_4: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17> ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1] @@ -85,7 +85,7 @@ define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -101,7 +101,7 @@ define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_1: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1] @@ -115,8 +115,8 @@ define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_2: - %1 = load <8 x i16>* %a - %2 = load <8 x i16>* %b + %1 = load <8 x i16>, <8 x i16>* %a + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -132,9 +132,9 @@ define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_3: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -152,7 +152,7 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_4: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9> ; CHECK-DAG: 
splati.h [[R3:\$w[0-9]+]], [[R1]][1] @@ -169,7 +169,7 @@ define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27 @@ -183,7 +183,7 @@ define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_1: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85 @@ -197,8 +197,8 @@ define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_2: - %1 = load <4 x i32>* %a - %2 = load <4 x i32>* %b + %1 = load <4 x i32>, <4 x i32>* %a + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36 @@ -212,9 +212,9 @@ define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_3: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -232,7 +232,7 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_4: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85 @@ -246,7 +246,7 @@ define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -262,7 +262,7 @@ define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_1: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] @@ -276,8 +276,8 @@ define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_2: - %1 = load <2 x i64>* %a - %2 = load <2 
x i64>* %b + %1 = load <2 x i64>, <2 x i64>* %a + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -293,9 +293,9 @@ define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_3: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -313,7 +313,7 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_4: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] @@ -327,7 +327,7 @@ define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: shf_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12> ; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45 @@ -341,7 +341,7 @@ define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: shf_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> ; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27 @@ -355,7 +355,7 @@ define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: shf_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27 @@ -371,9 +371,9 @@ define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvev_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30> @@ -388,9 +388,9 @@ define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvev_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; 
CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -404,9 +404,9 @@ define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvev_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6> ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -420,9 +420,9 @@ define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvev_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2> ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -436,9 +436,9 @@ define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvod_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31> @@ -453,9 +453,9 @@ define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvod_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -469,9 +469,9 @@ define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvod_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7> ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -485,9 +485,9 @@ define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvod_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: 
ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3> ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -501,9 +501,9 @@ define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvl_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> @@ -518,9 +518,9 @@ define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvl_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -534,9 +534,9 @@ define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvl_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5> ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -550,9 +550,9 @@ define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvl_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2> ; ilvl.d and ilvev.d are equivalent for v2i64 @@ -567,9 +567,9 @@ define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvr_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31> @@ -584,9 +584,9 @@ define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvr_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> 
; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -600,9 +600,9 @@ define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvr_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7> ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -616,9 +616,9 @@ define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvr_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3> ; ilvr.d and ilvod.d are equivalent for v2i64 @@ -633,9 +633,9 @@ define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: pckev_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> @@ -650,9 +650,9 @@ define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: pckev_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -666,9 +666,9 @@ define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: pckev_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -682,9 +682,9 @@ define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: pckev_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2> ; pckev.d and ilvev.d are equivalent for v2i64 @@ -699,9 +699,9 @@ define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @pckod_v16i8_0(<16 
x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: pckod_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31> @@ -716,9 +716,9 @@ define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: pckod_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -732,9 +732,9 @@ define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: pckod_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7> ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -748,9 +748,9 @@ define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: pckod_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3> ; pckod.d and ilvod.d are equivalent for v2i64 @@ -765,7 +765,7 @@ define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: splati_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> @@ -780,7 +780,7 @@ define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: splati_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4] @@ -794,7 +794,7 @@ define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: splati_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3> ; shf.w and splati.w are equivalent @@ -809,7 +809,7 @@ define void @splati_v4i32_0(<4 x i32>* 
%c, <4 x i32>* %a) nounwind { define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: splati_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] diff --git a/test/CodeGen/Mips/msa/spill.ll b/test/CodeGen/Mips/msa/spill.ll index 66f896a..8c9a799 100644 --- a/test/CodeGen/Mips/msa/spill.ll +++ b/test/CodeGen/Mips/msa/spill.ll @@ -6,73 +6,73 @@ define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind { entry: - %p1 = getelementptr <16 x i8>* %p0, i32 1 - %p2 = getelementptr <16 x i8>* %p0, i32 2 - %p3 = getelementptr <16 x i8>* %p0, i32 3 - %p4 = getelementptr <16 x i8>* %p0, i32 4 - %p5 = getelementptr <16 x i8>* %p0, i32 5 - %p6 = getelementptr <16 x i8>* %p0, i32 6 - %p7 = getelementptr <16 x i8>* %p0, i32 7 - %p8 = getelementptr <16 x i8>* %p0, i32 8 - %p9 = getelementptr <16 x i8>* %p0, i32 9 - %p10 = getelementptr <16 x i8>* %p0, i32 10 - %p11 = getelementptr <16 x i8>* %p0, i32 11 - %p12 = getelementptr <16 x i8>* %p0, i32 12 - %p13 = getelementptr <16 x i8>* %p0, i32 13 - %p14 = getelementptr <16 x i8>* %p0, i32 14 - %p15 = getelementptr <16 x i8>* %p0, i32 15 - %p16 = getelementptr <16 x i8>* %p0, i32 16 - %p17 = getelementptr <16 x i8>* %p0, i32 17 - %p18 = getelementptr <16 x i8>* %p0, i32 18 - %p19 = getelementptr <16 x i8>* %p0, i32 19 - %p20 = getelementptr <16 x i8>* %p0, i32 20 - %p21 = getelementptr <16 x i8>* %p0, i32 21 - %p22 = getelementptr <16 x i8>* %p0, i32 22 - %p23 = getelementptr <16 x i8>* %p0, i32 23 - %p24 = getelementptr <16 x i8>* %p0, i32 24 - %p25 = getelementptr <16 x i8>* %p0, i32 25 - %p26 = getelementptr <16 x i8>* %p0, i32 26 - %p27 = getelementptr <16 x i8>* %p0, i32 27 - %p28 = getelementptr <16 x i8>* %p0, i32 28 - %p29 = getelementptr <16 x i8>* %p0, i32 29 - %p30 = getelementptr <16 x i8>* %p0, i32 30 - %p31 = getelementptr <16 x i8>* %p0, i32 31 - %p32 = getelementptr <16 x i8>* %p0, i32 32 - %p33 = getelementptr <16 x i8>* %p0, i32 33 - %0 = load <16 x i8>* %p0, align 16 - %1 = load <16 x i8>* %p1, align 16 - %2 = load <16 x i8>* %p2, align 16 - %3 = load <16 x i8>* %p3, align 16 - %4 = load <16 x i8>* %p4, align 16 - %5 = load <16 x i8>* %p5, align 16 - %6 = load <16 x i8>* %p6, align 16 - %7 = load <16 x i8>* %p7, align 16 - %8 = load <16 x i8>* %p8, align 16 - %9 = load <16 x i8>* %p9, align 16 - %10 = load <16 x i8>* %p10, align 16 - %11 = load <16 x i8>* %p11, align 16 - %12 = load <16 x i8>* %p12, align 16 - %13 = load <16 x i8>* %p13, align 16 - %14 = load <16 x i8>* %p14, align 16 - %15 = load <16 x i8>* %p15, align 16 - %16 = load <16 x i8>* %p16, align 16 - %17 = load <16 x i8>* %p17, align 16 - %18 = load <16 x i8>* %p18, align 16 - %19 = load <16 x i8>* %p19, align 16 - %20 = load <16 x i8>* %p20, align 16 - %21 = load <16 x i8>* %p21, align 16 - %22 = load <16 x i8>* %p22, align 16 - %23 = load <16 x i8>* %p23, align 16 - %24 = load <16 x i8>* %p24, align 16 - %25 = load <16 x i8>* %p25, align 16 - %26 = load <16 x i8>* %p26, align 16 - %27 = load <16 x i8>* %p27, align 16 - %28 = load <16 x i8>* %p28, align 16 - %29 = load <16 x i8>* %p29, align 16 - %30 = load <16 x i8>* %p30, align 16 - %31 = load <16 x i8>* %p31, align 16 - %32 = load <16 x i8>* %p32, align 16 - %33 = load <16 x i8>* %p33, align 16 + %p1 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 1 + %p2 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 2 + %p3 = 
getelementptr <16 x i8>, <16 x i8>* %p0, i32 3 + %p4 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 4 + %p5 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 5 + %p6 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 6 + %p7 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 7 + %p8 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 8 + %p9 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 9 + %p10 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 10 + %p11 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 11 + %p12 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 12 + %p13 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 13 + %p14 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 14 + %p15 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 15 + %p16 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 16 + %p17 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 17 + %p18 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 18 + %p19 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 19 + %p20 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 20 + %p21 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 21 + %p22 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 22 + %p23 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 23 + %p24 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 24 + %p25 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 25 + %p26 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 26 + %p27 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 27 + %p28 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 28 + %p29 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 29 + %p30 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 30 + %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31 + %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32 + %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33 + %0 = load <16 x i8>, <16 x i8>* %p0, align 16 + %1 = load <16 x i8>, <16 x i8>* %p1, align 16 + %2 = load <16 x i8>, <16 x i8>* %p2, align 16 + %3 = load <16 x i8>, <16 x i8>* %p3, align 16 + %4 = load <16 x i8>, <16 x i8>* %p4, align 16 + %5 = load <16 x i8>, <16 x i8>* %p5, align 16 + %6 = load <16 x i8>, <16 x i8>* %p6, align 16 + %7 = load <16 x i8>, <16 x i8>* %p7, align 16 + %8 = load <16 x i8>, <16 x i8>* %p8, align 16 + %9 = load <16 x i8>, <16 x i8>* %p9, align 16 + %10 = load <16 x i8>, <16 x i8>* %p10, align 16 + %11 = load <16 x i8>, <16 x i8>* %p11, align 16 + %12 = load <16 x i8>, <16 x i8>* %p12, align 16 + %13 = load <16 x i8>, <16 x i8>* %p13, align 16 + %14 = load <16 x i8>, <16 x i8>* %p14, align 16 + %15 = load <16 x i8>, <16 x i8>* %p15, align 16 + %16 = load <16 x i8>, <16 x i8>* %p16, align 16 + %17 = load <16 x i8>, <16 x i8>* %p17, align 16 + %18 = load <16 x i8>, <16 x i8>* %p18, align 16 + %19 = load <16 x i8>, <16 x i8>* %p19, align 16 + %20 = load <16 x i8>, <16 x i8>* %p20, align 16 + %21 = load <16 x i8>, <16 x i8>* %p21, align 16 + %22 = load <16 x i8>, <16 x i8>* %p22, align 16 + %23 = load <16 x i8>, <16 x i8>* %p23, align 16 + %24 = load <16 x i8>, <16 x i8>* %p24, align 16 + %25 = load <16 x i8>, <16 x i8>* %p25, align 16 + %26 = load <16 x i8>, <16 x i8>* %p26, align 16 + %27 = load <16 x i8>, <16 x i8>* %p27, align 16 + %28 = load <16 x i8>, <16 x i8>* %p28, align 16 + %29 = load <16 x i8>, <16 x i8>* %p29, align 16 + %30 = load <16 x i8>, <16 x i8>* %p30, align 16 + %31 = load <16 x i8>, <16 x i8>* %p31, align 16 + %32 = load <16 x i8>, <16 x i8>* %p32, align 16 + %33 = load <16 x i8>, <16 x i8>* %p33, align 16 %r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1) %r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x 
i8> %2) %r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3) @@ -155,73 +155,73 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind { entry: - %p1 = getelementptr <8 x i16>* %p0, i32 1 - %p2 = getelementptr <8 x i16>* %p0, i32 2 - %p3 = getelementptr <8 x i16>* %p0, i32 3 - %p4 = getelementptr <8 x i16>* %p0, i32 4 - %p5 = getelementptr <8 x i16>* %p0, i32 5 - %p6 = getelementptr <8 x i16>* %p0, i32 6 - %p7 = getelementptr <8 x i16>* %p0, i32 7 - %p8 = getelementptr <8 x i16>* %p0, i32 8 - %p9 = getelementptr <8 x i16>* %p0, i32 9 - %p10 = getelementptr <8 x i16>* %p0, i32 10 - %p11 = getelementptr <8 x i16>* %p0, i32 11 - %p12 = getelementptr <8 x i16>* %p0, i32 12 - %p13 = getelementptr <8 x i16>* %p0, i32 13 - %p14 = getelementptr <8 x i16>* %p0, i32 14 - %p15 = getelementptr <8 x i16>* %p0, i32 15 - %p16 = getelementptr <8 x i16>* %p0, i32 16 - %p17 = getelementptr <8 x i16>* %p0, i32 17 - %p18 = getelementptr <8 x i16>* %p0, i32 18 - %p19 = getelementptr <8 x i16>* %p0, i32 19 - %p20 = getelementptr <8 x i16>* %p0, i32 20 - %p21 = getelementptr <8 x i16>* %p0, i32 21 - %p22 = getelementptr <8 x i16>* %p0, i32 22 - %p23 = getelementptr <8 x i16>* %p0, i32 23 - %p24 = getelementptr <8 x i16>* %p0, i32 24 - %p25 = getelementptr <8 x i16>* %p0, i32 25 - %p26 = getelementptr <8 x i16>* %p0, i32 26 - %p27 = getelementptr <8 x i16>* %p0, i32 27 - %p28 = getelementptr <8 x i16>* %p0, i32 28 - %p29 = getelementptr <8 x i16>* %p0, i32 29 - %p30 = getelementptr <8 x i16>* %p0, i32 30 - %p31 = getelementptr <8 x i16>* %p0, i32 31 - %p32 = getelementptr <8 x i16>* %p0, i32 32 - %p33 = getelementptr <8 x i16>* %p0, i32 33 - %0 = load <8 x i16>* %p0, align 16 - %1 = load <8 x i16>* %p1, align 16 - %2 = load <8 x i16>* %p2, align 16 - %3 = load <8 x i16>* %p3, align 16 - %4 = load <8 x i16>* %p4, align 16 - %5 = load <8 x i16>* %p5, align 16 - %6 = load <8 x i16>* %p6, align 16 - %7 = load <8 x i16>* %p7, align 16 - %8 = load <8 x i16>* %p8, align 16 - %9 = load <8 x i16>* %p9, align 16 - %10 = load <8 x i16>* %p10, align 16 - %11 = load <8 x i16>* %p11, align 16 - %12 = load <8 x i16>* %p12, align 16 - %13 = load <8 x i16>* %p13, align 16 - %14 = load <8 x i16>* %p14, align 16 - %15 = load <8 x i16>* %p15, align 16 - %16 = load <8 x i16>* %p16, align 16 - %17 = load <8 x i16>* %p17, align 16 - %18 = load <8 x i16>* %p18, align 16 - %19 = load <8 x i16>* %p19, align 16 - %20 = load <8 x i16>* %p20, align 16 - %21 = load <8 x i16>* %p21, align 16 - %22 = load <8 x i16>* %p22, align 16 - %23 = load <8 x i16>* %p23, align 16 - %24 = load <8 x i16>* %p24, align 16 - %25 = load <8 x i16>* %p25, align 16 - %26 = load <8 x i16>* %p26, align 16 - %27 = load <8 x i16>* %p27, align 16 - %28 = load <8 x i16>* %p28, align 16 - %29 = load <8 x i16>* %p29, align 16 - %30 = load <8 x i16>* %p30, align 16 - %31 = load <8 x i16>* %p31, align 16 - %32 = load <8 x i16>* %p32, align 16 - %33 = load <8 x i16>* %p33, align 16 + %p1 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 1 + %p2 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 2 + %p3 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 3 + %p4 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 4 + %p5 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 5 + %p6 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 6 + %p7 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 7 + %p8 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 8 + %p9 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 9 + %p10 = getelementptr <8 x 
i16>, <8 x i16>* %p0, i32 10 + %p11 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 11 + %p12 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 12 + %p13 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 13 + %p14 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 14 + %p15 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 15 + %p16 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 16 + %p17 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 17 + %p18 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 18 + %p19 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 19 + %p20 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 20 + %p21 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 21 + %p22 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 22 + %p23 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 23 + %p24 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 24 + %p25 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 25 + %p26 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 26 + %p27 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 27 + %p28 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 28 + %p29 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 29 + %p30 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 30 + %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31 + %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32 + %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33 + %0 = load <8 x i16>, <8 x i16>* %p0, align 16 + %1 = load <8 x i16>, <8 x i16>* %p1, align 16 + %2 = load <8 x i16>, <8 x i16>* %p2, align 16 + %3 = load <8 x i16>, <8 x i16>* %p3, align 16 + %4 = load <8 x i16>, <8 x i16>* %p4, align 16 + %5 = load <8 x i16>, <8 x i16>* %p5, align 16 + %6 = load <8 x i16>, <8 x i16>* %p6, align 16 + %7 = load <8 x i16>, <8 x i16>* %p7, align 16 + %8 = load <8 x i16>, <8 x i16>* %p8, align 16 + %9 = load <8 x i16>, <8 x i16>* %p9, align 16 + %10 = load <8 x i16>, <8 x i16>* %p10, align 16 + %11 = load <8 x i16>, <8 x i16>* %p11, align 16 + %12 = load <8 x i16>, <8 x i16>* %p12, align 16 + %13 = load <8 x i16>, <8 x i16>* %p13, align 16 + %14 = load <8 x i16>, <8 x i16>* %p14, align 16 + %15 = load <8 x i16>, <8 x i16>* %p15, align 16 + %16 = load <8 x i16>, <8 x i16>* %p16, align 16 + %17 = load <8 x i16>, <8 x i16>* %p17, align 16 + %18 = load <8 x i16>, <8 x i16>* %p18, align 16 + %19 = load <8 x i16>, <8 x i16>* %p19, align 16 + %20 = load <8 x i16>, <8 x i16>* %p20, align 16 + %21 = load <8 x i16>, <8 x i16>* %p21, align 16 + %22 = load <8 x i16>, <8 x i16>* %p22, align 16 + %23 = load <8 x i16>, <8 x i16>* %p23, align 16 + %24 = load <8 x i16>, <8 x i16>* %p24, align 16 + %25 = load <8 x i16>, <8 x i16>* %p25, align 16 + %26 = load <8 x i16>, <8 x i16>* %p26, align 16 + %27 = load <8 x i16>, <8 x i16>* %p27, align 16 + %28 = load <8 x i16>, <8 x i16>* %p28, align 16 + %29 = load <8 x i16>, <8 x i16>* %p29, align 16 + %30 = load <8 x i16>, <8 x i16>* %p30, align 16 + %31 = load <8 x i16>, <8 x i16>* %p31, align 16 + %32 = load <8 x i16>, <8 x i16>* %p32, align 16 + %33 = load <8 x i16>, <8 x i16>* %p33, align 16 %r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1) %r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2) %r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3) @@ -304,73 +304,73 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind { entry: - %p1 = getelementptr <4 x i32>* %p0, i32 1 - %p2 = getelementptr <4 x i32>* %p0, i32 2 - %p3 = getelementptr <4 x i32>* %p0, i32 3 - %p4 = getelementptr <4 x i32>* %p0, i32 4 - %p5 
= getelementptr <4 x i32>* %p0, i32 5
- %p6 = getelementptr <4 x i32>* %p0, i32 6
- %p7 = getelementptr <4 x i32>* %p0, i32 7
- %p8 = getelementptr <4 x i32>* %p0, i32 8
- %p9 = getelementptr <4 x i32>* %p0, i32 9
- %p10 = getelementptr <4 x i32>* %p0, i32 10
- %p11 = getelementptr <4 x i32>* %p0, i32 11
- %p12 = getelementptr <4 x i32>* %p0, i32 12
- %p13 = getelementptr <4 x i32>* %p0, i32 13
- %p14 = getelementptr <4 x i32>* %p0, i32 14
- %p15 = getelementptr <4 x i32>* %p0, i32 15
- %p16 = getelementptr <4 x i32>* %p0, i32 16
- %p17 = getelementptr <4 x i32>* %p0, i32 17
- %p18 = getelementptr <4 x i32>* %p0, i32 18
- %p19 = getelementptr <4 x i32>* %p0, i32 19
- %p20 = getelementptr <4 x i32>* %p0, i32 20
- %p21 = getelementptr <4 x i32>* %p0, i32 21
- %p22 = getelementptr <4 x i32>* %p0, i32 22
- %p23 = getelementptr <4 x i32>* %p0, i32 23
- %p24 = getelementptr <4 x i32>* %p0, i32 24
- %p25 = getelementptr <4 x i32>* %p0, i32 25
- %p26 = getelementptr <4 x i32>* %p0, i32 26
- %p27 = getelementptr <4 x i32>* %p0, i32 27
- %p28 = getelementptr <4 x i32>* %p0, i32 28
- %p29 = getelementptr <4 x i32>* %p0, i32 29
- %p30 = getelementptr <4 x i32>* %p0, i32 30
- %p31 = getelementptr <4 x i32>* %p0, i32 31
- %p32 = getelementptr <4 x i32>* %p0, i32 32
- %p33 = getelementptr <4 x i32>* %p0, i32 33
- %0 = load <4 x i32>* %p0, align 16
- %1 = load <4 x i32>* %p1, align 16
- %2 = load <4 x i32>* %p2, align 16
- %3 = load <4 x i32>* %p3, align 16
- %4 = load <4 x i32>* %p4, align 16
- %5 = load <4 x i32>* %p5, align 16
- %6 = load <4 x i32>* %p6, align 16
- %7 = load <4 x i32>* %p7, align 16
- %8 = load <4 x i32>* %p8, align 16
- %9 = load <4 x i32>* %p9, align 16
- %10 = load <4 x i32>* %p10, align 16
- %11 = load <4 x i32>* %p11, align 16
- %12 = load <4 x i32>* %p12, align 16
- %13 = load <4 x i32>* %p13, align 16
- %14 = load <4 x i32>* %p14, align 16
- %15 = load <4 x i32>* %p15, align 16
- %16 = load <4 x i32>* %p16, align 16
- %17 = load <4 x i32>* %p17, align 16
- %18 = load <4 x i32>* %p18, align 16
- %19 = load <4 x i32>* %p19, align 16
- %20 = load <4 x i32>* %p20, align 16
- %21 = load <4 x i32>* %p21, align 16
- %22 = load <4 x i32>* %p22, align 16
- %23 = load <4 x i32>* %p23, align 16
- %24 = load <4 x i32>* %p24, align 16
- %25 = load <4 x i32>* %p25, align 16
- %26 = load <4 x i32>* %p26, align 16
- %27 = load <4 x i32>* %p27, align 16
- %28 = load <4 x i32>* %p28, align 16
- %29 = load <4 x i32>* %p29, align 16
- %30 = load <4 x i32>* %p30, align 16
- %31 = load <4 x i32>* %p31, align 16
- %32 = load <4 x i32>* %p32, align 16
- %33 = load <4 x i32>* %p33, align 16
+ %p1 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 1
+ %p2 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 2
+ %p3 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 3
+ %p4 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 4
+ %p5 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 5
+ %p6 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 6
+ %p7 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 7
+ %p8 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 8
+ %p9 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 9
+ %p10 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 10
+ %p11 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 11
+ %p12 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 12
+ %p13 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 13
+ %p14 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 14
+ %p15 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 15
+ %p16 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 16
+ %p17 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 17
+ %p18 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 18
+ %p19 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 19
+ %p20 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 20
+ %p21 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 21
+ %p22 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 22
+ %p23 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 23
+ %p24 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 24
+ %p25 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 25
+ %p26 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 26
+ %p27 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 27
+ %p28 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 28
+ %p29 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 29
+ %p30 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 30
+ %p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31
+ %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32
+ %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33
+ %0 = load <4 x i32>, <4 x i32>* %p0, align 16
+ %1 = load <4 x i32>, <4 x i32>* %p1, align 16
+ %2 = load <4 x i32>, <4 x i32>* %p2, align 16
+ %3 = load <4 x i32>, <4 x i32>* %p3, align 16
+ %4 = load <4 x i32>, <4 x i32>* %p4, align 16
+ %5 = load <4 x i32>, <4 x i32>* %p5, align 16
+ %6 = load <4 x i32>, <4 x i32>* %p6, align 16
+ %7 = load <4 x i32>, <4 x i32>* %p7, align 16
+ %8 = load <4 x i32>, <4 x i32>* %p8, align 16
+ %9 = load <4 x i32>, <4 x i32>* %p9, align 16
+ %10 = load <4 x i32>, <4 x i32>* %p10, align 16
+ %11 = load <4 x i32>, <4 x i32>* %p11, align 16
+ %12 = load <4 x i32>, <4 x i32>* %p12, align 16
+ %13 = load <4 x i32>, <4 x i32>* %p13, align 16
+ %14 = load <4 x i32>, <4 x i32>* %p14, align 16
+ %15 = load <4 x i32>, <4 x i32>* %p15, align 16
+ %16 = load <4 x i32>, <4 x i32>* %p16, align 16
+ %17 = load <4 x i32>, <4 x i32>* %p17, align 16
+ %18 = load <4 x i32>, <4 x i32>* %p18, align 16
+ %19 = load <4 x i32>, <4 x i32>* %p19, align 16
+ %20 = load <4 x i32>, <4 x i32>* %p20, align 16
+ %21 = load <4 x i32>, <4 x i32>* %p21, align 16
+ %22 = load <4 x i32>, <4 x i32>* %p22, align 16
+ %23 = load <4 x i32>, <4 x i32>* %p23, align 16
+ %24 = load <4 x i32>, <4 x i32>* %p24, align 16
+ %25 = load <4 x i32>, <4 x i32>* %p25, align 16
+ %26 = load <4 x i32>, <4 x i32>* %p26, align 16
+ %27 = load <4 x i32>, <4 x i32>* %p27, align 16
+ %28 = load <4 x i32>, <4 x i32>* %p28, align 16
+ %29 = load <4 x i32>, <4 x i32>* %p29, align 16
+ %30 = load <4 x i32>, <4 x i32>* %p30, align 16
+ %31 = load <4 x i32>, <4 x i32>* %p31, align 16
+ %32 = load <4 x i32>, <4 x i32>* %p32, align 16
+ %33 = load <4 x i32>, <4 x i32>* %p33, align 16
 %r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
 %r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
 %r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
@@ -453,73 +453,73 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
 define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
 entry:
- %p1 = getelementptr <2 x i64>* %p0, i32 1
- %p2 = getelementptr <2 x i64>* %p0, i32 2
- %p3 = getelementptr <2 x i64>* %p0, i32 3
- %p4 = getelementptr <2 x i64>* %p0, i32 4
- %p5 = getelementptr <2 x i64>* %p0, i32 5
- %p6 = getelementptr <2 x i64>* %p0, i32 6
- %p7 = getelementptr <2 x i64>* %p0, i32 7
- %p8 = getelementptr <2 x i64>* %p0, i32 8
- %p9 = getelementptr <2 x i64>* %p0, i32 9
- %p10 = getelementptr <2 x i64>* %p0, i32 10
- %p11 = getelementptr <2 x i64>* %p0, i32 11
- %p12 = getelementptr <2 x i64>* %p0, i32 12
- %p13 = getelementptr <2 x i64>* %p0, i32 13
- %p14 = getelementptr <2 x i64>* %p0, i32 14
- %p15 = getelementptr <2 x i64>* %p0, i32 15
- %p16 = getelementptr <2 x i64>* %p0, i32 16
- %p17 = getelementptr <2 x i64>* %p0, i32 17
- %p18 = getelementptr <2 x i64>* %p0, i32 18
- %p19 = getelementptr <2 x i64>* %p0, i32 19
- %p20 = getelementptr <2 x i64>* %p0, i32 20
- %p21 = getelementptr <2 x i64>* %p0, i32 21
- %p22 = getelementptr <2 x i64>* %p0, i32 22
- %p23 = getelementptr <2 x i64>* %p0, i32 23
- %p24 = getelementptr <2 x i64>* %p0, i32 24
- %p25 = getelementptr <2 x i64>* %p0, i32 25
- %p26 = getelementptr <2 x i64>* %p0, i32 26
- %p27 = getelementptr <2 x i64>* %p0, i32 27
- %p28 = getelementptr <2 x i64>* %p0, i32 28
- %p29 = getelementptr <2 x i64>* %p0, i32 29
- %p30 = getelementptr <2 x i64>* %p0, i32 30
- %p31 = getelementptr <2 x i64>* %p0, i32 31
- %p32 = getelementptr <2 x i64>* %p0, i32 32
- %p33 = getelementptr <2 x i64>* %p0, i32 33
- %0 = load <2 x i64>* %p0, align 16
- %1 = load <2 x i64>* %p1, align 16
- %2 = load <2 x i64>* %p2, align 16
- %3 = load <2 x i64>* %p3, align 16
- %4 = load <2 x i64>* %p4, align 16
- %5 = load <2 x i64>* %p5, align 16
- %6 = load <2 x i64>* %p6, align 16
- %7 = load <2 x i64>* %p7, align 16
- %8 = load <2 x i64>* %p8, align 16
- %9 = load <2 x i64>* %p9, align 16
- %10 = load <2 x i64>* %p10, align 16
- %11 = load <2 x i64>* %p11, align 16
- %12 = load <2 x i64>* %p12, align 16
- %13 = load <2 x i64>* %p13, align 16
- %14 = load <2 x i64>* %p14, align 16
- %15 = load <2 x i64>* %p15, align 16
- %16 = load <2 x i64>* %p16, align 16
- %17 = load <2 x i64>* %p17, align 16
- %18 = load <2 x i64>* %p18, align 16
- %19 = load <2 x i64>* %p19, align 16
- %20 = load <2 x i64>* %p20, align 16
- %21 = load <2 x i64>* %p21, align 16
- %22 = load <2 x i64>* %p22, align 16
- %23 = load <2 x i64>* %p23, align 16
- %24 = load <2 x i64>* %p24, align 16
- %25 = load <2 x i64>* %p25, align 16
- %26 = load <2 x i64>* %p26, align 16
- %27 = load <2 x i64>* %p27, align 16
- %28 = load <2 x i64>* %p28, align 16
- %29 = load <2 x i64>* %p29, align 16
- %30 = load <2 x i64>* %p30, align 16
- %31 = load <2 x i64>* %p31, align 16
- %32 = load <2 x i64>* %p32, align 16
- %33 = load <2 x i64>* %p33, align 16
+ %p1 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1
+ %p2 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 2
+ %p3 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 3
+ %p4 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 4
+ %p5 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 5
+ %p6 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 6
+ %p7 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 7
+ %p8 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 8
+ %p9 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 9
+ %p10 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 10
+ %p11 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 11
+ %p12 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 12
+ %p13 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 13
+ %p14 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 14
+ %p15 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 15
+ %p16 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 16
+ %p17 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 17
+ %p18 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 18
+ %p19 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 19
+ %p20 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 20
+ %p21 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 21
+ %p22 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 22
+ %p23 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 23
+ %p24 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 24
+ %p25 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 25
+ %p26 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 26
+ %p27 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 27
+ %p28 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 28
+ %p29 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 29
+ %p30 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 30
+ %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31
+ %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32
+ %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33
+ %0 = load <2 x i64>, <2 x i64>* %p0, align 16
+ %1 = load <2 x i64>, <2 x i64>* %p1, align 16
+ %2 = load <2 x i64>, <2 x i64>* %p2, align 16
+ %3 = load <2 x i64>, <2 x i64>* %p3, align 16
+ %4 = load <2 x i64>, <2 x i64>* %p4, align 16
+ %5 = load <2 x i64>, <2 x i64>* %p5, align 16
+ %6 = load <2 x i64>, <2 x i64>* %p6, align 16
+ %7 = load <2 x i64>, <2 x i64>* %p7, align 16
+ %8 = load <2 x i64>, <2 x i64>* %p8, align 16
+ %9 = load <2 x i64>, <2 x i64>* %p9, align 16
+ %10 = load <2 x i64>, <2 x i64>* %p10, align 16
+ %11 = load <2 x i64>, <2 x i64>* %p11, align 16
+ %12 = load <2 x i64>, <2 x i64>* %p12, align 16
+ %13 = load <2 x i64>, <2 x i64>* %p13, align 16
+ %14 = load <2 x i64>, <2 x i64>* %p14, align 16
+ %15 = load <2 x i64>, <2 x i64>* %p15, align 16
+ %16 = load <2 x i64>, <2 x i64>* %p16, align 16
+ %17 = load <2 x i64>, <2 x i64>* %p17, align 16
+ %18 = load <2 x i64>, <2 x i64>* %p18, align 16
+ %19 = load <2 x i64>, <2 x i64>* %p19, align 16
+ %20 = load <2 x i64>, <2 x i64>* %p20, align 16
+ %21 = load <2 x i64>, <2 x i64>* %p21, align 16
+ %22 = load <2 x i64>, <2 x i64>* %p22, align 16
+ %23 = load <2 x i64>, <2 x i64>* %p23, align 16
+ %24 = load <2 x i64>, <2 x i64>* %p24, align 16
+ %25 = load <2 x i64>, <2 x i64>* %p25, align 16
+ %26 = load <2 x i64>, <2 x i64>* %p26, align 16
+ %27 = load <2 x i64>, <2 x i64>* %p27, align 16
+ %28 = load <2 x i64>, <2 x i64>* %p28, align 16
+ %29 = load <2 x i64>, <2 x i64>* %p29, align 16
+ %30 = load <2 x i64>, <2 x i64>* %p30, align 16
+ %31 = load <2 x i64>, <2 x i64>* %p31, align 16
+ %32 = load <2 x i64>, <2 x i64>* %p32, align 16
+ %33 = load <2 x i64>, <2 x i64>* %p33, align 16
 %r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
 %r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2)
 %r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3)
diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
index d5b97f5..8790923 100644
--- a/test/CodeGen/Mips/msa/vec.ll
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -9,8 +9,8 @@
 define void @llvm_mips_and_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
 %2 = bitcast <16 x i8> %0 to <16 x i8>
 %3 = bitcast <16 x i8> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -32,8 +32,8 @@ entry:
 define void @llvm_mips_and_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
 %2 = bitcast <8 x i16> %0 to <16 x i8>
 %3 = bitcast <8 x i16> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -55,8 +55,8 @@ entry:
 define void @llvm_mips_and_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
 %2 = bitcast <4 x i32> %0 to <16 x i8>
 %3 = bitcast <4 x i32> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -78,8 +78,8 @@ entry:
 define void @llvm_mips_and_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
 %2 = bitcast <2 x i64> %0 to <16 x i8>
 %3 = bitcast <2 x i64> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -97,8 +97,8 @@ entry:
 ;
 define void @and_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
 %2 = and <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
 ret void
@@ -113,8 +113,8 @@ entry:
 ;
 define void @and_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
 %2 = and <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
 ret void
@@ -130,8 +130,8 @@ entry:
 define void @and_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
 %2 = and <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
 ret void
@@ -147,8 +147,8 @@ entry:
 define void @and_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
 %2 = and <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
 ret void
@@ -168,9 +168,9 @@ entry:
 define void @llvm_mips_bmnz_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
 %3 = bitcast <16 x i8> %0 to <16 x i8>
 %4 = bitcast <16 x i8> %1 to <16 x i8>
 %5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -198,9 +198,9 @@ entry:
 define void @llvm_mips_bmnz_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
 %3 = bitcast <8 x i16> %0 to <16 x i8>
 %4 = bitcast <8 x i16> %1 to <16 x i8>
 %5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -228,9 +228,9 @@ entry:
 define void @llvm_mips_bmnz_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
 %3 = bitcast <4 x i32> %0 to <16 x i8>
 %4 = bitcast <4 x i32> %1 to <16 x i8>
 %5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -258,9 +258,9 @@ entry:
 define void @llvm_mips_bmnz_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
 %3 = bitcast <2 x i64> %0 to <16 x i8>
 %4 = bitcast <2 x i64> %1 to <16 x i8>
 %5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -288,9 +288,9 @@ entry:
 define void @llvm_mips_bmz_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3
 %3 = bitcast <16 x i8> %0 to <16 x i8>
 %4 = bitcast <16 x i8> %1 to <16 x i8>
 %5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -319,9 +319,9 @@ entry:
 define void @llvm_mips_bmz_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3
 %3 = bitcast <8 x i16> %0 to <16 x i8>
 %4 = bitcast <8 x i16> %1 to <16 x i8>
 %5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -350,9 +350,9 @@ entry:
 define void @llvm_mips_bmz_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3
 %3 = bitcast <4 x i32> %0 to <16 x i8>
 %4 = bitcast <4 x i32> %1 to <16 x i8>
 %5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -381,9 +381,9 @@ entry:
 define void @llvm_mips_bmz_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3
 %3 = bitcast <2 x i64> %0 to <16 x i8>
 %4 = bitcast <2 x i64> %1 to <16 x i8>
 %5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -412,9 +412,9 @@ entry:
 define void @llvm_mips_bsel_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3
 %3 = bitcast <16 x i8> %0 to <16 x i8>
 %4 = bitcast <16 x i8> %1 to <16 x i8>
 %5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -443,9 +443,9 @@ entry:
 define void @llvm_mips_bsel_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3
 %3 = bitcast <8 x i16> %0 to <16 x i8>
 %4 = bitcast <8 x i16> %1 to <16 x i8>
 %5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -474,9 +474,9 @@ entry:
 define void @llvm_mips_bsel_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3
 %3 = bitcast <4 x i32> %0 to <16 x i8>
 %4 = bitcast <4 x i32> %1 to <16 x i8>
 %5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -505,9 +505,9 @@ entry:
 define void @llvm_mips_bsel_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3
 %3 = bitcast <2 x i64> %0 to <16 x i8>
 %4 = bitcast <2 x i64> %1 to <16 x i8>
 %5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -535,8 +535,8 @@ entry:
 define void @llvm_mips_nor_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2
 %2 = bitcast <16 x i8> %0 to <16 x i8>
 %3 = bitcast <16 x i8> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -558,8 +558,8 @@ entry:
 define void @llvm_mips_nor_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2
 %2 = bitcast <8 x i16> %0 to <16 x i8>
 %3 = bitcast <8 x i16> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -581,8 +581,8 @@ entry:
 define void @llvm_mips_nor_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2
 %2 = bitcast <4 x i32> %0 to <16 x i8>
 %3 = bitcast <4 x i32> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -604,8 +604,8 @@ entry:
 define void @llvm_mips_nor_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2
 %2 = bitcast <2 x i64> %0 to <16 x i8>
 %3 = bitcast <2 x i64> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -627,8 +627,8 @@ entry:
 define void @llvm_mips_or_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
 %2 = bitcast <16 x i8> %0 to <16 x i8>
 %3 = bitcast <16 x i8> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -650,8 +650,8 @@ entry:
 define void @llvm_mips_or_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
 %2 = bitcast <8 x i16> %0 to <16 x i8>
 %3 = bitcast <8 x i16> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -673,8 +673,8 @@ entry:
 define void @llvm_mips_or_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
 %2 = bitcast <4 x i32> %0 to <16 x i8>
 %3 = bitcast <4 x i32> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -696,8 +696,8 @@ entry:
 define void @llvm_mips_or_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
 %2 = bitcast <2 x i64> %0 to <16 x i8>
 %3 = bitcast <2 x i64> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -715,8 +715,8 @@ entry:
 ;
 define void @or_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
 %2 = or <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
 ret void
@@ -731,8 +731,8 @@ entry:
 ;
 define void @or_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
 %2 = or <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
 ret void
@@ -748,8 +748,8 @@ entry:
 define void @or_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
 %2 = or <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
 ret void
@@ -765,8 +765,8 @@ entry:
 define void @or_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
 %2 = or <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
 ret void
@@ -785,8 +785,8 @@ entry:
 define void @llvm_mips_xor_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
 %2 = bitcast <16 x i8> %0 to <16 x i8>
 %3 = bitcast <16 x i8> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -808,8 +808,8 @@ entry:
 define void @llvm_mips_xor_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
 %2 = bitcast <8 x i16> %0 to <16 x i8>
 %3 = bitcast <8 x i16> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -831,8 +831,8 @@ entry:
 define void @llvm_mips_xor_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
 %2 = bitcast <4 x i32> %0 to <16 x i8>
 %3 = bitcast <4 x i32> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -854,8 +854,8 @@ entry:
 define void @llvm_mips_xor_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
 %2 = bitcast <2 x i64> %0 to <16 x i8>
 %3 = bitcast <2 x i64> %1 to <16 x i8>
 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -873,8 +873,8 @@ entry:
 ;
 define void @xor_v_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
 %2 = xor <16 x i8> %0, %1
 store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
 ret void
@@ -889,8 +889,8 @@ entry:
 ;
 define void @xor_v_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
 %2 = xor <8 x i16> %0, %1
 store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
 ret void
@@ -906,8 +906,8 @@ entry:
 define void @xor_v_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
 %2 = xor <4 x i32> %0, %1
 store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
 ret void
@@ -923,8 +923,8 @@ entry:
 define void @xor_v_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
 %2 = xor <2 x i64> %0, %1
 store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
 ret void
diff --git a/test/CodeGen/Mips/msa/vecs10.ll b/test/CodeGen/Mips/msa/vecs10.ll
index e22e075..f442f77 100644
--- a/test/CodeGen/Mips/msa/vecs10.ll
+++ b/test/CodeGen/Mips/msa/vecs10.ll
@@ -7,7 +7,7 @@
 define i32 @llvm_mips_bnz_v_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bnz_v_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_v_ARG1
 %1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
 %2 = icmp eq i32 %1, 0
 br i1 %2, label %true, label %false
@@ -28,7 +28,7 @@
 declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind
 define i32 @llvm_mips_bz_v_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_bz_v_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bz_v_ARG1
 %1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
 %2 = icmp eq i32 %1, 0
 br i1 %2, label %true, label %false
diff --git a/test/CodeGen/Mips/mul.ll b/test/CodeGen/Mips/mul.ll
index 4ce801b..3231f9c 100644
--- a/test/CodeGen/Mips/mul.ll
+++ b/test/CodeGen/Mips/mul.ll
@@ -6,8 +6,8 @@
 define void @test() nounwind {
 entry:
- %0 = load i32* @iiii, align 4
- %1 = load i32* @jjjj, align 4
+ %0 = load i32, i32* @iiii, align 4
+ %1 = load i32, i32* @jjjj, align 4
 %mul = mul nsw i32 %1, %0
 ; 16: mult ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: mflo ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/mulll.ll b/test/CodeGen/Mips/mulll.ll
index e37b919..6e5ba64 100644
--- a/test/CodeGen/Mips/mulll.ll
+++ b/test/CodeGen/Mips/mulll.ll
@@ -6,8 +6,8 @@
 define void @test() nounwind {
 entry:
- %0 = load i64* @iiii, align 8
- %1 = load i64* @jjjj, align 8
+ %0 = load i64, i64* @iiii, align 8
+ %1 = load i64, i64* @jjjj, align 8
 %mul = mul nsw i64 %1, %0
 store i64 %mul, i64* @kkkk, align 8
 ; 16: multu ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/mulull.ll b/test/CodeGen/Mips/mulull.ll
index 4d23c69..c133448 100644
--- a/test/CodeGen/Mips/mulull.ll
+++ b/test/CodeGen/Mips/mulull.ll
@@ -7,8 +7,8 @@
 define void @test() nounwind {
 entry:
- %0 = load i64* @iiii, align 8
- %1 = load i64* @jjjj, align 8
+ %0 = load i64, i64* @iiii, align 8
+ %1 = load i64, i64* @jjjj, align 8
 %mul = mul nsw i64 %1, %0
 store i64 %mul, i64* @kkkk, align 8
 ; 16: multu ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/nacl-align.ll b/test/CodeGen/Mips/nacl-align.ll
index e61b834..ec8f3f0 100644
--- a/test/CodeGen/Mips/nacl-align.ll
+++ b/test/CodeGen/Mips/nacl-align.ll
@@ -67,8 +67,8 @@ default:
 define i32 @test2(i32 %i) {
 entry:
- %elementptr = getelementptr inbounds [2 x i8*]* @bb_array, i32 0, i32 %i
- %0 = load i8** %elementptr, align 4
+ %elementptr = getelementptr inbounds [2 x i8*], [2 x i8*]* @bb_array, i32 0, i32 %i
+ %0 = load i8*, i8** %elementptr, align 4
 indirectbr i8* %0, [label %bb1, label %bb2]
 bb1:
diff --git a/test/CodeGen/Mips/nacl-branch-delay.ll b/test/CodeGen/Mips/nacl-branch-delay.ll
index d251eee..2927f39 100644
--- a/test/CodeGen/Mips/nacl-branch-delay.ll
+++ b/test/CodeGen/Mips/nacl-branch-delay.ll
@@ -10,7 +10,7 @@
 declare void @f2()
 define void @test1() {
- %1 = load i32* @x, align 4
+ %1 = load i32, i32* @x, align 4
 call void @f1(i32 %1)
 ret void
diff --git a/test/CodeGen/Mips/nacl-reserved-regs.ll b/test/CodeGen/Mips/nacl-reserved-regs.ll
index ae21283..efe2a66 100644
--- a/test/CodeGen/Mips/nacl-reserved-regs.ll
+++ b/test/CodeGen/Mips/nacl-reserved-regs.ll
@@ -5,22 +5,22 @@
 @var = external global i32
 define void @f() {
- %val1 = load volatile i32* @var
- %val2 = load volatile i32* @var
- %val3 = load volatile i32* @var
- %val4 = load volatile i32* @var
- %val5 = load volatile i32* @var
- %val6 = load volatile i32* @var
- %val7 = load volatile i32* @var
- %val8 = load volatile i32* @var
- %val9 = load volatile i32* @var
- %val10 = load volatile i32* @var
- %val11 = load volatile i32* @var
- %val12 = load volatile i32* @var
- %val13 = load volatile i32* @var
- %val14 = load volatile i32* @var
- %val15 = load volatile i32* @var
- %val16 = load volatile i32* @var
+ %val1 = load volatile i32, i32* @var
+ %val2 = load volatile i32, i32* @var
+ %val3 = load volatile i32, i32* @var
+ %val4 = load volatile i32, i32* @var
+ %val5 = load volatile i32, i32* @var
+ %val6 = load volatile i32, i32* @var
+ %val7 = load volatile i32, i32* @var
+ %val8 = load volatile i32, i32* @var
+ %val9 = load volatile i32, i32* @var
+ %val10 = load volatile i32, i32* @var
+ %val11 = load volatile i32, i32* @var
+ %val12 = load volatile i32, i32* @var
+ %val13 = load volatile i32, i32* @var
+ %val14 = load volatile i32, i32* @var
+ %val15 = load volatile i32, i32* @var
+ %val16 = load volatile i32, i32* @var
 store volatile i32 %val1, i32* @var
 store volatile i32 %val2, i32* @var
 store volatile i32 %val3, i32* @var
diff --git a/test/CodeGen/Mips/neg1.ll b/test/CodeGen/Mips/neg1.ll
index 281e626..e2b10e0 100644
--- a/test/CodeGen/Mips/neg1.ll
+++ b/test/CodeGen/Mips/neg1.ll
@@ -5,10 +5,10 @@
 define i32 @main() nounwind {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %sub = sub nsw i32 0, %0
 ; 16: neg ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %sub)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %sub)
 ret i32 0
 }
diff --git a/test/CodeGen/Mips/no-odd-spreg-msa.ll b/test/CodeGen/Mips/no-odd-spreg-msa.ll
index 30dd1ff..cf79557 100644
--- a/test/CodeGen/Mips/no-odd-spreg-msa.ll
+++ b/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -8,7 +8,7 @@ entry:
 ; Force the float into an odd-numbered register using named registers and
 ; load the vector.
 %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
- %0 = load volatile <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, <4 x float>* @v4f32
 ; Clobber all except $f12/$w12 and $f13
 ;
@@ -42,7 +42,7 @@ entry:
 ; Force the float into an odd-numbered register using named registers and
 ; load the vector.
 %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
- %0 = load volatile <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, <4 x float>* @v4f32
 ; Clobber all except $f12/$w12 and $f13
 ;
@@ -73,7 +73,7 @@ entry:
 define float @msa_extract_0() {
 entry:
- %0 = load volatile <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, <4 x float>* @v4f32
 %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
 ; Clobber all except $f12, and $f13
@@ -101,7 +101,7 @@ entry:
 define float @msa_extract_1() {
 entry:
- %0 = load volatile <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, <4 x float>* @v4f32
 %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
 ; Clobber all except $f13
diff --git a/test/CodeGen/Mips/nomips16.ll b/test/CodeGen/Mips/nomips16.ll
index 5f7d74e..418d8ea 100644
--- a/test/CodeGen/Mips/nomips16.ll
+++ b/test/CodeGen/Mips/nomips16.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind
 define void @foo() #0 {
 entry:
- %0 = load float* @x, align 4
+ %0 = load float, float* @x, align 4
 %conv = fpext float %0 to double
 %add = fadd double %conv, 1.500000e+00
 %conv1 = fptrunc double %add to float
@@ -20,7 +20,7 @@ entry:
 ; Function Attrs: nounwind
 define void @nofoo() #1 {
 entry:
- %0 = load float* @x, align 4
+ %0 = load float, float* @x, align 4
 %conv = fpext float %0 to double
 %add = fadd double %conv, 3.900000e+00
 %conv1 = fptrunc double %add to float
diff --git a/test/CodeGen/Mips/not1.ll b/test/CodeGen/Mips/not1.ll
index 2163b23..bf5d06e 100644
--- a/test/CodeGen/Mips/not1.ll
+++ b/test/CodeGen/Mips/not1.ll
@@ -6,10 +6,10 @@
 define i32 @main() nounwind {
 entry:
- %0 = load i32* @x, align 4
+ %0 = load i32, i32* @x, align 4
 %neg = xor i32 %0, -1
 ; 16: not ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %neg)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %neg)
 ret i32 0
 }
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index 5db47ac..108c663 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -30,7 +30,7 @@ entry:
 %agg.tmp10 = alloca %struct.S3, align 4
 call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
 call void @callee2(%struct.S2* byval @f1.s2) nounwind
- %tmp11 = getelementptr inbounds %struct.S3* %agg.tmp10, i32 0, i32 0
+ %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
 store i8 11, i8* %tmp11, align 4
 call void @callee3(float 2.100000e+01, %struct.S3* byval %agg.tmp10, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
 ret void
@@ -61,18 +61,18 @@ entry:
 ; CHECK: sw $[[R3]], 16($sp)
 ; CHECK: mfc1 $6, $f[[F0]]
- %i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp = load i32* %i2, align 4
- %d = getelementptr inbounds %struct.S1* %s1, i32 0, i32 4
- %tmp1 = load double* %d, align 8
- %ll = getelementptr inbounds %struct.S1* %s1, i32 0, i32 3
- %tmp2 = load i64* %ll, align 8
- %i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp3 = load i32* %i, align 4
- %s = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1
- %tmp4 = load i16* %s, align 2
- %c = getelementptr inbounds %struct.S1* %s1, i32 0, i32 0
- %tmp5 = load i8* %c, align 1
+ %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
+ %tmp = load i32, i32* %i2, align 4
+ %d = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 4
+ %tmp1 = load double, double* %d, align 8
+ %ll = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 3
+ %tmp2 = load i64, i64* %ll, align 8
+ %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
+ %tmp3 = load i32, i32* %i, align 4
+ %s = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1
+ %tmp4 = load i16, i16* %s, align 2
+ %c = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 0
+ %tmp5 = load i8, i8* %c, align 1
 tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
 ret void
 }
@@ -90,10 +90,10 @@ entry:
 ; CHECK: lw $[[R0:[0-9]+]], 60($sp)
 ; CHECK: sw $[[R0]], 24($sp)
- %arrayidx = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 0
- %tmp = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 3
- %tmp3 = load i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 3
+ %tmp3 = load i32, i32* %arrayidx2, align 4
 tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
 ret void
 }
@@ -110,12 +110,12 @@ entry:
 ; CHECK: sw $[[R0]], 32($sp)
 ; CHECK: sw $[[R1]], 24($sp)
- %i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp = load i32* %i, align 4
- %i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp1 = load i32* %i2, align 4
- %c = getelementptr inbounds %struct.S3* %s3, i32 0, i32 0
- %tmp2 = load i8* %c, align 1
+ %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
+ %tmp = load i32, i32* %i, align 4
+ %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
+ %tmp1 = load i32, i32* %i2, align 4
+ %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0
+ %tmp2 = load i8, i8* %c, align 1
 tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
 ret void
 }
diff --git a/test/CodeGen/Mips/o32_cc_vararg.ll b/test/CodeGen/Mips/o32_cc_vararg.ll
index 10972e8..b4597a3 100644
--- a/test/CodeGen/Mips/o32_cc_vararg.ll
+++ b/test/CodeGen/Mips/o32_cc_vararg.ll
@@ -24,7 +24,7 @@ entry:
 store i32 %0, i32* %b, align 4
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load i32* %b, align 4
+ %tmp = load i32, i32* %b, align 4
 ret i32 %tmp
 ; CHECK-LABEL: va1:
@@ -50,7 +50,7 @@ entry:
 store double %0, double* %b, align 8
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load double* %b, align 8
+ %tmp = load double, double* %b, align 8
 ret double %tmp
 ; CHECK-LABEL: va2:
@@ -78,7 +78,7 @@ entry:
 store i32 %0, i32* %b, align 4
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load i32* %b, align 4
+ %tmp = load i32, i32* %b, align 4
 ret i32 %tmp
 ; CHECK-LABEL: va3:
@@ -101,7 +101,7 @@ entry:
 store double %0, double* %b, align 8
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load double* %b, align 8
+ %tmp = load double, double* %b, align 8
 ret double %tmp
 ; CHECK-LABEL: va4:
@@ -129,7 +129,7 @@ entry:
 store i32 %0, i32* %d, align 4
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load i32* %d, align 4
+ %tmp = load i32, i32* %d, align 4
 ret i32 %tmp
 ; CHECK-LABEL: va5:
@@ -155,7 +155,7 @@ entry:
 store double %0, double* %d, align 8
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load double* %d, align 8
+ %tmp = load double, double* %d, align 8
 ret double %tmp
 ; CHECK-LABEL: va6:
@@ -183,7 +183,7 @@ entry:
 store i32 %0, i32* %c, align 4
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load i32* %c, align 4
+ %tmp = load i32, i32* %c, align 4
 ret i32 %tmp
 ; CHECK-LABEL: va7:
@@ -206,7 +206,7 @@ entry:
 store double %0, double* %c, align 8
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load double* %c, align 8
+ %tmp = load double, double* %c, align 8
 ret double %tmp
 ; CHECK-LABEL: va8:
@@ -232,7 +232,7 @@ entry:
 store i32 %0, i32* %d, align 4
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load i32* %d, align 4
+ %tmp = load i32, i32* %d, align 4
 ret i32 %tmp
 ; CHECK-LABEL: va9:
@@ -257,7 +257,7 @@ entry:
 store double %0, double* %d, align 8
 %ap2 = bitcast i8** %ap to i8*
 call void @llvm.va_end(i8* %ap2)
- %tmp = load double* %d, align 8
+ %tmp = load double, double* %d, align 8
 ret double %tmp
 ; CHECK-LABEL: va10:
diff --git a/test/CodeGen/Mips/optimize-pic-o0.ll b/test/CodeGen/Mips/optimize-pic-o0.ll
index 554d49e..454bc85 100644
--- a/test/CodeGen/Mips/optimize-pic-o0.ll
+++ b/test/CodeGen/Mips/optimize-pic-o0.ll
@@ -10,7 +10,7 @@ entry:
 br label %for.cond
 for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
 %cmp = icmp slt i32 %0, 10
 br i1 %cmp, label %for.body, label %for.end
@@ -20,13 +20,13 @@ for.body: ; preds = %for.cond
 br label %for.inc
 for.inc: ; preds = %for.body
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
 %inc = add nsw i32 %1, 1
 store i32 %inc, i32* %i, align 4
 br label %for.cond
 for.end: ; preds = %for.cond
- %2 = load i32* %retval
+ %2 = load i32, i32* %retval
 ret i32 %2
 }
diff --git a/test/CodeGen/Mips/or1.ll b/test/CodeGen/Mips/or1.ll
index b1c3696..66dd070 100644
--- a/test/CodeGen/Mips/or1.ll
+++ b/test/CodeGen/Mips/or1.ll
@@ -6,11 +6,11 @@
 define i32 @main() nounwind {
 entry:
- %0 = load i32* @x, align 4
- %1 = load i32* @y, align 4
+ %0 = load i32, i32* @x, align 4
+ %1 = load i32, i32* @y, align 4
 %or = or i32 %0, %1
 ; 16: or ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %or)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %or)
 ret i32 0
 }
diff --git a/test/CodeGen/Mips/prevent-hoisting.ll b/test/CodeGen/Mips/prevent-hoisting.ll
index 210fe3b..81b14d7 100644
--- a/test/CodeGen/Mips/prevent-hoisting.ll
+++ b/test/CodeGen/Mips/prevent-hoisting.ll
@@ -46,7 +46,7 @@
 define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) {
- %1 = load i32* undef, align 4
+ %1 = load i32, i32* undef, align 4
 br i1 false, label %2, label %3
 ; <label>:2 ; preds = %0
@@ -65,7 +65,7 @@ switch.lookup: ; preds = %3
 br label %6
 ; <label>:6 ; preds = %5, %4
- %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
+ %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]], [64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
 br i1 undef, label %switch.lookup6, label %8
 switch.lookup6: ; preds = %6
@@ -77,7 +77,7 @@ switch.lookup6: ; preds = %6
 ; <label>:9 ; preds = %8
 %10 = and i32 %b8, 1
 %11 = shl nuw nsw i32 %10, 3
- %12 = getelementptr inbounds %struct.Slice* null, i32 0, i32 9
+ %12 = getelementptr inbounds %struct.Slice, %struct.Slice* null, i32 0, i32 9
 br i1 undef, label %.preheader, label %.preheader11
 .preheader11: ; preds = %21, %9
@@ -92,19 +92,19 @@ switch.lookup6: ; preds = %6
 br label %15
 ; <label>:15 ; preds = %14, %13
- %16 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
- %17 = load i32* %16, align 4
- %18 = getelementptr inbounds %struct.datapartition* null, i32 %17, i32 2
- %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
+ %16 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %17 = load i32, i32* %16, align 4
+ %18 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %17, i32 2
+ %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)*, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
 %20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
 br i1 false, label %.loopexit, label %21
 ; <label>:21 ; preds = %15
 %22 = add i32 %coef_ctr.013, 1
 %23 = add i32 %22, 0
- %24 = getelementptr inbounds [2 x i8]* %7, i32 %23, i32 0
+ %24 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %23, i32 0
 %25 = add nsw i32 0, %11
- %26 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
+ %26 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
 store i32 0, i32* %26, align 4
 %27 = add nsw i32 %k.014, 1
 %28 = icmp slt i32 %27, 65
@@ -122,21 +122,21 @@ switch.lookup6: ; preds = %6
 br label %31
 ; <label>:31 ; preds = %30, %29
- %32 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
- %33 = load i32* %32, align 4
- %34 = getelementptr inbounds %struct.datapartition* null, i32 %33
+ %32 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %33 = load i32, i32* %32, align 4
+ %34 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %33
 %35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
 br i1 false, label %.loopexit, label %36
 ; <label>:36 ; preds = %31
- %37 = load i32* undef, align 4
+ %37 = load i32, i32* undef, align 4
 %38 = add i32 %coef_ctr.29, 1
 %39 = add i32 %38, %37
- %40 = getelementptr inbounds [2 x i8]* %7, i32 %39, i32 0
- %41 = load i8* %40, align 1
+ %40 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %39, i32 0
+ %41 = load i8, i8* %40, align 1
 %42 = zext i8 %41 to i32
 %43 = add nsw i32 %42, %11
- %44 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
+ %44 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
 store i32 0, i32* %44, align 4
 %45 = add nsw i32 %k.110, 1
 %46 = icmp slt i32 %45, 65
diff --git a/test/CodeGen/Mips/private.ll b/test/CodeGen/Mips/private.ll
index 058db0b..5907dbd 100644
--- a/test/CodeGen/Mips/private.ll
+++ b/test/CodeGen/Mips/private.ll
@@ -15,6 +15,6 @@ define i32 @bar() {
 ; CHECK: lw $[[R0:[0-9]+]], %got($baz)($
 ; CHECK: lw ${{[0-9]+}}, %lo($baz)($[[R0]])
 call void @foo()
- %1 = load i32* @baz, align 4
+ %1 = load i32, i32* @baz, align 4
 ret i32 %1
 }
diff --git a/test/CodeGen/Mips/ra-allocatable.ll b/test/CodeGen/Mips/ra-allocatable.ll
index afc5cb0..048d432 100644
--- a/test/CodeGen/Mips/ra-allocatable.ll
+++ b/test/CodeGen/Mips/ra-allocatable.ll
@@ -98,186 +98,186 @@ entry:
 ; CHECK: lw $ra, {{[0-9]+}}($sp) # 4-byte Folded Reload
 ; CHECK: jr $ra
- %0 = load i32* @a0, align 4
- %1 = load i32** @b0, align 4
+ %0 = load i32, i32* @a0, align 4
+ %1 = load i32*, i32** @b0, align 4
 store i32 %0, i32* %1, align 4
- %2 = load i32* @a1, align 4
- %3 = load i32** @b1, align 4
+ %2 = load i32, i32* @a1, align 4
+ %3 = load i32*, i32** @b1, align 4
 store i32 %2, i32* %3, align 4
- %4 = load i32* @a2, align 4
- %5 = load i32** @b2, align 4
+ %4 = load i32, i32* @a2, align 4
+ %5 = load i32*, i32** @b2, align 4
 store i32 %4, i32* %5, align 4
- %6 = load i32* @a3, align 4
- %7 = load i32** @b3, align 4
+ %6 = load i32, i32* @a3, align 4
+ %7 = load i32*, i32** @b3, align 4
 store i32 %6, i32* %7, align 4
- %8 = load i32* @a4, align 4
- %9 = load i32** @b4, align 4
+ %8 = load i32, i32* @a4, align 4
+ %9 = load i32*, i32** @b4, align 4
 store i32 %8, i32* %9, align 4
- %10 = load i32* @a5, align 4
- %11 = load i32** @b5, align 4
+ %10 = load i32, i32* @a5, align 4
+ %11 = load i32*, i32** @b5, align 4
 store i32 %10, i32* %11, align 4
- %12 = load i32* @a6, align 4
- %13 = load i32** @b6, align 4
+ %12 = load i32, i32* @a6, align 4
+ %13 = load i32*, i32** @b6, align 4
 store i32 %12, i32* %13, align 4
- %14 = load i32* @a7, align 4
- %15 = load i32** @b7, align 4
+ %14 = load i32, i32* @a7, align 4
+ %15 = load i32*, i32** @b7, align 4
 store i32 %14, i32* %15, align 4
- %16 = load i32* @a8, align 4
- %17 = load i32** @b8, align 4
+ %16 = load i32, i32* @a8, align 4
+ %17 = load i32*, i32** @b8, align 4
 store i32 %16, i32* %17, align 4
- %18 = load i32* @a9, align 4
- %19 = load i32** @b9, align 4
+ %18 = load i32, i32* @a9, align 4
+ %19 = load i32*, i32** @b9, align 4
 store i32 %18, i32* %19, align 4
- %20 = load i32* @a10, align 4
- %21 = load i32** @b10, align 4
+ %20 = load i32, i32* @a10, align 4
+ %21 = load i32*, i32** @b10, align 4
 store i32 %20, i32* %21, align 4
- %22 = load i32* @a11, align 4
- %23 = load i32** @b11, align 4
+ %22 = load i32, i32* @a11, align 4
+ %23 = load i32*, i32** @b11, align 4
 store i32 %22, i32* %23, align 4
- %24 = load i32* @a12, align 4
- %25 = load i32** @b12, align 4
+ %24 = load i32, i32* @a12, align 4
+ %25 = load i32*, i32** @b12, align 4
 store i32 %24, i32* %25, align 4
- %26 = load i32* @a13, align 4
- %27 = load i32** @b13, align 4
+ %26 = load i32, i32* @a13, align 4
+ %27 = load i32*, i32** @b13, align 4
 store i32 %26, i32* %27, align 4
- %28 = load i32* @a14, align 4
- %29 = load i32** @b14, align 4
+ %28 = load i32, i32* @a14, align 4
+ %29 = load i32*, i32** @b14, align 4
 store i32 %28, i32* %29, align 4
- %30 = load i32* @a15, align 4
- %31 = load i32** @b15, align 4
+ %30 = load i32, i32* @a15, align 4
+ %31 = load i32*, i32** @b15, align 4
 store i32 %30, i32* %31, align 4
- %32 = load i32* @a16, align 4
- %33 = load i32** @b16, align 4
+ %32 = load i32, i32* @a16, align 4
+ %33 = load i32*, i32** @b16, align 4
 store i32 %32, i32* %33, align 4
- %34 = load i32* @a17, align 4
- %35 = load i32** @b17, align 4
+ %34 = load i32, i32* @a17, align 4
+ %35 = load i32*, i32** @b17, align 4
 store i32 %34, i32* %35, align 4
- %36 = load i32* @a18, align 4
- %37 = load i32** @b18, align 4
+ %36 = load i32, i32* @a18, align 4
+ %37 = load i32*, i32** @b18, align 4
 store i32 %36, i32* %37, align 4
- %38 = load i32* @a19, align 4
- %39 = load i32** @b19, align 4
+ %38 = load i32, i32* @a19, align 4
+ %39 = load i32*, i32** @b19, align 4
 store i32 %38, i32* %39, align 4
- %40 = load i32* @a20, align 4
- %41 = load i32** @b20, align 4
+ %40 = load i32, i32* @a20, align 4
+ %41 = load i32*, i32** @b20, align 4
 store i32 %40, i32* %41, align 4
- %42 = load i32* @a21, align 4
- %43 = load i32** @b21, align 4
+ %42 = load i32, i32* @a21, align 4
+ %43 = load i32*, i32** @b21, align 4
 store i32 %42, i32* %43, align 4
- %44 = load i32* @a22, align 4
- %45 = load i32** @b22, align 4
+ %44 = load i32, i32* @a22, align 4
+ %45 = load i32*, i32** @b22, align 4
 store i32 %44, i32* %45, align 4
- %46 = load i32* @a23, align 4
- %47 = load i32** @b23, align 4
+ %46 = load i32, i32* @a23, align 4
+ %47 = load i32*, i32** @b23, align 4
 store i32 %46, i32* %47, align 4
- %48 = load i32* @a24, align 4
- %49 = load i32** @b24, align 4
+ %48 = load i32, i32* @a24, align 4
+ %49 = load i32*, i32** @b24, align 4
 store i32 %48, i32* %49, align 4
- %50 = load i32* @a25, align 4
- %51 = load i32** @b25, align 4
+ %50 = load i32, i32* @a25, align 4
+ %51 = load i32*, i32** @b25, align 4
 store i32 %50, i32* %51, align 4
- %52 = load i32* @a26, align 4
- %53 = load i32** @b26, align 4
+ %52 = load i32, i32* @a26, align 4
+ %53 = load i32*, i32** @b26, align 4
 store i32 %52, i32* %53, align 4
- %54 = load i32* @a27, align 4
- %55 = load i32** @b27, align 4
+ %54 = load i32, i32* @a27, align 4
+ %55 = load i32*, i32** @b27, align 4
 store i32 %54, i32* %55, align 4
- %56 = load i32* @a28, align 4
- %57 = load i32** @b28, align 4
+ %56 = load i32, i32* @a28, align 4
+ %57 = load i32*, i32** @b28, align 4
 store i32 %56, i32* %57, align 4
- %58 = load i32* @a29, align 4
- %59 = load i32** @b29, align 4
+ %58 = load i32, i32* @a29, align 4
+ %59 = load i32*, i32** @b29, align 4
 store i32 %58, i32* %59, align 4
- %60 = load i32* @a0, align 4
- %61 = load i32** @c0, align 4
+ %60 = load i32, i32* @a0, align 4
+ %61 = load i32*, i32** @c0, align 4
 store i32 %60, i32* %61, align 4
- %62 = load i32* @a1, align 4
- %63 = load i32** @c1, align 4
+ %62 = load i32, i32* @a1, align 4
+ %63 = load i32*, i32** @c1, align 4
 store i32 %62, i32* %63, align 4
- %64 = load i32* @a2, align 4
- %65 = load i32** @c2, align 4
+ %64 = load i32, i32* @a2, align 4
+ %65 = load i32*, i32** @c2, align 4
 store i32 %64, i32* %65, align 4
- %66 = load i32* @a3, align 4
- %67 = load i32** @c3, align 4
+ %66 = load i32, i32* @a3, align 4
+ %67 = load i32*, i32** @c3, align 4
 store i32 %66, i32* %67, align 4
- %68 = load i32* @a4, align 4
- %69 = load i32** @c4, align 4
+ %68 = load i32, i32* @a4, align 4
+ %69 = load i32*, i32** @c4, align 4
 store i32 %68, i32* %69, align 4
- %70 = load i32* @a5, align 4
- %71 = load i32** @c5, align 4
+ %70 = load i32, i32* @a5, align 4
+ %71 = load i32*, i32** @c5, align 4
 store i32 %70, i32* %71, align 4
- %72 = load i32* @a6, align 4
- %73 = load i32** @c6, align 4
+ %72 = load i32, i32* @a6, align 4
+ %73 = load i32*, i32** @c6, align 4
 store i32 %72, i32* %73, align 4
- %74 = load i32* @a7, align 4
- %75 = load i32** @c7, align 4
+ %74 = load i32, i32* @a7, align 4
+ %75 = load i32*, i32** @c7, align 4
 store i32 %74, i32* %75, align 4
- %76 = load i32* @a8, align 4
- %77 = load i32** @c8, align 4
+ %76 = load i32, i32* @a8, align 4
+ %77 = load i32*, i32** @c8, align 4
 store i32 %76, i32* %77, align 4
- %78 = load i32* @a9, align 4
- %79 = load i32** @c9, align 4
+ %78 = load i32, i32* @a9, align 4
+ %79 = load i32*, i32** @c9, align 4
 store i32 %78, i32* %79, align 4
- %80 = load i32* @a10, align 4
- %81 = load i32** @c10, align 4
+ %80 = load i32, i32* @a10, align 4
+ %81 = load i32*, i32** @c10, align 4
 store i32 %80, i32* %81, align 4
- %82 = load i32* @a11, align 4
- %83 = load i32** @c11, align 4
+ %82 = load i32, i32* @a11, align 4
+ %83 = load i32*, i32** @c11, align 4
 store i32 %82, i32* %83, align 4
- %84 = load i32* @a12, align 4
- %85 = load i32** @c12, align 4
+ %84 = load i32, i32* @a12, align 4
+ %85 = load i32*, i32** @c12, align 4
 store i32 %84, i32* %85, align 4
- %86 = load i32* @a13, align 4
- %87 = load i32** @c13, align 4
+ %86 = load i32, i32* @a13, align 4
+ %87 = load i32*, i32** @c13, align 4
 store i32 %86, i32* %87, align 4
- %88 = load i32* @a14, align 4
- %89 = load i32** @c14, align 4
+ %88 = load i32, i32* @a14, align 4
+ %89 = load i32*, i32** @c14, align 4
 store i32 %88, i32* %89, align 4
- %90 = load i32* @a15, align 4
- %91 = load i32** @c15, align 4
+ %90 = load i32, i32* @a15, align 4
+ %91 = load i32*, i32** @c15, align 4
 store i32 %90, i32* %91, align 4
- %92 = load i32* @a16, align 4
- %93 = load i32** @c16, align 4
+ %92 = load i32, i32* @a16, align 4
+ %93 = load i32*, i32** @c16, align 4
 store i32 %92, i32* %93, align 4
- %94 = load i32* @a17, align 4
- %95 = load i32** @c17, align 4
+ %94 = load i32, i32* @a17, align 4
+ %95 = load i32*, i32** @c17, align 4
 store i32 %94, i32* %95, align 4
- %96 = load i32* @a18, align 4
- %97 = load i32** @c18, align 4
+ %96 = load i32, i32* @a18, align 4
+ %97 = load i32*, i32** @c18, align 4
 store i32 %96, i32* %97, align 4
- %98 = load i32* @a19, align 4
- %99 = load i32** @c19, align 4
+ %98 = load i32, i32* @a19, align 4
+ %99 = load i32*, i32** @c19, align 4
 store i32 %98, i32* %99, align 4
- %100 = load i32* @a20, align 4
- %101 = load i32** @c20, align 4
+ %100 = load i32, i32* @a20, align 4
+ %101 = load i32*, i32** @c20, align 4
 store i32 %100, i32* %101, align 4
- %102 = load i32* @a21, align 4
- %103 = load i32** @c21, align 4
+ %102 = load i32, i32* @a21, align 4
+ %103 = load i32*, i32** @c21, align 4
 store i32 %102, i32* %103, align 4
- %104 = load i32* @a22, align 4
- %105 = load i32** @c22, align 4
+ %104 = load i32, i32* @a22, align 4
+ %105 = load i32*, i32** @c22, align 4
 store i32 %104, i32* %105, align 4
- %106 = load i32* @a23, align 4
- %107 = load i32** @c23, align 4
+ %106 = load i32, i32* @a23, align 4
+ %107 = load i32*, i32** @c23, align 4
 store i32 %106, i32* %107, align 4
- %108 = load i32* @a24, align 4
- %109 = load i32** @c24, align 4
+ %108 = load i32, i32* @a24, align 4
+ %109 = load i32*, i32** @c24, align 4
 store i32 %108, i32* %109, align 4
- %110 = load i32* @a25, align 4
- %111 = load i32** @c25, align 4
+ %110 = load i32, i32* @a25, align 4
+ %111 = load i32*, i32** @c25, align 4
 store i32 %110, i32* %111, align 4
- %112 = load i32* @a26, align 4
- %113 = load i32** @c26, align 4
+ %112 = load i32, i32* @a26, align 4
+ %113 = load i32*, i32** @c26, align 4
 store i32 %112, i32* %113, align 4
- %114 = load i32* @a27, align 4
- %115 = load i32** @c27, align 4
+ %114 = load i32, i32* @a27, align 4
+ %115 = load i32*, i32** @c27, align 4
 store i32 %114, i32* %115, align 4
- %116 = load i32* @a28, align 4
- %117 = load i32** @c28, align 4
+ %116 = load i32, i32* @a28, align 4
+ %117 = load i32*, i32** @c28, align 4
 store i32 %116, i32* %117, align 4
- %118 = load i32* @a29, align 4
- %119 = load i32** @c29, align 4
+ %118 = load i32, i32* @a29, align 4
+ %119 = load i32*, i32** @c29, align 4
 store i32 %118, i32* %119, align 4
- %120 = load i32* @a0, align 4
+ %120 = load i32, i32* @a0, align 4
 ret i32 %120
 }
diff --git a/test/CodeGen/Mips/rdhwr-directives.ll b/test/CodeGen/Mips/rdhwr-directives.ll
index 27010d4..ebc91ea 100644
--- a/test/CodeGen/Mips/rdhwr-directives.ll
+++ b/test/CodeGen/Mips/rdhwr-directives.ll
@@ -9,7 +9,7 @@ entry:
 ; CHECK: rdhwr
 ; CHECK: .set pop
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
 ret i32 %0
 }
diff --git a/test/CodeGen/Mips/rem.ll b/test/CodeGen/Mips/rem.ll
index b18f85d..70f957c 100644
--- a/test/CodeGen/Mips/rem.ll
+++ b/test/CodeGen/Mips/rem.ll
@@ -7,8 +7,8 @@
 define void @test() nounwind {
 entry:
- %0 = load i32* @iiii, align 4
- %1 = load i32* @jjjj, align 4
+ %0 = load i32, i32* @iiii, align 4
+ %1 = load i32, i32* @jjjj, align 4
 %rem = srem i32 %0, %1
 ; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: mfhi ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/remu.ll b/test/CodeGen/Mips/remu.ll
index 472503c..1267972 100644
--- a/test/CodeGen/Mips/remu.ll
+++ b/test/CodeGen/Mips/remu.ll
@@ -7,8 +7,8 @@
 define void @test() nounwind {
 entry:
- %0 = load i32* @iiii, align 4
- %1 = load i32* @jjjj, align 4
+ %0 = load i32, i32* @iiii, align 4
+ %1 = load i32, i32* @jjjj, align 4
 %rem = urem i32 %0, %1
 ; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: mfhi ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/s2rem.ll b/test/CodeGen/Mips/s2rem.ll
index 9edb5be..65e48fe 100644
--- a/test/CodeGen/Mips/s2rem.ll
+++ b/test/CodeGen/Mips/s2rem.ll
@@ -56,7 +56,7 @@ declare double @d() #1
 ; Function Attrs: nounwind
 define void @fft() #0 {
 entry:
- %0 = load float* @x, align 4
+ %0 = load float, float* @x, align 4
 %call = call float @ff(float %0)
 store float %call, float* @x, align 4
 ret void
@@ -71,7 +71,7 @@ declare float @ff(float) #1
 ; Function Attrs: nounwind
 define void @vft() #0 {
 entry:
- %0 = load float* @x, align 4
+ %0 = load float, float* @x, align 4
 call void @vf(float %0)
 ret void
 ; PIC: .ent vft
diff --git a/test/CodeGen/Mips/sb1.ll b/test/CodeGen/Mips/sb1.ll
index e1a28d4..4c17a50 100644
--- a/test/CodeGen/Mips/sb1.ll
+++ b/test/CodeGen/Mips/sb1.ll
@@ -6,14 +6,14 @@
 define i32 @main() nounwind {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %conv = trunc i32 %0 to i8
 store i8 %conv, i8* @c, align 1
- %1 = load i32* @i, align 4
- %2 = load i8* @c, align 1
+ %1 = load i32, i32* @i, align 4
+ %2 = load i8, i8* @c, align 1
 %conv1 = sext i8 %2 to i32
 ; 16: sb ${{[0-9]+}}, 0(${{[0-9]+}})
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
 ret i32 0
 }
diff --git a/test/CodeGen/Mips/sel1c.ll b/test/CodeGen/Mips/sel1c.ll
index edd2e3e..991228d7 100644
--- a/test/CodeGen/Mips/sel1c.ll
+++ b/test/CodeGen/Mips/sel1c.ll
@@ -7,8 +7,8 @@
 ; Function Attrs: nounwind optsize
 define void @t() #0 {
 entry:
- %0 = load i32* @i, align 4
- %1 = load i32* @j, align 4
+ %0 = load i32, i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
 %cmp = icmp eq i32 %0, %1
 %cond = select i1 %cmp, i32 1, i32 3
 store i32 %cond, i32* @k, align 4
diff --git a/test/CodeGen/Mips/sel2c.ll b/test/CodeGen/Mips/sel2c.ll
index 4b21124..ba95a51 100644
--- a/test/CodeGen/Mips/sel2c.ll
+++ b/test/CodeGen/Mips/sel2c.ll
@@ -7,8 +7,8 @@
 ; Function Attrs: nounwind optsize
 define void @t() #0 {
 entry:
- %0 = load i32* @i, align 4
- %1 = load i32* @j, align 4
+ %0 = load i32, i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
 %cmp = icmp ne i32 %0, %1
 %cond = select i1 %cmp, i32 1, i32 3
 store i32 %cond, i32* @k, align 4
diff --git a/test/CodeGen/Mips/selTBteqzCmpi.ll b/test/CodeGen/Mips/selTBteqzCmpi.ll
index 9cb8227..5a72ea0 100644
--- a/test/CodeGen/Mips/selTBteqzCmpi.ll
+++ b/test/CodeGen/Mips/selTBteqzCmpi.ll
@@ -8,10 +8,10 @@
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
 %cmp = icmp eq i32 %0, 10
- %1 = load i32* @i, align 4
- %2 = load i32* @j, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @j, align 4
 %cond = select i1 %cmp, i32 %1, i32 %2
 store i32 %cond, i32* @i, align 4
 ret void
diff --git a/test/CodeGen/Mips/selTBtnezCmpi.ll b/test/CodeGen/Mips/selTBtnezCmpi.ll
index bd334f5..b6407e6 100644
--- a/test/CodeGen/Mips/selTBtnezCmpi.ll
+++ b/test/CodeGen/Mips/selTBtnezCmpi.ll
@@ -8,10 +8,10 @@
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
 %cmp = icmp ne i32 %0, 10
- %1 = load i32* @i, align 4
- %2 = load i32* @j, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @j, align 4
 %cond = select i1 %cmp, i32 %1, i32 %2
 store i32 %cond, i32* @i, align 4
 ret void
diff --git a/test/CodeGen/Mips/selTBtnezSlti.ll b/test/CodeGen/Mips/selTBtnezSlti.ll
index 593f6f2..2f1cdb8 100644
--- a/test/CodeGen/Mips/selTBtnezSlti.ll
+++ b/test/CodeGen/Mips/selTBtnezSlti.ll
@@ -8,10 +8,10 @@
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
 %cmp = icmp slt i32 %0, 10
- %1 = load i32* @j, align 4
- %2 = load i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
+ %2 = load i32, i32* @i, align 4
 %cond = select i1 %cmp, i32 %1, i32 %2
 store i32 %cond, i32* @i, align 4
 ret void
diff --git a/test/CodeGen/Mips/select.ll b/test/CodeGen/Mips/select.ll
index d6e1826..96bd378 100644
--- a/test/CodeGen/Mips/select.ll
+++ b/test/CodeGen/Mips/select.ll
@@ -700,8 +700,8 @@ entry:
 ; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
 ; 64R6: or $2, $[[NE]], $[[EQ]]
- %tmp = load double* @d2, align 8
- %tmp1 = load double* @d3, align 8
+ %tmp = load double, double* @d2, align 8
+ %tmp1 = load double, double* @d3, align 8
 %cmp = fcmp oeq double %tmp, %tmp1
 %cond = select i1 %cmp, i32 %f0, i32 %f1
 ret i32 %cond
@@ -777,8 +777,8 @@ entry:
 ; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
 ; 64R6: or $2, $[[NE]], $[[EQ]]
- %tmp = load double* @d2, align 8
- %tmp1 = load double* @d3, align 8
+ %tmp = load double, double* @d2, align 8
+ %tmp1 = load double, double* @d3, align 8
 %cmp = fcmp olt double %tmp, %tmp1
 %cond = select i1 %cmp, i32 %f0, i32 %f1
 ret i32 %cond
@@ -854,8 +854,8 @@ entry:
 ; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
 ; 64R6: or $2, $[[NE]], $[[EQ]]
- %tmp = load double* @d2, align 8
- %tmp1 = load double* @d3, align 8
+ %tmp = load double, double* @d2, align 8
+ %tmp1 = load double, double* @d3, align 8
 %cmp = fcmp ogt double %tmp, %tmp1
 %cond = select i1 %cmp, i32 %f0, i32 %f1
 ret i32 %cond
diff --git a/test/CodeGen/Mips/seleq.ll b/test/CodeGen/Mips/seleq.ll
index 9af422f..bd25358 100644
--- a/test/CodeGen/Mips/seleq.ll
+++ b/test/CodeGen/Mips/seleq.ll
@@ -12,65 +12,65 @@
 define void @calc_seleq() nounwind {
 entry:
- %0 = load i32* @a, align 4
- %1 = load i32* @b, align 4
+ %0 = load i32, i32* @a, align 4
+ %1 = load i32, i32* @b, align 4
 %cmp = icmp eq i32 %0, %1
 br i1 %cmp, label %cond.true, label %cond.false
 cond.true: ; preds = %entry
- %2 = load i32* @f, align 4
+ %2 = load i32, i32* @f, align 4
 br label %cond.end
 cond.false: ; preds = %entry
- %3 = load i32* @t, align 4
+ %3 = load i32, i32* @t, align 4
 br label %cond.end
 cond.end: ; preds = %cond.false, %cond.true
 %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
 store i32 %cond, i32* @z1, align 4
- %4 = load i32* @b, align 4
- %5 = load i32* @a, align 4
+ %4 = load i32, i32* @b, align 4
+ %5 = load i32, i32* @a, align 4
 %cmp1 = icmp eq i32 %4, %5
 br i1 %cmp1, label %cond.true2, label %cond.false3
 cond.true2: ; preds = %cond.end
- %6 = load i32* @f, align 4
+ %6 = load i32, i32* @f, align 4
 br label %cond.end4
 cond.false3: ; preds = %cond.end
- %7 = load i32* @t, align 4
+ %7 = load i32, i32* @t, align 4
 br label %cond.end4
 cond.end4: ; preds = %cond.false3, %cond.true2
 %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
 store i32 %cond5, i32* @z2, align 4
- %8 = load i32* @c, align 4
- %9 = load i32* @a, align 4
+ %8 = load i32, i32* @c, align 4
+ %9 = load i32, i32* @a, align 4
 %cmp6 = icmp eq i32 %8, %9
 br i1 %cmp6, label %cond.true7, label %cond.false8
 cond.true7: ; preds = %cond.end4
- %10 = load i32* @t, align 4
+ %10 = load i32, i32* @t, align 4
 br label %cond.end9
 cond.false8: ; preds = %cond.end4
- %11 = load i32* @f, align 4
+ %11 = load i32, i32* @f, align 4
 br label %cond.end9
 cond.end9: ; preds = %cond.false8, %cond.true7
 %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
 store i32 %cond10, i32* @z3, align 4
- %12 = load i32* @a, align 4
- %13 = load i32* @c, align 4
+ %12 = load i32, i32* @a, align 4
+ %13 = load i32, i32* @c, align 4
 %cmp11 = icmp eq i32 %12, %13
 br i1 %cmp11, label %cond.true12, label %cond.false13
 cond.true12: ; preds = %cond.end9
- %14 = load i32* @t, align 4
+ %14 = load i32, i32* @t, align 4
 br label %cond.end14
 cond.false13: ; preds = %cond.end9
- %15 = load i32* @f, align 4
+ %15 = load i32, i32* @f, align 4
 br label %cond.end14
 cond.end14: ; preds = %cond.false13, %cond.true12
diff --git a/test/CodeGen/Mips/seleqk.ll b/test/CodeGen/Mips/seleqk.ll
index 3ca622d..2eeaa9e 100644
--- a/test/CodeGen/Mips/seleqk.ll
+++ b/test/CodeGen/Mips/seleqk.ll
@@ -12,61 +12,61 @@
 define void @calc_seleqk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
- %0 = load i32* @a, align 4
+
%0 = load i32, i32* @a, align 4 %cmp = icmp eq i32 %0, 1 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %1 = load i32* @t, align 4 + %1 = load i32, i32* @t, align 4 br label %cond.end cond.false: ; preds = %entry - %2 = load i32* @f, align 4 + %2 = load i32, i32* @f, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ] store i32 %cond, i32* @z1, align 4 - %3 = load i32* @a, align 4 + %3 = load i32, i32* @a, align 4 %cmp1 = icmp eq i32 %3, 1000 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %4 = load i32* @f, align 4 + %4 = load i32, i32* @f, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %5 = load i32* @t, align 4 + %5 = load i32, i32* @t, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %6 = load i32* @b, align 4 + %6 = load i32, i32* @b, align 4 %cmp6 = icmp eq i32 %6, 3 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %7 = load i32* @f, align 4 + %7 = load i32, i32* @f, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %8 = load i32* @t, align 4 + %8 = load i32, i32* @t, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %9 = load i32* @b, align 4 + %9 = load i32, i32* @b, align 4 %cmp11 = icmp eq i32 %9, 1000 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %10 = load i32* @t, align 4 + %10 = load i32, i32* @t, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %11 = load i32* @f, align 4 + %11 = load i32, i32* @f, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 diff --git a/test/CodeGen/Mips/selgek.ll b/test/CodeGen/Mips/selgek.ll index 8ab4046..38ad95e 100644 --- a/test/CodeGen/Mips/selgek.ll +++ b/test/CodeGen/Mips/selgek.ll @@ -13,61 +13,61 @@ define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp sge i32 %0, 1000 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %1 = load i32* @f, align 4 + %1 = load i32, i32* @f, align 4 br label %cond.end cond.false: ; preds = %entry - %2 = load i32* @t, align 4 + %2 = load i32, i32* @t, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ] store i32 %cond, i32* @z1, align 4 - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp1 = icmp sge i32 %3, 1 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %4 = load i32* @t, align 4 + %4 = load i32, i32* @t, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %5 = load i32* @f, align 4 + %5 = load i32, i32* @f, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %6 = load i32* @c, align 4 + %6 = load i32, i32* @c, align 4 %cmp6 = icmp sge i32 %6, 2 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %7 = load i32* @t, align 4 + %7 = load i32, i32* @t, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %8 = load i32* @f, align 4 + %8 = 
load i32, i32* @f, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %9 = load i32* @a, align 4 + %9 = load i32, i32* @a, align 4 %cmp11 = icmp sge i32 %9, 2 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %10 = load i32* @t, align 4 + %10 = load i32, i32* @t, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %11 = load i32* @f, align 4 + %11 = load i32, i32* @f, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 diff --git a/test/CodeGen/Mips/selgt.ll b/test/CodeGen/Mips/selgt.ll index 67b9b49..a2e1e39 100644 --- a/test/CodeGen/Mips/selgt.ll +++ b/test/CodeGen/Mips/selgt.ll @@ -14,71 +14,71 @@ define i32 @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: %retval = alloca i32, align 4 - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp sgt i32 %0, %1 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %2 = load i32* @f, align 4 + %2 = load i32, i32* @f, align 4 br label %cond.end cond.false: ; preds = %entry - %3 = load i32* @t, align 4 + %3 = load i32, i32* @t, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ] store i32 %cond, i32* @z1, align 4 - %4 = load i32* @b, align 4 - %5 = load i32* @a, align 4 + %4 = load i32, i32* @b, align 4 + %5 = load i32, i32* @a, align 4 %cmp1 = icmp sgt i32 %4, %5 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %6 = load i32* @t, align 4 + %6 = load i32, i32* @t, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %7 = load i32* @f, align 4 + %7 = load i32, i32* @f, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %8 = load i32* @c, align 4 - %9 = load i32* @a, align 4 + %8 = load i32, i32* @c, align 4 + %9 = load i32, i32* @a, align 4 %cmp6 = icmp sgt i32 %8, %9 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %10 = load i32* @f, align 4 + %10 = load i32, i32* @f, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %11 = load i32* @t, align 4 + %11 = load i32, i32* @t, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %12 = load i32* @a, align 4 - %13 = load i32* @c, align 4 + %12 = load i32, i32* @a, align 4 + %13 = load i32, i32* @c, align 4 %cmp11 = icmp sgt i32 %12, %13 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %14 = load i32* @f, align 4 + %14 = load i32, i32* @f, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %15 = load i32* @t, align 4 + %15 = load i32, i32* @t, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 %cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ] store i32 %cond15, i32* @z4, align 4 - %16 = load i32* %retval + %16 = load i32, i32* %retval ret i32 %16 } diff --git a/test/CodeGen/Mips/selle.ll b/test/CodeGen/Mips/selle.ll index b27df45..1adefb7 100644 --- a/test/CodeGen/Mips/selle.ll +++ b/test/CodeGen/Mips/selle.ll @@ -13,65 +13,65 @@ define void @calc_z() nounwind 
"target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp sle i32 %0, %1 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %2 = load i32* @t, align 4 + %2 = load i32, i32* @t, align 4 br label %cond.end cond.false: ; preds = %entry - %3 = load i32* @f, align 4 + %3 = load i32, i32* @f, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ] store i32 %cond, i32* @z1, align 4 - %4 = load i32* @b, align 4 - %5 = load i32* @a, align 4 + %4 = load i32, i32* @b, align 4 + %5 = load i32, i32* @a, align 4 %cmp1 = icmp sle i32 %4, %5 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %6 = load i32* @f, align 4 + %6 = load i32, i32* @f, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %7 = load i32* @t, align 4 + %7 = load i32, i32* @t, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %8 = load i32* @c, align 4 - %9 = load i32* @a, align 4 + %8 = load i32, i32* @c, align 4 + %9 = load i32, i32* @a, align 4 %cmp6 = icmp sle i32 %8, %9 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %10 = load i32* @t, align 4 + %10 = load i32, i32* @t, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %11 = load i32* @f, align 4 + %11 = load i32, i32* @f, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %12 = load i32* @a, align 4 - %13 = load i32* @c, align 4 + %12 = load i32, i32* @a, align 4 + %13 = load i32, i32* @c, align 4 %cmp11 = icmp sle i32 %12, %13 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %14 = load i32* @t, align 4 + %14 = load i32, i32* @t, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %15 = load i32* @f, align 4 + %15 = load i32, i32* @f, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 diff --git a/test/CodeGen/Mips/selltk.ll b/test/CodeGen/Mips/selltk.ll index 1471b89..db9f8c1 100644 --- a/test/CodeGen/Mips/selltk.ll +++ b/test/CodeGen/Mips/selltk.ll @@ -13,61 +13,61 @@ define void @calc_selltk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp slt i32 %0, 1000 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %1 = load i32* @t, align 4 + %1 = load i32, i32* @t, align 4 br label %cond.end cond.false: ; preds = %entry - %2 = load i32* @f, align 4 + %2 = load i32, i32* @f, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ] store i32 %cond, i32* @z1, align 4 - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp1 = icmp slt i32 %3, 2 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %4 = load i32* @f, align 4 + %4 = load i32, i32* @f, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %5 = load i32* @t, align 4 + %5 = load i32, i32* @t, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 
] store i32 %cond5, i32* @z2, align 4 - %6 = load i32* @c, align 4 + %6 = load i32, i32* @c, align 4 %cmp6 = icmp sgt i32 %6, 2 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %7 = load i32* @f, align 4 + %7 = load i32, i32* @f, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %8 = load i32* @t, align 4 + %8 = load i32, i32* @t, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %9 = load i32* @a, align 4 + %9 = load i32, i32* @a, align 4 %cmp11 = icmp sgt i32 %9, 2 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %10 = load i32* @f, align 4 + %10 = load i32, i32* @f, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %11 = load i32* @t, align 4 + %11 = load i32, i32* @t, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 diff --git a/test/CodeGen/Mips/selne.ll b/test/CodeGen/Mips/selne.ll index e3d82b8..9be99d6 100644 --- a/test/CodeGen/Mips/selne.ll +++ b/test/CodeGen/Mips/selne.ll @@ -13,65 +13,65 @@ define void @calc_seleq() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp ne i32 %0, %1 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %2 = load i32* @f, align 4 + %2 = load i32, i32* @f, align 4 br label %cond.end cond.false: ; preds = %entry - %3 = load i32* @t, align 4 + %3 = load i32, i32* @t, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ] store i32 %cond, i32* @z1, align 4 - %4 = load i32* @b, align 4 - %5 = load i32* @a, align 4 + %4 = load i32, i32* @b, align 4 + %5 = load i32, i32* @a, align 4 %cmp1 = icmp ne i32 %4, %5 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %6 = load i32* @f, align 4 + %6 = load i32, i32* @f, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %7 = load i32* @t, align 4 + %7 = load i32, i32* @t, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %8 = load i32* @c, align 4 - %9 = load i32* @a, align 4 + %8 = load i32, i32* @c, align 4 + %9 = load i32, i32* @a, align 4 %cmp6 = icmp ne i32 %8, %9 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %10 = load i32* @t, align 4 + %10 = load i32, i32* @t, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %11 = load i32* @f, align 4 + %11 = load i32, i32* @f, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %12 = load i32* @a, align 4 - %13 = load i32* @c, align 4 + %12 = load i32, i32* @a, align 4 + %13 = load i32, i32* @c, align 4 %cmp11 = icmp ne i32 %12, %13 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %14 = load i32* @t, align 4 + %14 = load i32, i32* @t, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %15 = load i32* @f, align 4 + %15 = load i32, i32* @f, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 diff --git a/test/CodeGen/Mips/selnek.ll 
b/test/CodeGen/Mips/selnek.ll index 64834b2..e8a5105 100644 --- a/test/CodeGen/Mips/selnek.ll +++ b/test/CodeGen/Mips/selnek.ll @@ -12,61 +12,61 @@ define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp ne i32 %0, 1 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - %1 = load i32* @f, align 4 + %1 = load i32, i32* @f, align 4 br label %cond.end cond.false: ; preds = %entry - %2 = load i32* @t, align 4 + %2 = load i32, i32* @t, align 4 br label %cond.end cond.end: ; preds = %cond.false, %cond.true %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ] store i32 %cond, i32* @z1, align 4 - %3 = load i32* @a, align 4 + %3 = load i32, i32* @a, align 4 %cmp1 = icmp ne i32 %3, 1000 br i1 %cmp1, label %cond.true2, label %cond.false3 cond.true2: ; preds = %cond.end - %4 = load i32* @t, align 4 + %4 = load i32, i32* @t, align 4 br label %cond.end4 cond.false3: ; preds = %cond.end - %5 = load i32* @f, align 4 + %5 = load i32, i32* @f, align 4 br label %cond.end4 cond.end4: ; preds = %cond.false3, %cond.true2 %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ] store i32 %cond5, i32* @z2, align 4 - %6 = load i32* @b, align 4 + %6 = load i32, i32* @b, align 4 %cmp6 = icmp ne i32 %6, 3 br i1 %cmp6, label %cond.true7, label %cond.false8 cond.true7: ; preds = %cond.end4 - %7 = load i32* @t, align 4 + %7 = load i32, i32* @t, align 4 br label %cond.end9 cond.false8: ; preds = %cond.end4 - %8 = load i32* @f, align 4 + %8 = load i32, i32* @f, align 4 br label %cond.end9 cond.end9: ; preds = %cond.false8, %cond.true7 %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ] store i32 %cond10, i32* @z3, align 4 - %9 = load i32* @b, align 4 + %9 = load i32, i32* @b, align 4 %cmp11 = icmp ne i32 %9, 1000 br i1 %cmp11, label %cond.true12, label %cond.false13 cond.true12: ; preds = %cond.end9 - %10 = load i32* @f, align 4 + %10 = load i32, i32* @f, align 4 br label %cond.end14 cond.false13: ; preds = %cond.end9 - %11 = load i32* @t, align 4 + %11 = load i32, i32* @t, align 4 br label %cond.end14 cond.end14: ; preds = %cond.false13, %cond.true12 @@ -78,14 +78,14 @@ cond.end14: ; preds = %cond.false13, %cond define i32 @main() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" { entry: call void @calc_z() "target-cpu"="mips16" "target-features"="+mips16,+o32" - %0 = load i32* @z1, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32" - %1 = load i32* @z2, align 4 - %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32" - %2 = load i32* @z3, align 4 - %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32" - %3 = load i32* @z4, align 4 - %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32" + %0 = load i32, i32* @z1, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32" + %1 = load i32, i32* @z2, align 4 + %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 
0), i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32" + %2 = load i32, i32* @z3, align 4 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32" + %3 = load i32, i32* @z4, align 4 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32" ret i32 0 } diff --git a/test/CodeGen/Mips/selpat.ll b/test/CodeGen/Mips/selpat.ll index 8eda8de..c682d81 100644 --- a/test/CodeGen/Mips/selpat.ll +++ b/test/CodeGen/Mips/selpat.ll @@ -12,18 +12,18 @@ define void @calc_seleq() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp eq i32 %0, %1 - %2 = load i32* @f, align 4 - %3 = load i32* @t, align 4 + %2 = load i32, i32* @f, align 4 + %3 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} ; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} store i32 %cond, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp eq i32 %4, %0 %cond10 = select i1 %cmp6, i32 %3, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -34,10 +34,10 @@ entry: define void @calc_seleqk() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp eq i32 %0, 1 - %1 = load i32* @t, align 4 - %2 = load i32* @f, align 4 + %1 = load i32, i32* @t, align 4 + %2 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: cmpi ${{[0-9]+}}, 1 @@ -46,7 +46,7 @@ entry: %cmp1 = icmp eq i32 %0, 10 %cond5 = select i1 %cmp1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp6 = icmp eq i32 %3, 3 %cond10 = select i1 %cmp6, i32 %2, i32 %1 store i32 %cond10, i32* @z3, align 4 @@ -61,19 +61,19 @@ entry: define void @calc_seleqz() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp eq i32 %0, 0 - %1 = load i32* @t, align 4 - %2 = load i32* @f, align 4 + %1 = load i32, i32* @t, align 4 + %2 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp1 = icmp eq i32 %3, 0 %cond5 = select i1 %cmp1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp eq i32 %4, 0 %cond10 = select i1 %cmp6, i32 %1, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -83,11 +83,11 @@ entry: define void @calc_selge() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp sge i32 %0, %1 - %2 = load i32* @f, align 4 - %3 = load i32* @t, align 4 + %2 = load i32, i32* @f, align 4 + %3 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}} @@ -96,7 +96,7 @@ entry: %cmp1 = icmp sge i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp sge i32 %4, %0 %cond10 = select i1 
%cmp6, i32 %3, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -108,20 +108,20 @@ entry: define i32 @calc_selgt() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp sgt i32 %0, %1 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}} ; 16: btnez $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} - %2 = load i32* @f, align 4 - %3 = load i32* @t, align 4 + %2 = load i32, i32* @f, align 4 + %3 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 %cmp1 = icmp sgt i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp sgt i32 %4, %0 %cond10 = select i1 %cmp6, i32 %2, i32 %3 store i32 %cond10, i32* @z3, align 4 @@ -133,11 +133,11 @@ entry: define void @calc_selle() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp sle i32 %0, %1 - %2 = load i32* @t, align 4 - %3 = load i32* @f, align 4 + %2 = load i32, i32* @t, align 4 + %3 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}} @@ -146,7 +146,7 @@ entry: %cmp1 = icmp sle i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp sle i32 %4, %0 %cond10 = select i1 %cmp6, i32 %2, i32 %3 store i32 %cond10, i32* @z3, align 4 @@ -158,20 +158,20 @@ entry: define void @calc_selltk() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp slt i32 %0, 10 - %1 = load i32* @t, align 4 - %2 = load i32* @f, align 4 + %1 = load i32, i32* @t, align 4 + %2 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: slti ${{[0-9]+}}, {{[0-9]+}} ; 16: btnez $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp1 = icmp slt i32 %3, 2 %cond5 = select i1 %cmp1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp sgt i32 %4, 2 %cond10 = select i1 %cmp6, i32 %2, i32 %1 store i32 %cond10, i32* @z3, align 4 @@ -184,18 +184,18 @@ entry: define void @calc_selne() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp ne i32 %0, %1 - %2 = load i32* @t, align 4 - %3 = load i32* @f, align 4 + %2 = load i32, i32* @t, align 4 + %3 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} ; 16: btnez $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} store i32 %cond, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp ne i32 %4, %0 %cond10 = select i1 %cmp6, i32 %3, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -205,10 +205,10 @@ entry: define void @calc_selnek() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp ne i32 %0, 1 - %1 = load i32* @f, align 4 - %2 = load i32* @t, align 4 + %1 = load i32, i32* @f, align 4 + %2 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: cmpi 
${{[0-9]+}}, 1 @@ -217,7 +217,7 @@ entry: %cmp1 = icmp ne i32 %0, 10 %cond5 = select i1 %cmp1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp6 = icmp ne i32 %3, 3 %cond10 = select i1 %cmp6, i32 %2, i32 %1 store i32 %cond10, i32* @z3, align 4 @@ -232,19 +232,19 @@ entry: define void @calc_selnez() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %cmp = icmp ne i32 %0, 0 - %1 = load i32* @f, align 4 - %2 = load i32* @t, align 4 + %1 = load i32, i32* @f, align 4 + %2 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %cmp1 = icmp ne i32 %3, 0 %cond5 = select i1 %cmp1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp ne i32 %4, 0 %cond10 = select i1 %cmp6, i32 %1, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -254,19 +254,19 @@ entry: define void @calc_selnez2() nounwind { entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %tobool = icmp ne i32 %0, 0 - %1 = load i32* @f, align 4 - %2 = load i32* @t, align 4 + %1 = load i32, i32* @f, align 4 + %2 = load i32, i32* @t, align 4 %cond = select i1 %tobool, i32 %1, i32 %2 store i32 %cond, i32* @z1, align 4 ; 16: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}} ; 16: move ${{[0-9]+}}, ${{[0-9]+}} - %3 = load i32* @b, align 4 + %3 = load i32, i32* @b, align 4 %tobool1 = icmp ne i32 %3, 0 %cond5 = select i1 %tobool1, i32 %2, i32 %1 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %tobool6 = icmp ne i32 %4, 0 %cond10 = select i1 %tobool6, i32 %1, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -276,11 +276,11 @@ entry: define void @calc_seluge() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp uge i32 %0, %1 - %2 = load i32* @f, align 4 - %3 = load i32* @t, align 4 + %2 = load i32, i32* @f, align 4 + %3 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} @@ -289,7 +289,7 @@ entry: %cmp1 = icmp uge i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp uge i32 %4, %0 %cond10 = select i1 %cmp6, i32 %3, i32 %2 store i32 %cond10, i32* @z3, align 4 @@ -301,11 +301,11 @@ entry: define void @calc_selugt() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp ugt i32 %0, %1 - %2 = load i32* @f, align 4 - %3 = load i32* @t, align 4 + %2 = load i32, i32* @f, align 4 + %3 = load i32, i32* @t, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} @@ -314,7 +314,7 @@ entry: %cmp1 = icmp ugt i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp ugt i32 %4, %0 %cond10 = select i1 %cmp6, i32 %2, i32 %3 store i32 %cond10, i32* @z3, align 4 @@ -326,11 +326,11 @@ entry: define void @calc_selule() nounwind { entry: - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 
+ %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %cmp = icmp ule i32 %0, %1 - %2 = load i32* @t, align 4 - %3 = load i32* @f, align 4 + %2 = load i32, i32* @t, align 4 + %3 = load i32, i32* @f, align 4 %cond = select i1 %cmp, i32 %2, i32 %3 store i32 %cond, i32* @z1, align 4 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} @@ -339,7 +339,7 @@ entry: %cmp1 = icmp ule i32 %1, %0 %cond5 = select i1 %cmp1, i32 %3, i32 %2 store i32 %cond5, i32* @z2, align 4 - %4 = load i32* @c, align 4 + %4 = load i32, i32* @c, align 4 %cmp6 = icmp ule i32 %4, %0 %cond10 = select i1 %cmp6, i32 %2, i32 %3 store i32 %cond10, i32* @z3, align 4 diff --git a/test/CodeGen/Mips/seteq.ll b/test/CodeGen/Mips/seteq.ll index 5fadf78..8fad612 100644 --- a/test/CodeGen/Mips/seteq.ll +++ b/test/CodeGen/Mips/seteq.ll @@ -8,8 +8,8 @@ define void @test() nounwind { entry: - %0 = load i32* @i, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @i, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp eq i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/seteqz.ll b/test/CodeGen/Mips/seteqz.ll index 80dc312..8e9a4be 100644 --- a/test/CodeGen/Mips/seteqz.ll +++ b/test/CodeGen/Mips/seteqz.ll @@ -7,13 +7,13 @@ define void @test() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %cmp = icmp eq i32 %0, 0 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 ; 16: sltiu ${{[0-9]+}}, 1 ; 16: move ${{[0-9]+}}, $24 - %1 = load i32* @j, align 4 + %1 = load i32, i32* @j, align 4 %cmp1 = icmp eq i32 %1, 99 %conv2 = zext i1 %cmp1 to i32 store i32 %conv2, i32* @r2, align 4 diff --git a/test/CodeGen/Mips/setge.ll b/test/CodeGen/Mips/setge.ll index 8869eb8..8fb7299 100644 --- a/test/CodeGen/Mips/setge.ll +++ b/test/CodeGen/Mips/setge.ll @@ -11,15 +11,15 @@ define void @test() nounwind { entry: - %0 = load i32* @k, align 4 - %1 = load i32* @j, align 4 + %0 = load i32, i32* @k, align 4 + %1 = load i32, i32* @j, align 4 %cmp = icmp sge i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}} ; 16: move $[[REGISTER:[0-9]+]], $24 ; 16: xor $[[REGISTER]], ${{[0-9]+}} - %2 = load i32* @m, align 4 + %2 = load i32, i32* @m, align 4 %cmp1 = icmp sge i32 %0, %2 %conv2 = zext i1 %cmp1 to i32 store i32 %conv2, i32* @r2, align 4 diff --git a/test/CodeGen/Mips/setgek.ll b/test/CodeGen/Mips/setgek.ll index 18a0fcf..1148d1b 100644 --- a/test/CodeGen/Mips/setgek.ll +++ b/test/CodeGen/Mips/setgek.ll @@ -7,7 +7,7 @@ define void @test() nounwind { entry: - %0 = load i32* @k, align 4 + %0 = load i32, i32* @k, align 4 %cmp = icmp sgt i32 %0, -32769 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setle.ll b/test/CodeGen/Mips/setle.ll index 2df6774..fe4a2c3 100644 --- a/test/CodeGen/Mips/setle.ll +++ b/test/CodeGen/Mips/setle.ll @@ -10,15 +10,15 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @j, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp sle i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}} ; 16: move $[[REGISTER:[0-9]+]], $24 ; 16: xor $[[REGISTER]], ${{[0-9]+}} - %2 = load i32* @m, align 4 + %2 = load i32, i32* @m, align 4 %cmp1 = icmp sle i32 %2, %1 %conv2 = zext i1 %cmp1 to i32 store i32 %conv2, i32* @r2, align 4 diff --git a/test/CodeGen/Mips/setlt.ll b/test/CodeGen/Mips/setlt.ll index 3dac74b..c4211e6 100644 --- 
a/test/CodeGen/Mips/setlt.ll +++ b/test/CodeGen/Mips/setlt.ll @@ -10,8 +10,8 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @j, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp slt i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setltk.ll b/test/CodeGen/Mips/setltk.ll index ecebc7e..8c00411 100644 --- a/test/CodeGen/Mips/setltk.ll +++ b/test/CodeGen/Mips/setltk.ll @@ -10,7 +10,7 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 + %0 = load i32, i32* @j, align 4 %cmp = icmp slt i32 %0, 10 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setne.ll b/test/CodeGen/Mips/setne.ll index 9e66901..484674e 100644 --- a/test/CodeGen/Mips/setne.ll +++ b/test/CodeGen/Mips/setne.ll @@ -8,8 +8,8 @@ define void @test() nounwind { entry: - %0 = load i32* @i, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @i, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp ne i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setuge.ll b/test/CodeGen/Mips/setuge.ll index 1c9b5bb..025b4dc 100644 --- a/test/CodeGen/Mips/setuge.ll +++ b/test/CodeGen/Mips/setuge.ll @@ -10,15 +10,15 @@ define void @test() nounwind { entry: - %0 = load i32* @k, align 4 - %1 = load i32* @j, align 4 + %0 = load i32, i32* @k, align 4 + %1 = load i32, i32* @j, align 4 %cmp = icmp uge i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} ; 16: move $[[REGISTER:[0-9]+]], $24 ; 16: xor $[[REGISTER]], ${{[0-9]+}} - %2 = load i32* @m, align 4 + %2 = load i32, i32* @m, align 4 %cmp1 = icmp uge i32 %0, %2 %conv2 = zext i1 %cmp1 to i32 store i32 %conv2, i32* @r2, align 4 diff --git a/test/CodeGen/Mips/setugt.ll b/test/CodeGen/Mips/setugt.ll index f10b47a..0ce317e 100644 --- a/test/CodeGen/Mips/setugt.ll +++ b/test/CodeGen/Mips/setugt.ll @@ -10,8 +10,8 @@ define void @test() nounwind { entry: - %0 = load i32* @k, align 4 - %1 = load i32* @j, align 4 + %0 = load i32, i32* @k, align 4 + %1 = load i32, i32* @j, align 4 %cmp = icmp ugt i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setule.ll b/test/CodeGen/Mips/setule.ll index a6d6bf0..4255fd2 100644 --- a/test/CodeGen/Mips/setule.ll +++ b/test/CodeGen/Mips/setule.ll @@ -10,15 +10,15 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @j, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp ule i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} ; 16: move $[[REGISTER:[0-9]+]], $24 ; 16: xor $[[REGISTER]], ${{[0-9]+}} - %2 = load i32* @m, align 4 + %2 = load i32, i32* @m, align 4 %cmp1 = icmp ule i32 %2, %1 %conv2 = zext i1 %cmp1 to i32 store i32 %conv2, i32* @r2, align 4 diff --git a/test/CodeGen/Mips/setult.ll b/test/CodeGen/Mips/setult.ll index 00ee437..d30107e 100644 --- a/test/CodeGen/Mips/setult.ll +++ b/test/CodeGen/Mips/setult.ll @@ -10,8 +10,8 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @j, align 4 + %1 = load i32, i32* @k, align 4 %cmp = icmp ult i32 %0, %1 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/setultk.ll b/test/CodeGen/Mips/setultk.ll index eb9edba..1b79f10 100644 --- 
a/test/CodeGen/Mips/setultk.ll +++ b/test/CodeGen/Mips/setultk.ll @@ -10,7 +10,7 @@ define void @test() nounwind { entry: - %0 = load i32* @j, align 4 + %0 = load i32, i32* @j, align 4 %cmp = icmp ult i32 %0, 10 %conv = zext i1 %cmp to i32 store i32 %conv, i32* @r1, align 4 diff --git a/test/CodeGen/Mips/sh1.ll b/test/CodeGen/Mips/sh1.ll index 1746ae2..1ab7779 100644 --- a/test/CodeGen/Mips/sh1.ll +++ b/test/CodeGen/Mips/sh1.ll @@ -6,14 +6,14 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %conv = trunc i32 %0 to i16 store i16 %conv, i16* @s, align 2 - %1 = load i32* @i, align 4 - %2 = load i16* @s, align 2 + %1 = load i32, i32* @i, align 4 + %2 = load i16, i16* @s, align 2 %conv1 = sext i16 %2 to i32 ; 16: sh ${{[0-9]+}}, 0(${{[0-9]+}}) - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1) ret i32 0 } diff --git a/test/CodeGen/Mips/simplebr.ll b/test/CodeGen/Mips/simplebr.ll index a1d6367..08e153a 100644 --- a/test/CodeGen/Mips/simplebr.ll +++ b/test/CodeGen/Mips/simplebr.ll @@ -9,7 +9,7 @@ target triple = "mips--linux-gnu" ; Function Attrs: nounwind define void @foo() #0 { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/test/CodeGen/Mips/sitofp-selectcc-opt.ll b/test/CodeGen/Mips/sitofp-selectcc-opt.ll index 576cbd8..c60fceb 100644 --- a/test/CodeGen/Mips/sitofp-selectcc-opt.ll +++ b/test/CodeGen/Mips/sitofp-selectcc-opt.ll @@ -14,7 +14,7 @@ entry: %tobool1. = or i1 %tobool1, %not.tobool %lor.ext = zext i1 %tobool1. 
to i32 %conv = sitofp i32 %lor.ext to double - %1 = load double* @foo12.d4, align 8 + %1 = load double, double* @foo12.d4, align 8 %add = fadd double %conv, %1 store double %add, double* @foo12.d4, align 8 ret double %add diff --git a/test/CodeGen/Mips/sll1.ll b/test/CodeGen/Mips/sll1.ll index fdcd38c..52173b8 100644 --- a/test/CodeGen/Mips/sll1.ll +++ b/test/CodeGen/Mips/sll1.ll @@ -7,12 +7,12 @@ define i32 @main() nounwind { entry: ; 16: sll ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}} - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %shl = shl i32 %0, 4 ; 16: sll ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}} store i32 %shl, i32* @j, align 4 - %1 = load i32* @j, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1) + %1 = load i32, i32* @j, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) ret i32 0 } diff --git a/test/CodeGen/Mips/sll2.ll b/test/CodeGen/Mips/sll2.ll index c2af454..0e7194e 100644 --- a/test/CodeGen/Mips/sll2.ll +++ b/test/CodeGen/Mips/sll2.ll @@ -6,13 +6,13 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 - %1 = load i32* @j, align 4 + %0 = load i32, i32* @i, align 4 + %1 = load i32, i32* @j, align 4 %shl = shl i32 %0, %1 ; 16: sllv ${{[0-9]+}}, ${{[0-9]+}} store i32 %shl, i32* @i, align 4 - %2 = load i32* @j, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %2) + %2 = load i32, i32* @j, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2) ret i32 0 } diff --git a/test/CodeGen/Mips/small-section-reserve-gp.ll b/test/CodeGen/Mips/small-section-reserve-gp.ll index cbf0681..c4e3766 100644 --- a/test/CodeGen/Mips/small-section-reserve-gp.ll +++ b/test/CodeGen/Mips/small-section-reserve-gp.ll @@ -6,7 +6,7 @@ define i32 @geti() nounwind readonly { entry: ; CHECK: lw ${{[0-9]+}}, %gp_rel(i)($gp) - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 ret i32 %0 } diff --git a/test/CodeGen/Mips/spill-copy-acreg.ll b/test/CodeGen/Mips/spill-copy-acreg.ll index 6563a5c..fd160b6 100644 --- a/test/CodeGen/Mips/spill-copy-acreg.ll +++ b/test/CodeGen/Mips/spill-copy-acreg.ll @@ -6,7 +6,7 @@ define i64 @test_acreg_copy(i32 %a0, i32 %a1, i32 %a2, i32 %a3) { entry: - %0 = load i64* @g1, align 8 + %0 = load i64, i64* @g1, align 8 %1 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a0, i32 %a1) %2 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a2, i32 %a3) store i64 %1, i64* @g1, align 8 @@ -32,8 +32,8 @@ entry: %sext = sext <2 x i1> %cmp3 to <2 x i16> store <2 x i16> %sext, <2 x i16>* @g4, align 4 tail call void @foo1() - %2 = load <2 x i16>* @g5, align 4 - %3 = load <2 x i16>* @g6, align 4 + %2 = load <2 x i16>, <2 x i16>* @g5, align 4 + %3 = load <2 x i16>, <2 x i16>* @g6, align 4 %or = select <2 x i1> %cmp3, <2 x i16> %2, <2 x i16> %3 %4 = bitcast <2 x i16> %or to i32 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0 diff --git a/test/CodeGen/Mips/sr1.ll b/test/CodeGen/Mips/sr1.ll index 610693d..69655f7 100644 --- a/test/CodeGen/Mips/sr1.ll +++ b/test/CodeGen/Mips/sr1.ll @@ -8,9 +8,9 @@ define void @foo1() #0 { entry: %c = alloca [10 x i8], align 1 - %arraydecay = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0 + %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0 call void @x(i8* %arraydecay) - %arraydecay1 = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0 + 
%arraydecay1 = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0 call void @x(i8* %arraydecay1) ret void ; CHECK: .ent foo1 @@ -25,9 +25,9 @@ declare void @x(i8*) #1 define void @foo2() #0 { entry: %c = alloca [150 x i8], align 1 - %arraydecay = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0 + %arraydecay = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0 call void @x(i8* %arraydecay) - %arraydecay1 = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0 + %arraydecay1 = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0 call void @x(i8* %arraydecay1) ret void ; CHECK: .ent foo2 diff --git a/test/CodeGen/Mips/sra1.ll b/test/CodeGen/Mips/sra1.ll index 15bf8d6..ecaba2c 100644 --- a/test/CodeGen/Mips/sra1.ll +++ b/test/CodeGen/Mips/sra1.ll @@ -5,10 +5,10 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %shr = ashr i32 %0, 3 ; 16: sra ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}} - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %shr) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr) ret i32 0 } diff --git a/test/CodeGen/Mips/sra2.ll b/test/CodeGen/Mips/sra2.ll index 26bf19d..d5fac8d 100644 --- a/test/CodeGen/Mips/sra2.ll +++ b/test/CodeGen/Mips/sra2.ll @@ -6,11 +6,11 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 - %1 = load i32* @j, align 4 + %0 = load i32, i32* @i, align 4 + %1 = load i32, i32* @j, align 4 %shr = ashr i32 %0, %1 ; 16: srav ${{[0-9]+}}, ${{[0-9]+}} - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %shr) + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr) ret i32 0 } diff --git a/test/CodeGen/Mips/srl1.ll b/test/CodeGen/Mips/srl1.ll index 3474283..dc4d88a 100644 --- a/test/CodeGen/Mips/srl1.ll +++ b/test/CodeGen/Mips/srl1.ll @@ -6,12 +6,12 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %shr = lshr i32 %0, 4 ; 16: srl ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}} store i32 %shr, i32* @j, align 4 - %1 = load i32* @j, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1) + %1 = load i32, i32* @j, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) ret i32 0 } diff --git a/test/CodeGen/Mips/srl2.ll b/test/CodeGen/Mips/srl2.ll index 26ec092..8fe088c 100644 --- a/test/CodeGen/Mips/srl2.ll +++ b/test/CodeGen/Mips/srl2.ll @@ -7,13 +7,13 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @i, align 4 + %1 = load i32, i32* @k, align 4 %shr = lshr i32 %0, %1 ; 16: srlv ${{[0-9]+}}, ${{[0-9]+}} store i32 %shr, i32* @j, align 4 - %2 = load i32* @j, align 4 - %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %2) + %2 = load i32, i32* @j, align 4 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2) ret i32 0 } diff --git a/test/CodeGen/Mips/stackcoloring.ll b/test/CodeGen/Mips/stackcoloring.ll index 4987dad..5516b5a 100644 --- a/test/CodeGen/Mips/stackcoloring.ll +++ b/test/CodeGen/Mips/stackcoloring.ll @@ -12,15 +12,15 @@ entry: %b = alloca [16 x i32], align 4 %0 = bitcast [16 x i32]* %b 
to i8*
   call void @llvm.lifetime.start(i64 64, i8* %0)
-  %arraydecay = getelementptr inbounds [16 x i32]* %b, i32 0, i32 0
+  %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %b, i32 0, i32 0
   br label %for.body
 
 for.body: ; preds = %for.body, %entry
   %i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %v.04 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %1 = load i32** @g1, align 4
-  %arrayidx = getelementptr inbounds i32* %1, i32 %i.05
-  %2 = load i32* %arrayidx, align 4
+  %1 = load i32*, i32** @g1, align 4
+  %arrayidx = getelementptr inbounds i32, i32* %1, i32 %i.05
+  %2 = load i32, i32* %arrayidx, align 4
   %call = call i32 @foo2(i32 %2, i32* %arraydecay)
   %add = add nsw i32 %call, %v.04
   %inc = add nsw i32 %i.05, 1
diff --git a/test/CodeGen/Mips/stchar.ll b/test/CodeGen/Mips/stchar.ll
index 12eae34..ad58794 100644
--- a/test/CodeGen/Mips/stchar.ll
+++ b/test/CodeGen/Mips/stchar.ll
@@ -9,7 +9,7 @@ define void @p1(i16 signext %s, i8 signext %c) nounwind {
 entry:
   %conv = sext i16 %s to i32
   %conv1 = sext i8 %c to i32
-  %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv, i32 %conv1) nounwind
+  %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv, i32 %conv1) nounwind
   ret void
 }
@@ -17,16 +17,16 @@ declare i32 @printf(i8* nocapture, ...) nounwind
 
 define void @p2() nounwind {
 entry:
-  %0 = load i16** @sp, align 4
-  %1 = load i16* %0, align 2
-  %2 = load i8** @cp, align 4
-  %3 = load i8* %2, align 1
+  %0 = load i16*, i16** @sp, align 4
+  %1 = load i16, i16* %0, align 2
+  %2 = load i8*, i8** @cp, align 4
+  %3 = load i8, i8* %2, align 1
   %conv.i = sext i16 %1 to i32
   %conv1.i = sext i8 %3 to i32
-  %call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
-  %4 = load i16** @sp, align 4
+  %call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
+  %4 = load i16*, i16** @sp, align 4
   store i16 32, i16* %4, align 2
-  %5 = load i8** @cp, align 4
+  %5 = load i8*, i8** @cp, align 4
   store i8 97, i8* %5, align 1
   ret void
 }
@@ -39,16 +39,16 @@ entry:
   store i8 99, i8* %c, align 4
   store i16* %s, i16** @sp, align 4
   store i8* %c, i8** @cp, align 4
-  %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
-  %0 = load i16** @sp, align 4
+  %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
+  %0 = load i16*, i16** @sp, align 4
   store i16 32, i16* %0, align 2
-  %1 = load i8** @cp, align 4
+  %1 = load i8*, i8** @cp, align 4
   store i8 97, i8* %1, align 1
-  %2 = load i16* %s, align 4
-  %3 = load i8* %c, align 4
+  %2 = load i16, i16* %s, align 4
+  %3 = load i8, i8* %c, align 4
   %conv.i = sext i16 %2 to i32
   %conv1.i = sext i8 %3 to i32
-  %call.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
+  %call.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
   ret void
 ; 16_b-LABEL: test:
 ; 16_h-LABEL: test:
@@ -69,16 +69,16 @@ entry:
   store i8 99, i8* %c.i, align 4
   store i16* %s.i, i16** @sp, align 4
   store i8* %c.i, i8** @cp, align 4
-  %call.i.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
-  %1 = load i16** @sp, align 4
+  %call.i.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
+  %1 = load i16*, i16** @sp, align 4
   store i16 32, i16* %1, align 2
-  %2 = load i8** @cp, align 4
+  %2 = load i8*, i8** @cp, align 4
   store i8 97, i8* %2, align 1
-  %3 = load i16* %s.i, align 4
-  %4 = load i8* %c.i, align 4
+  %3 = load i16, i16* %s.i, align 4
+  %4 = load i8, i8* %c.i, align 4
   %conv.i.i = sext i16 %3 to i32
   %conv1.i.i = sext i8 %4 to i32
-  %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i.i, i32 %conv1.i.i) nounwind
+  %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i.i, i32 %conv1.i.i) nounwind
   call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
   call void @llvm.lifetime.end(i64 -1, i8* %c.i) nounwind
   ret i32 0
diff --git a/test/CodeGen/Mips/stldst.ll b/test/CodeGen/Mips/stldst.ll
index 4182b9e..63e1e14 100644
--- a/test/CodeGen/Mips/stldst.ll
+++ b/test/CodeGen/Mips/stldst.ll
@@ -12,25 +12,25 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @kkkk, align 4
-  %1 = load i32* @llll, align 4
+  %0 = load i32, i32* @kkkk, align 4
+  %1 = load i32, i32* @llll, align 4
   %add = add nsw i32 %0, 10
   %add1 = add nsw i32 %1, 10
-  %2 = load i32* @mmmm, align 4
+  %2 = load i32, i32* @mmmm, align 4
   %sub = add nsw i32 %2, -3
-  %3 = load i32* @nnnn, align 4
+  %3 = load i32, i32* @nnnn, align 4
   %add2 = add nsw i32 %3, 10
-  %4 = load i32* @oooo, align 4
+  %4 = load i32, i32* @oooo, align 4
   %add3 = add nsw i32 %4, 4
-  %5 = load i32* @pppp, align 4
+  %5 = load i32, i32* @pppp, align 4
   %sub4 = add nsw i32 %5, -5
-  %6 = load i32* @qqqq, align 4
+  %6 = load i32, i32* @qqqq, align 4
   %sub5 = add nsw i32 %6, -10
-  %7 = load i32* @rrrr, align 4
+  %7 = load i32, i32* @rrrr, align 4
   %add6 = add nsw i32 %7, 6
-  %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8]* @.str, i32 0, i32 0), i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
-  %call7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
+  %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
+  %call7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
   ret i32 0
 }
 ; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill
diff --git a/test/CodeGen/Mips/sub1.ll b/test/CodeGen/Mips/sub1.ll
index 195750b..4c91252 100644
--- a/test/CodeGen/Mips/sub1.ll
+++ b/test/CodeGen/Mips/sub1.ll
@@ -5,10 +5,10 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %sub = sub nsw i32 %0, 5
 ; 16: addiu ${{[0-9]+}}, -{{[0-9]+}}
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/sub2.ll b/test/CodeGen/Mips/sub2.ll
index 4f6bfcc..e978d45 100644
--- a/test/CodeGen/Mips/sub2.ll
+++ b/test/CodeGen/Mips/sub2.ll
@@ -6,11 +6,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %sub = sub nsw i32 %0, %1
 ; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/swzero.ll b/test/CodeGen/Mips/swzero.ll
index 9f91a39..9aaee15 100644
--- a/test/CodeGen/Mips/swzero.ll
+++ b/test/CodeGen/Mips/swzero.ll
@@ -6,7 +6,7 @@ define void @zero_u(%struct.unaligned* nocapture %p) nounwind {
 entry:
 ; CHECK: swl $zero
 ; CHECK: swr $zero
-  %x = getelementptr inbounds %struct.unaligned* %p, i32 0, i32 0
+  %x = getelementptr inbounds %struct.unaligned, %struct.unaligned* %p, i32 0, i32 0
   store i32 0, i32* %x, align 1
   ret void
 }
diff --git a/test/CodeGen/Mips/tailcall.ll b/test/CodeGen/Mips/tailcall.ll
index 30f47ab..01b2d73 100644
--- a/test/CodeGen/Mips/tailcall.ll
+++ b/test/CodeGen/Mips/tailcall.ll
@@ -85,16 +85,16 @@ entry:
 ; PIC16: jalrc
 ; PIC16: .end caller5
 
-  %0 = load i32* @g0, align 4
-  %1 = load i32* @g1, align 4
-  %2 = load i32* @g2, align 4
-  %3 = load i32* @g3, align 4
-  %4 = load i32* @g4, align 4
-  %5 = load i32* @g5, align 4
-  %6 = load i32* @g6, align 4
-  %7 = load i32* @g7, align 4
-  %8 = load i32* @g8, align 4
-  %9 = load i32* @g9, align 4
+  %0 = load i32, i32* @g0, align 4
+  %1 = load i32, i32* @g1, align 4
+  %2 = load i32, i32* @g2, align 4
+  %3 = load i32, i32* @g3, align 4
+  %4 = load i32, i32* @g4, align 4
+  %5 = load i32, i32* @g5, align 4
+  %6 = load i32, i32* @g6, align 4
+  %7 = load i32, i32* @g7, align 4
+  %8 = load i32, i32* @g8, align 4
+  %9 = load i32, i32* @g9, align 4
   %call = tail call fastcc i32 @callee5(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9)
   ret i32 %call
 }
diff --git a/test/CodeGen/Mips/tls.ll b/test/CodeGen/Mips/tls.ll
index b14ad5b..97e270f 100644
--- a/test/CodeGen/Mips/tls.ll
+++ b/test/CodeGen/Mips/tls.ll
@@ -10,7 +10,7 @@
 
 define i32 @f1() nounwind {
 entry:
-  %tmp = load i32* @t1, align 4
+  %tmp = load i32, i32* @t1, align 4
   ret i32 %tmp
 
 ; PIC-LABEL: f1:
@@ -33,7 +33,7 @@
 
 define i32 @f2() nounwind {
 entry:
-  %tmp = load i32* @t2, align 4
+  %tmp = load i32, i32* @t2, align 4
   ret i32 %tmp
 
 ; PIC-LABEL: f2:
@@ -69,7 +69,7 @@ entry:
 ; PIC: addu $[[R1:[0-9]+]], $[[R0]], $2
 ; PIC: lw ${{[0-9]+}}, %dtprel_lo(f3.i)($[[R1]])
 
-  %0 = load i32* @f3.i, align 4
+  %0 = load i32, i32* @f3.i, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @f3.i, align 4
   ret i32 %inc
diff --git a/test/CodeGen/Mips/tls16.ll b/test/CodeGen/Mips/tls16.ll
index 861864b..3d324d7 100644
--- a/test/CodeGen/Mips/tls16.ll
+++ b/test/CodeGen/Mips/tls16.ll
@@ -4,7 +4,7 @@
 
 define i32 @foo() nounwind readonly {
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
 ; PIC16: lw ${{[0-9]+}}, %call16(__tls_get_addr)(${{[0-9]+}})
 ; PIC16: addiu ${{[0-9]+}}, %tlsgd(a)
   ret i32 %0
diff --git a/test/CodeGen/Mips/tls16_2.ll b/test/CodeGen/Mips/tls16_2.ll
index b33e3c3..0a6a412 100644
--- a/test/CodeGen/Mips/tls16_2.ll
+++ b/test/CodeGen/Mips/tls16_2.ll
@@ -4,7 +4,7 @@
 
 define i8* @f(i8* nocapture %a) nounwind {
 entry:
-  %0 = load i32* @f.i, align 4
+  %0 = load i32, i32* @f.i, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @f.i, align 4
   %1 = inttoptr i32 %inc to i8*
diff --git a/test/CodeGen/Mips/uitofp.ll b/test/CodeGen/Mips/uitofp.ll
index aff70c2..83c2069 100644
--- a/test/CodeGen/Mips/uitofp.ll
+++ b/test/CodeGen/Mips/uitofp.ll
@@ -5,7 +5,7 @@ entry:
   %b = alloca i32, align 4
   %a = alloca float, align 4
   store volatile i32 1, i32* %b, align 4
-  %0 = load volatile i32* %b, align 4
+  %0 = load volatile i32, i32* %b, align 4
   %conv = uitofp i32 %0 to float
   store float %conv, float* %a, align 4
   ret void
diff --git a/test/CodeGen/Mips/ul1.ll b/test/CodeGen/Mips/ul1.ll
index 7e64ff4..ad09929 100644
--- a/test/CodeGen/Mips/ul1.ll
+++ b/test/CodeGen/Mips/ul1.ll
@@ -5,7 +5,7 @@
 
 define i32 @main() nounwind {
 entry:
-  store i32 10, i32* getelementptr inbounds (%struct.ua* @foo, i32 0, i32 1), align 1
+  store i32 10, i32* getelementptr inbounds (%struct.ua, %struct.ua* @foo, i32 0, i32 1), align 1
 ; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
 ; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
 ; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/unalignedload.ll b/test/CodeGen/Mips/unalignedload.ll
index 2002b1c..9e453a6 100644
--- a/test/CodeGen/Mips/unalignedload.ll
+++ b/test/CodeGen/Mips/unalignedload.ll
@@ -30,7 +30,7 @@ entry:
 ; MIPS32R6-DAG: lhu $[[PART1:[0-9]+]], 2($[[R0]])
 
-  tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
+  tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind
   ret void
 }
diff --git a/test/CodeGen/Mips/vector-load-store.ll b/test/CodeGen/Mips/vector-load-store.ll
index d889963..61cbc5a 100644
--- a/test/CodeGen/Mips/vector-load-store.ll
+++ b/test/CodeGen/Mips/vector-load-store.ll
@@ -10,7 +10,7 @@ entry:
 ; CHECK: lw
 ; CHECK: sw
 
-  %0 = load <2 x i16>* @g1, align 4
+  %0 = load <2 x i16>, <2 x i16>* @g1, align 4
   store <2 x i16> %0, <2 x i16>* @g0, align 4
   ret void
 }
@@ -20,7 +20,7 @@ entry:
 ; CHECK: lw
 ; CHECK: sw
 
-  %0 = load <4 x i8>* @g3, align 4
+  %0 = load <4 x i8>, <4 x i8>* @g3, align 4
   store <4 x i8> %0, <4 x i8>* @g2, align 4
   ret void
 }
diff --git a/test/CodeGen/Mips/vector-setcc.ll b/test/CodeGen/Mips/vector-setcc.ll
index aeff491..64b84e4 100644
--- a/test/CodeGen/Mips/vector-setcc.ll
+++ b/test/CodeGen/Mips/vector-setcc.ll
@@ -6,8 +6,8 @@
 
 define void @foo0() nounwind {
 entry:
-  %0 = load <4 x i32>* @a, align 16
-  %1 = load <4 x i32>* @b, align 16
+  %0 = load <4 x i32>, <4 x i32>* @a, align 16
+  %1 = load <4 x i32>, <4 x i32>* @b, align 16
   %cmp = icmp slt <4 x i32> %0, %1
   %sext = sext <4 x i1> %cmp to <4 x i32>
   store <4 x i32> %sext, <4 x i32>* @g0, align 16
diff --git a/test/CodeGen/Mips/xor1.ll b/test/CodeGen/Mips/xor1.ll
index f2c1316..4fcfc45 100644
--- a/test/CodeGen/Mips/xor1.ll
+++ b/test/CodeGen/Mips/xor1.ll
@@ -6,11 +6,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @x, align 4
-  %1 = load i32* @y, align 4
+  %0 = load i32, i32* @x, align 4
+  %1 = load i32, i32* @y, align 4
   %xor = xor i32 %0, %1
 ; 16: xor ${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %xor)
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %xor)
   ret i32 0
 }
diff --git a/test/CodeGen/Mips/zeroreg.ll b/test/CodeGen/Mips/zeroreg.ll
index c766d3b..6baf9d4 100644
--- a/test/CodeGen/Mips/zeroreg.ll
+++ b/test/CodeGen/Mips/zeroreg.ll
@@ -25,7 +25,7 @@ entry:
 ; 64R6: seleqz $2, $[[R0]], $4
 
   %tobool = icmp ne i32 %s, 0
-  %0 = load i32* @g1, align 4
+  %0 = load i32, i32* @g1, align 4
   %cond = select i1 %tobool, i32 0, i32 %0
   ret i32 %cond
 }
@@ -47,7 +47,7 @@ entry:
 ; 64R6: selnez $2, $[[R0]], $4
 
   %tobool = icmp ne i32 %s, 0
-  %0 = load i32* @g1, align 4
+  %0 = load i32, i32* @g1, align 4
   %cond = select i1 %tobool, i32 %0, i32 0
   ret i32 %cond
 }
@@ -76,7 +76,7 @@ entry:
 ; 64R6: seleqz $2, $[[R0]], $4
 
   %tobool = icmp ne i64 %s, 0
-  %0 = load i64* @g2, align 4
+  %0 = load i64, i64* @g2, align 4
   %cond = select i1 %tobool, i64 0, i64 %0
   ret i64 %cond
 }
@@ -103,7 +103,7 @@ entry:
 ; 64R6: selnez $2, $[[R0]], $4
 
   %tobool = icmp ne i64 %s, 0
-  %0 = load i64* @g2, align 4
+  %0 = load i64, i64* @g2, align 4
   %cond = select i1 %tobool, i64 %0, i64 0
   ret i64 %cond
 }