diff options
Diffstat (limited to 'test/CodeGen/PowerPC/vsx-minmax.ll')
-rw-r--r-- | test/CodeGen/PowerPC/vsx-minmax.ll | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/test/CodeGen/PowerPC/vsx-minmax.ll b/test/CodeGen/PowerPC/vsx-minmax.ll index 47f50ab..ad72cac 100644 --- a/test/CodeGen/PowerPC/vsx-minmax.ll +++ b/test/CodeGen/PowerPC/vsx-minmax.ll @@ -18,35 +18,35 @@ target triple = "powerpc64-unknown-linux-gnu" define void @test1() #0 { ; CHECK-LABEL: @test1 entry: - %0 = load volatile <4 x float>* @vf, align 16 - %1 = load volatile <4 x float>* @vf, align 16 + %0 = load volatile <4 x float>, <4 x float>* @vf, align 16 + %1 = load volatile <4 x float>, <4 x float>* @vf, align 16 %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1) ; CHECK: xvmaxsp store <4 x float> %2, <4 x float>* @vf1, align 16 - %3 = load <2 x double>* @vd, align 16 + %3 = load <2 x double>, <2 x double>* @vd, align 16 %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3) ; CHECK: xvmaxdp store <2 x double> %4, <2 x double>* @vd1, align 16 - %5 = load volatile <4 x float>* @vf, align 16 - %6 = load volatile <4 x float>* @vf, align 16 + %5 = load volatile <4 x float>, <4 x float>* @vf, align 16 + %6 = load volatile <4 x float>, <4 x float>* @vf, align 16 %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6) ; CHECK: xvmaxsp store <4 x float> %7, <4 x float>* @vf2, align 16 - %8 = load volatile <4 x float>* @vf, align 16 - %9 = load volatile <4 x float>* @vf, align 16 + %8 = load volatile <4 x float>, <4 x float>* @vf, align 16 + %9 = load volatile <4 x float>, <4 x float>* @vf, align 16 %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9) ; CHECK: xvminsp store <4 x float> %10, <4 x float>* @vf3, align 16 - %11 = load <2 x double>* @vd, align 16 + %11 = load <2 x double>, <2 x double>* @vd, align 16 %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11) ; CHECK: xvmindp store <2 x double> %12, <2 x double>* @vd2, align 16 - %13 = load volatile <4 x float>* @vf, align 16 - %14 = load volatile <4 x float>* @vf, align 16 + %13 = load volatile <4 x float>, <4 x float>* @vf, align 16 + %14 = load volatile <4 x float>, <4 x float>* @vf, align 16 %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14) ; CHECK: xvminsp store <4 x float> %15, <4 x float>* @vf4, align 16 - %16 = load double* @d, align 8 + %16 = load double, double* @d, align 8 %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16) ; CHECK: xsmaxdp store double %17, double* @d1, align 8 |