author    Pirama Arumuga Nainar <pirama@google.com>    2015-04-08 08:55:49 -0700
committer Pirama Arumuga Nainar <pirama@google.com>    2015-04-09 15:04:38 -0700
commit    4c5e43da7792f75567b693105cc53e3f1992ad98 (patch)
tree      1b2c9792582e12f5af0b1512e3094425f0dc0df9 /test/CodeGen/AArch64/PBQP-chain.ll
parent    c75239e6119d0f9a74c57099d91cbc9bde56bf33 (diff)
Update aosp/master llvm for rebase to r233350
Change-Id: I07d935f8793ee8ec6b7da003f6483046594bca49
Diffstat (limited to 'test/CodeGen/AArch64/PBQP-chain.ll')
-rw-r--r--  test/CodeGen/AArch64/PBQP-chain.ll  90
1 file changed, 45 insertions(+), 45 deletions(-)
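
Background (not part of the commit itself): this test is being updated for LLVM's move to explicit pointee types in the textual IR, where load and getelementptr name the accessed type as an explicit first operand instead of deriving it from the pointer operand. A minimal before/after sketch of the syntax migration, using a hypothetical %base pointer:

  ; Old syntax: the element type is implied by the double* operand.
  %p.old = getelementptr inbounds double* %base, i64 1
  %v.old = load double* %p.old, align 8
  ; New syntax: the pointee type is spelled out explicitly.
  %p.new = getelementptr inbounds double, double* %base, i64 1
  %v.new = load double, double* %p.new, align 8

Every change in the diff below is this mechanical rewrite; the behavior of the test is unchanged.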
diff --git a/test/CodeGen/AArch64/PBQP-chain.ll b/test/CodeGen/AArch64/PBQP-chain.ll
index c4ba026..3e5fa74 100644
--- a/test/CodeGen/AArch64/PBQP-chain.ll
+++ b/test/CodeGen/AArch64/PBQP-chain.ll
@@ -22,79 +22,79 @@ target triple = "aarch64"
; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}}
define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapture %x, double* nocapture %y) {
entry:
- %0 = load double* %c, align 8
- %1 = load double* %x, align 8
+ %0 = load double, double* %c, align 8
+ %1 = load double, double* %x, align 8
%mul = fmul fast double %1, %0
- %2 = load double* %y, align 8
+ %2 = load double, double* %y, align 8
%mul7 = fmul fast double %2, %0
- %arrayidx.1 = getelementptr inbounds double* %c, i64 1
- %3 = load double* %arrayidx.1, align 8
- %arrayidx2.1 = getelementptr inbounds double* %x, i64 1
- %4 = load double* %arrayidx2.1, align 8
+ %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1
+ %3 = load double, double* %arrayidx.1, align 8
+ %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1
+ %4 = load double, double* %arrayidx2.1, align 8
%mul.1 = fmul fast double %4, %3
%add.1 = fadd fast double %mul.1, %mul
- %arrayidx6.1 = getelementptr inbounds double* %y, i64 1
- %5 = load double* %arrayidx6.1, align 8
+ %arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1
+ %5 = load double, double* %arrayidx6.1, align 8
%mul7.1 = fmul fast double %5, %3
%add8.1 = fadd fast double %mul7.1, %mul7
- %arrayidx.2 = getelementptr inbounds double* %c, i64 2
- %6 = load double* %arrayidx.2, align 8
- %arrayidx2.2 = getelementptr inbounds double* %x, i64 2
- %7 = load double* %arrayidx2.2, align 8
+ %arrayidx.2 = getelementptr inbounds double, double* %c, i64 2
+ %6 = load double, double* %arrayidx.2, align 8
+ %arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2
+ %7 = load double, double* %arrayidx2.2, align 8
%mul.2 = fmul fast double %7, %6
%add.2 = fadd fast double %mul.2, %add.1
- %arrayidx6.2 = getelementptr inbounds double* %y, i64 2
- %8 = load double* %arrayidx6.2, align 8
+ %arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2
+ %8 = load double, double* %arrayidx6.2, align 8
%mul7.2 = fmul fast double %8, %6
%add8.2 = fadd fast double %mul7.2, %add8.1
- %arrayidx.3 = getelementptr inbounds double* %c, i64 3
- %9 = load double* %arrayidx.3, align 8
- %arrayidx2.3 = getelementptr inbounds double* %x, i64 3
- %10 = load double* %arrayidx2.3, align 8
+ %arrayidx.3 = getelementptr inbounds double, double* %c, i64 3
+ %9 = load double, double* %arrayidx.3, align 8
+ %arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3
+ %10 = load double, double* %arrayidx2.3, align 8
%mul.3 = fmul fast double %10, %9
%add.3 = fadd fast double %mul.3, %add.2
- %arrayidx6.3 = getelementptr inbounds double* %y, i64 3
- %11 = load double* %arrayidx6.3, align 8
+ %arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3
+ %11 = load double, double* %arrayidx6.3, align 8
%mul7.3 = fmul fast double %11, %9
%add8.3 = fadd fast double %mul7.3, %add8.2
- %arrayidx.4 = getelementptr inbounds double* %c, i64 4
- %12 = load double* %arrayidx.4, align 8
- %arrayidx2.4 = getelementptr inbounds double* %x, i64 4
- %13 = load double* %arrayidx2.4, align 8
+ %arrayidx.4 = getelementptr inbounds double, double* %c, i64 4
+ %12 = load double, double* %arrayidx.4, align 8
+ %arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4
+ %13 = load double, double* %arrayidx2.4, align 8
%mul.4 = fmul fast double %13, %12
%add.4 = fadd fast double %mul.4, %add.3
- %arrayidx6.4 = getelementptr inbounds double* %y, i64 4
- %14 = load double* %arrayidx6.4, align 8
+ %arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4
+ %14 = load double, double* %arrayidx6.4, align 8
%mul7.4 = fmul fast double %14, %12
%add8.4 = fadd fast double %mul7.4, %add8.3
- %arrayidx.5 = getelementptr inbounds double* %c, i64 5
- %15 = load double* %arrayidx.5, align 8
- %arrayidx2.5 = getelementptr inbounds double* %x, i64 5
- %16 = load double* %arrayidx2.5, align 8
+ %arrayidx.5 = getelementptr inbounds double, double* %c, i64 5
+ %15 = load double, double* %arrayidx.5, align 8
+ %arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5
+ %16 = load double, double* %arrayidx2.5, align 8
%mul.5 = fmul fast double %16, %15
%add.5 = fadd fast double %mul.5, %add.4
- %arrayidx6.5 = getelementptr inbounds double* %y, i64 5
- %17 = load double* %arrayidx6.5, align 8
+ %arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5
+ %17 = load double, double* %arrayidx6.5, align 8
%mul7.5 = fmul fast double %17, %15
%add8.5 = fadd fast double %mul7.5, %add8.4
- %arrayidx.6 = getelementptr inbounds double* %c, i64 6
- %18 = load double* %arrayidx.6, align 8
- %arrayidx2.6 = getelementptr inbounds double* %x, i64 6
- %19 = load double* %arrayidx2.6, align 8
+ %arrayidx.6 = getelementptr inbounds double, double* %c, i64 6
+ %18 = load double, double* %arrayidx.6, align 8
+ %arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6
+ %19 = load double, double* %arrayidx2.6, align 8
%mul.6 = fmul fast double %19, %18
%add.6 = fadd fast double %mul.6, %add.5
- %arrayidx6.6 = getelementptr inbounds double* %y, i64 6
- %20 = load double* %arrayidx6.6, align 8
+ %arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6
+ %20 = load double, double* %arrayidx6.6, align 8
%mul7.6 = fmul fast double %20, %18
%add8.6 = fadd fast double %mul7.6, %add8.5
- %arrayidx.7 = getelementptr inbounds double* %c, i64 7
- %21 = load double* %arrayidx.7, align 8
- %arrayidx2.7 = getelementptr inbounds double* %x, i64 7
- %22 = load double* %arrayidx2.7, align 8
+ %arrayidx.7 = getelementptr inbounds double, double* %c, i64 7
+ %21 = load double, double* %arrayidx.7, align 8
+ %arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7
+ %22 = load double, double* %arrayidx2.7, align 8
%mul.7 = fmul fast double %22, %21
%add.7 = fadd fast double %mul.7, %add.6
- %arrayidx6.7 = getelementptr inbounds double* %y, i64 7
- %23 = load double* %arrayidx6.7, align 8
+ %arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7
+ %23 = load double, double* %arrayidx6.7, align 8
%mul7.7 = fmul fast double %23, %21
%add8.7 = fadd fast double %mul7.7, %add8.6
store double %add.7, double* %rx, align 8