Diffstat (limited to 'test/Transforms/LoopVectorize/X86/gather-cost.ll')
-rw-r--r-- | test/Transforms/LoopVectorize/X86/gather-cost.ll | 28
1 file changed, 14 insertions, 14 deletions
diff --git a/test/Transforms/LoopVectorize/X86/gather-cost.ll b/test/Transforms/LoopVectorize/X86/gather-cost.ll
index 09363d6..f0e6c8f 100644
--- a/test/Transforms/LoopVectorize/X86/gather-cost.ll
+++ b/test/Transforms/LoopVectorize/X86/gather-cost.ll
@@ -31,32 +31,32 @@ for.body:
   %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
   %add = add i64 %v.055, %offset
   %mul = mul i64 %add, 3
-  %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %mul
-  %0 = load float* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i64 0, i64 %v.055
-  %1 = load float* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
+  %0 = load float, float* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
+  %1 = load float, float* %arrayidx2, align 4
   %mul3 = fmul fast float %0, %1
-  %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i64 0, i64 %v.055
-  %2 = load float* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
+  %2 = load float, float* %arrayidx4, align 4
   %mul5 = fmul fast float %mul3, %2
-  %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i64 0, i64 %v.055
-  %3 = load float* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
+  %3 = load float, float* %arrayidx6, align 4
   %mul7 = fmul fast float %mul5, %3
-  %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i64 0, i64 %v.055
-  %4 = load float* %arrayidx8, align 4
+  %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
+  %4 = load float, float* %arrayidx8, align 4
   %mul9 = fmul fast float %mul7, %4
   %add10 = fadd fast float %r.057, %mul9
   %arrayidx.sum = add i64 %mul, 1
-  %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
-  %5 = load float* %arrayidx11, align 4
+  %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
+  %5 = load float, float* %arrayidx11, align 4
   %mul13 = fmul fast float %1, %5
   %mul15 = fmul fast float %2, %mul13
   %mul17 = fmul fast float %3, %mul15
   %mul19 = fmul fast float %4, %mul17
   %add20 = fadd fast float %g.056, %mul19
   %arrayidx.sum52 = add i64 %mul, 2
-  %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
-  %6 = load float* %arrayidx21, align 4
+  %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
+  %6 = load float, float* %arrayidx21, align 4
   %mul23 = fmul fast float %1, %6
   %mul25 = fmul fast float %2, %mul23
   %mul27 = fmul fast float %3, %mul25
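The change shown above matches LLVM's move to the explicit-type syntax for load and getelementptr, where the loaded type and the GEP source element type are written as a separate leading operand instead of being inferred from the pointer operand's type. A minimal sketch of the two forms, using hypothetical names %p, %i, and @arr rather than values from this test:

  ; old typed-pointer form (before the syntax change)
  ;   %v = load float* %p, align 4
  ;   %q = getelementptr inbounds [512 x float]* @arr, i64 0, i64 %i
  ; new form, with the element type spelled explicitly
  %v = load float, float* %p, align 4
  %q = getelementptr inbounds [512 x float], [512 x float]* @arr, i64 0, i64 %i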