diff options
author | Pirama Arumuga Nainar <pirama@google.com> | 2015-04-10 22:08:18 +0000 |
---|---|---|
committer | Android Git Automerger <android-git-automerger@android.com> | 2015-04-10 22:08:18 +0000 |
commit | 13a7db5b9c4f5e543d037be68ec3428216bfd550 (patch) | |
tree | 1b2c9792582e12f5af0b1512e3094425f0dc0df9 /test/CodeGen/Thumb2/2009-07-30-PEICrash.ll | |
parent | 0eb46f5d1e06a4284663d636a74b06adc3a161d7 (diff) | |
parent | 31195f0bdca6ee2a5e72d07edf13e1d81206d949 (diff) | |
download | external_llvm-13a7db5b9c4f5e543d037be68ec3428216bfd550.zip external_llvm-13a7db5b9c4f5e543d037be68ec3428216bfd550.tar.gz external_llvm-13a7db5b9c4f5e543d037be68ec3428216bfd550.tar.bz2 |
am 31195f0b: Merge "Update aosp/master llvm for rebase to r233350"
* commit '31195f0bdca6ee2a5e72d07edf13e1d81206d949':
Update aosp/master llvm for rebase to r233350
Diffstat (limited to 'test/CodeGen/Thumb2/2009-07-30-PEICrash.ll')
-rw-r--r-- | test/CodeGen/Thumb2/2009-07-30-PEICrash.ll | 82 |
1 file changed, 41 insertions, 41 deletions
diff --git a/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll b/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll index 3e07618..77d2991 100644 --- a/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll +++ b/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll @@ -31,35 +31,35 @@ define void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind { entry: %workspace = alloca [64 x float], align 4 ; <[64 x float]*> [#uses=11] - %0 = load i8** undef, align 4 ; <i8*> [#uses=5] + %0 = load i8*, i8** undef, align 4 ; <i8*> [#uses=5] br label %bb bb: ; preds = %bb, %entry %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=11] %tmp39 = add i32 %indvar, 8 ; <i32> [#uses=0] %tmp41 = add i32 %indvar, 16 ; <i32> [#uses=2] - %scevgep42 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp41 ; <float*> [#uses=1] + %scevgep42 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp41 ; <float*> [#uses=1] %tmp43 = add i32 %indvar, 24 ; <i32> [#uses=1] - %scevgep44 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp43 ; <float*> [#uses=1] + %scevgep44 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp43 ; <float*> [#uses=1] %tmp45 = add i32 %indvar, 32 ; <i32> [#uses=1] - %scevgep46 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp45 ; <float*> [#uses=1] + %scevgep46 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp45 ; <float*> [#uses=1] %tmp47 = add i32 %indvar, 40 ; <i32> [#uses=1] - %scevgep48 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp47 ; <float*> [#uses=1] + %scevgep48 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp47 ; <float*> [#uses=1] %tmp49 = add i32 %indvar, 48 ; <i32> [#uses=1] - %scevgep50 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp49 ; <float*> [#uses=1] + %scevgep50 = getelementptr [64 x float], [64 x float]* 
%workspace, i32 0, i32 %tmp49 ; <float*> [#uses=1] %tmp51 = add i32 %indvar, 56 ; <i32> [#uses=1] - %scevgep52 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp51 ; <float*> [#uses=1] - %wsptr.119 = getelementptr [64 x float]* %workspace, i32 0, i32 %indvar ; <float*> [#uses=1] + %scevgep52 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp51 ; <float*> [#uses=1] + %wsptr.119 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %indvar ; <float*> [#uses=1] %tmp54 = shl i32 %indvar, 2 ; <i32> [#uses=1] - %scevgep76 = getelementptr i8* undef, i32 %tmp54 ; <i8*> [#uses=1] + %scevgep76 = getelementptr i8, i8* undef, i32 %tmp54 ; <i8*> [#uses=1] %quantptr.118 = bitcast i8* %scevgep76 to float* ; <float*> [#uses=1] - %scevgep79 = getelementptr i16* %coef_block, i32 %tmp41 ; <i16*> [#uses=0] - %inptr.117 = getelementptr i16* %coef_block, i32 %indvar ; <i16*> [#uses=1] - %1 = load i16* null, align 2 ; <i16> [#uses=1] - %2 = load i16* undef, align 2 ; <i16> [#uses=1] - %3 = load i16* %inptr.117, align 2 ; <i16> [#uses=1] + %scevgep79 = getelementptr i16, i16* %coef_block, i32 %tmp41 ; <i16*> [#uses=0] + %inptr.117 = getelementptr i16, i16* %coef_block, i32 %indvar ; <i16*> [#uses=1] + %1 = load i16, i16* null, align 2 ; <i16> [#uses=1] + %2 = load i16, i16* undef, align 2 ; <i16> [#uses=1] + %3 = load i16, i16* %inptr.117, align 2 ; <i16> [#uses=1] %4 = sitofp i16 %3 to float ; <float> [#uses=1] - %5 = load float* %quantptr.118, align 4 ; <float> [#uses=1] + %5 = load float, float* %quantptr.118, align 4 ; <float> [#uses=1] %6 = fmul float %4, %5 ; <float> [#uses=1] %7 = fsub float %6, undef ; <float> [#uses=2] %8 = fmul float undef, 0x3FF6A09E60000000 ; <float> [#uses=1] @@ -70,7 +70,7 @@ bb: ; preds = %bb, %entry %13 = sitofp i16 %1 to float ; <float> [#uses=1] %14 = fmul float %13, undef ; <float> [#uses=2] %15 = sitofp i16 %2 to float ; <float> [#uses=1] - %16 = load float* undef, align 4 ; <float> [#uses=1] + %16 = load float, 
float* undef, align 4 ; <float> [#uses=1] %17 = fmul float %15, %16 ; <float> [#uses=1] %18 = fadd float %14, undef ; <float> [#uses=2] %19 = fsub float %14, undef ; <float> [#uses=2] @@ -114,22 +114,22 @@ bb6: ; preds = %bb bb8: ; preds = %bb8, %bb6 %ctr.116 = phi i32 [ 0, %bb6 ], [ %88, %bb8 ] ; <i32> [#uses=3] - %scevgep = getelementptr i8** %output_buf, i32 %ctr.116 ; <i8**> [#uses=1] + %scevgep = getelementptr i8*, i8** %output_buf, i32 %ctr.116 ; <i8**> [#uses=1] %tmp = shl i32 %ctr.116, 3 ; <i32> [#uses=5] %tmp2392 = or i32 %tmp, 4 ; <i32> [#uses=1] - %scevgep24 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2392 ; <float*> [#uses=1] + %scevgep24 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2392 ; <float*> [#uses=1] %tmp2591 = or i32 %tmp, 2 ; <i32> [#uses=1] - %scevgep26 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2591 ; <float*> [#uses=1] + %scevgep26 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2591 ; <float*> [#uses=1] %tmp2790 = or i32 %tmp, 6 ; <i32> [#uses=1] - %scevgep28 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2790 ; <float*> [#uses=1] + %scevgep28 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2790 ; <float*> [#uses=1] %tmp3586 = or i32 %tmp, 7 ; <i32> [#uses=0] - %wsptr.215 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp ; <float*> [#uses=1] - %40 = load i8** %scevgep, align 4 ; <i8*> [#uses=4] - %41 = load float* %wsptr.215, align 4 ; <float> [#uses=1] - %42 = load float* %scevgep24, align 4 ; <float> [#uses=1] + %wsptr.215 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp ; <float*> [#uses=1] + %40 = load i8*, i8** %scevgep, align 4 ; <i8*> [#uses=4] + %41 = load float, float* %wsptr.215, align 4 ; <float> [#uses=1] + %42 = load float, float* %scevgep24, align 4 ; <float> [#uses=1] %43 = fadd float %41, %42 ; <float> [#uses=1] - %44 = load float* %scevgep26, align 4 ; <float> [#uses=1] - %45 = load 
float* %scevgep28, align 4 ; <float> [#uses=1] + %44 = load float, float* %scevgep26, align 4 ; <float> [#uses=1] + %45 = load float, float* %scevgep28, align 4 ; <float> [#uses=1] %46 = fadd float %44, %45 ; <float> [#uses=1] %47 = fsub float %43, %46 ; <float> [#uses=2] %48 = fsub float undef, 0.000000e+00 ; <float> [#uses=1] @@ -139,13 +139,13 @@ bb8: ; preds = %bb8, %bb6 %52 = lshr i32 %51, 3 ; <i32> [#uses=1] %53 = and i32 %52, 1023 ; <i32> [#uses=1] %.sum14 = add i32 %53, 128 ; <i32> [#uses=1] - %54 = getelementptr i8* %0, i32 %.sum14 ; <i8*> [#uses=1] - %55 = load i8* %54, align 1 ; <i8> [#uses=1] + %54 = getelementptr i8, i8* %0, i32 %.sum14 ; <i8*> [#uses=1] + %55 = load i8, i8* %54, align 1 ; <i8> [#uses=1] store i8 %55, i8* null, align 1 - %56 = getelementptr i8* %40, i32 %.sum10 ; <i8*> [#uses=1] + %56 = getelementptr i8, i8* %40, i32 %.sum10 ; <i8*> [#uses=1] store i8 0, i8* %56, align 1 - %57 = load i8* null, align 1 ; <i8> [#uses=1] - %58 = getelementptr i8* %40, i32 %.sum8 ; <i8*> [#uses=1] + %57 = load i8, i8* null, align 1 ; <i8> [#uses=1] + %58 = getelementptr i8, i8* %40, i32 %.sum8 ; <i8*> [#uses=1] store i8 %57, i8* %58, align 1 %59 = fadd float undef, %48 ; <float> [#uses=1] %60 = fptosi float %59 to i32 ; <i32> [#uses=1] @@ -153,17 +153,17 @@ bb8: ; preds = %bb8, %bb6 %62 = lshr i32 %61, 3 ; <i32> [#uses=1] %63 = and i32 %62, 1023 ; <i32> [#uses=1] %.sum7 = add i32 %63, 128 ; <i32> [#uses=1] - %64 = getelementptr i8* %0, i32 %.sum7 ; <i8*> [#uses=1] - %65 = load i8* %64, align 1 ; <i8> [#uses=1] - %66 = getelementptr i8* %40, i32 %.sum6 ; <i8*> [#uses=1] + %64 = getelementptr i8, i8* %0, i32 %.sum7 ; <i8*> [#uses=1] + %65 = load i8, i8* %64, align 1 ; <i8> [#uses=1] + %66 = getelementptr i8, i8* %40, i32 %.sum6 ; <i8*> [#uses=1] store i8 %65, i8* %66, align 1 %67 = fptosi float undef to i32 ; <i32> [#uses=1] %68 = add i32 %67, 4 ; <i32> [#uses=1] %69 = lshr i32 %68, 3 ; <i32> [#uses=1] %70 = and i32 %69, 1023 ; <i32> [#uses=1] %.sum5 = add 
i32 %70, 128 ; <i32> [#uses=1] - %71 = getelementptr i8* %0, i32 %.sum5 ; <i8*> [#uses=1] - %72 = load i8* %71, align 1 ; <i8> [#uses=1] + %71 = getelementptr i8, i8* %0, i32 %.sum5 ; <i8*> [#uses=1] + %72 = load i8, i8* %71, align 1 ; <i8> [#uses=1] store i8 %72, i8* undef, align 1 %73 = fadd float %47, undef ; <float> [#uses=1] %74 = fptosi float %73 to i32 ; <i32> [#uses=1] @@ -171,8 +171,8 @@ bb8: ; preds = %bb8, %bb6 %76 = lshr i32 %75, 3 ; <i32> [#uses=1] %77 = and i32 %76, 1023 ; <i32> [#uses=1] %.sum3 = add i32 %77, 128 ; <i32> [#uses=1] - %78 = getelementptr i8* %0, i32 %.sum3 ; <i8*> [#uses=1] - %79 = load i8* %78, align 1 ; <i8> [#uses=1] + %78 = getelementptr i8, i8* %0, i32 %.sum3 ; <i8*> [#uses=1] + %79 = load i8, i8* %78, align 1 ; <i8> [#uses=1] store i8 %79, i8* undef, align 1 %80 = fsub float %47, undef ; <float> [#uses=1] %81 = fptosi float %80 to i32 ; <i32> [#uses=1] @@ -180,9 +180,9 @@ bb8: ; preds = %bb8, %bb6 %83 = lshr i32 %82, 3 ; <i32> [#uses=1] %84 = and i32 %83, 1023 ; <i32> [#uses=1] %.sum1 = add i32 %84, 128 ; <i32> [#uses=1] - %85 = getelementptr i8* %0, i32 %.sum1 ; <i8*> [#uses=1] - %86 = load i8* %85, align 1 ; <i8> [#uses=1] - %87 = getelementptr i8* %40, i32 %.sum ; <i8*> [#uses=1] + %85 = getelementptr i8, i8* %0, i32 %.sum1 ; <i8*> [#uses=1] + %86 = load i8, i8* %85, align 1 ; <i8> [#uses=1] + %87 = getelementptr i8, i8* %40, i32 %.sum ; <i8*> [#uses=1] store i8 %86, i8* %87, align 1 %88 = add i32 %ctr.116, 1 ; <i32> [#uses=2] %exitcond = icmp eq i32 %88, 8 ; <i1> [#uses=1] |