Diffstat (limited to 'test/Transforms/BBVectorize/X86')
-rw-r--r--  test/Transforms/BBVectorize/X86/loop1.ll          10
-rw-r--r--  test/Transforms/BBVectorize/X86/pr15289.ll        20
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec.ll         18
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec2.ll        64
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec3.ll       112
-rw-r--r--  test/Transforms/BBVectorize/X86/simple-ldstr.ll   18
-rw-r--r--  test/Transforms/BBVectorize/X86/wr-aliases.ll     88
7 files changed, 165 insertions, 165 deletions
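
Every hunk below applies the same mechanical rewrite: getelementptr and load now name the pointee type as an explicit leading operand instead of deriving it from the pointer operand's type, the textual-IR change made in the LLVM 3.7 development cycle as groundwork for opaque pointer types. A minimal before/after sketch of the syntax, using illustrative value names (%base, %p, %v) rather than names taken from the tests:

  ; Old form: the indexed/loaded type is implied by the pointer operand.
  %p = getelementptr inbounds double* %base, i64 1
  %v = load double* %p, align 8

  ; New form: the pointee type is spelled out as an explicit first argument,
  ; so the textual IR no longer depends on the pointer's own type.
  %p = getelementptr inbounds double, double* %base, i64 1
  %v = load double, double* %p, align 8

The FileCheck lines in simple-ldstr.ll and wr-aliases.ll are updated the same way, since they match the load syntax verbatim.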
diff --git a/test/Transforms/BBVectorize/X86/loop1.ll b/test/Transforms/BBVectorize/X86/loop1.ll
index 4018084..c3c3045 100644
--- a/test/Transforms/BBVectorize/X86/loop1.ll
+++ b/test/Transforms/BBVectorize/X86/loop1.ll
@@ -12,10 +12,10 @@ entry:
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds double* %in1, i64 %indvars.iv
-  %0 = load double* %arrayidx, align 8
-  %arrayidx2 = getelementptr inbounds double* %in2, i64 %indvars.iv
-  %1 = load double* %arrayidx2, align 8
+  %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
+  %0 = load double, double* %arrayidx, align 8
+  %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
+  %1 = load double, double* %arrayidx2, align 8
   %mul = fmul double %0, %0
   %mul3 = fmul double %0, %1
   %add = fadd double %mul, %mul3
@@ -28,7 +28,7 @@ for.body: ; preds = %for.body, %entry
   %add10 = fadd double %add9, %0
   %mul11 = fmul double %mul8, %add10
   %add12 = fadd double %add7, %mul11
-  %arrayidx14 = getelementptr inbounds double* %out, i64 %indvars.iv
+  %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
   store double %add12, double* %arrayidx14, align 8
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/test/Transforms/BBVectorize/X86/pr15289.ll b/test/Transforms/BBVectorize/X86/pr15289.ll
index 42bd0ff..a383a26 100644
--- a/test/Transforms/BBVectorize/X86/pr15289.ll
+++ b/test/Transforms/BBVectorize/X86/pr15289.ll
@@ -44,43 +44,43 @@ entry:
   %12 = fsub double undef, %7
   %13 = fmul double %3, %12
   %14 = fmul double %3, undef
-  %15 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 0
+  %15 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 0
   store double %13, double* %15, align 8
-  %16 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 1
+  %16 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 1
   %17 = fmul double undef, %8
   %18 = fmul double %17, undef
   %19 = fmul double undef, %18
   %20 = fadd double undef, undef
   %21 = fmul double %3, %19
   %22 = fsub double -0.000000e+00, %21
-  %23 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 0
+  %23 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 0
   store double %22, double* %23, align 8
-  %24 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 1
+  %24 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 1
   %25 = fmul double undef, 0x3FE42F601A8C6794
   %26 = fmul double undef, 2.000000e+00
   %27 = fsub double %26, %0
   %28 = fmul double %6, undef
   %29 = fsub double undef, %28
-  %30 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 0
+  %30 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 0
   store double undef, double* %30, align 8
-  %31 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 1
+  %31 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 1
   %32 = fmul double undef, %17
   %33 = fmul double undef, %17
   %34 = fmul double undef, %32
   %35 = fmul double undef, %33
   %36 = fsub double undef, %35
   %37 = fmul double %3, %34
-  %38 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 0
+  %38 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 0
   store double %37, double* %38, align 8
-  %39 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 1
+  %39 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 1
   %40 = fmul double undef, %8
   %41 = fmul double undef, %40
   %42 = fmul double undef, %41
   %43 = fsub double undef, %42
   %44 = fmul double %3, %43
-  %45 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 0
+  %45 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 0
   store double %13, double* %45, align 8
-  %46 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 1
+  %46 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 1
   %47 = fsub double -0.000000e+00, %14
   store double %47, double* %16, align 8
   store double undef, double* %24, align 8
diff --git a/test/Transforms/BBVectorize/X86/sh-rec.ll b/test/Transforms/BBVectorize/X86/sh-rec.ll
index ad75fc9..2cb9dbd 100644
--- a/test/Transforms/BBVectorize/X86/sh-rec.ll
+++ b/test/Transforms/BBVectorize/X86/sh-rec.ll
@@ -8,39 +8,39 @@ entry:
   br i1 undef, label %return, label %if.end10

 if.end10: ; preds = %entry
-  %incdec.ptr = getelementptr inbounds i8* %call, i64 undef
+  %incdec.ptr = getelementptr inbounds i8, i8* %call, i64 undef
   %call17 = call i32 @ptou() nounwind
-  %incdec.ptr26.1 = getelementptr inbounds i8* %incdec.ptr, i64 -2
+  %incdec.ptr26.1 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -2
   store i8 undef, i8* %incdec.ptr26.1, align 1
   %div27.1 = udiv i32 %call17, 100
   %rem.2 = urem i32 %div27.1, 10
   %add2230.2 = or i32 %rem.2, 48
   %conv25.2 = trunc i32 %add2230.2 to i8
-  %incdec.ptr26.2 = getelementptr inbounds i8* %incdec.ptr, i64 -3
+  %incdec.ptr26.2 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -3
   store i8 %conv25.2, i8* %incdec.ptr26.2, align 1
-  %incdec.ptr26.3 = getelementptr inbounds i8* %incdec.ptr, i64 -4
+  %incdec.ptr26.3 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -4
   store i8 undef, i8* %incdec.ptr26.3, align 1
   %div27.3 = udiv i32 %call17, 10000
   %rem.4 = urem i32 %div27.3, 10
   %add2230.4 = or i32 %rem.4, 48
   %conv25.4 = trunc i32 %add2230.4 to i8
-  %incdec.ptr26.4 = getelementptr inbounds i8* %incdec.ptr, i64 -5
+  %incdec.ptr26.4 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -5
   store i8 %conv25.4, i8* %incdec.ptr26.4, align 1
   %div27.4 = udiv i32 %call17, 100000
   %rem.5 = urem i32 %div27.4, 10
   %add2230.5 = or i32 %rem.5, 48
   %conv25.5 = trunc i32 %add2230.5 to i8
-  %incdec.ptr26.5 = getelementptr inbounds i8* %incdec.ptr, i64 -6
+  %incdec.ptr26.5 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -6
   store i8 %conv25.5, i8* %incdec.ptr26.5, align 1
-  %incdec.ptr26.6 = getelementptr inbounds i8* %incdec.ptr, i64 -7
+  %incdec.ptr26.6 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -7
   store i8 0, i8* %incdec.ptr26.6, align 1
-  %incdec.ptr26.7 = getelementptr inbounds i8* %incdec.ptr, i64 -8
+  %incdec.ptr26.7 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -8
   store i8 undef, i8* %incdec.ptr26.7, align 1
   %div27.7 = udiv i32 %call17, 100000000
   %rem.8 = urem i32 %div27.7, 10
   %add2230.8 = or i32 %rem.8, 48
   %conv25.8 = trunc i32 %add2230.8 to i8
-  %incdec.ptr26.8 = getelementptr inbounds i8* %incdec.ptr, i64 -9
+  %incdec.ptr26.8 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -9
   store i8 %conv25.8, i8* %incdec.ptr26.8, align 1
   unreachable
diff --git a/test/Transforms/BBVectorize/X86/sh-rec2.ll b/test/Transforms/BBVectorize/X86/sh-rec2.ll
index d65ac1c..d7a004c 100644
--- a/test/Transforms/BBVectorize/X86/sh-rec2.ll
+++ b/test/Transforms/BBVectorize/X86/sh-rec2.ll
@@ -7,72 +7,72 @@ target triple = "x86_64-unknown-linux-gnu"
 define void @gsm_encode(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i8* %c) nounwind uwtable {
 entry:
   %xmc = alloca [52 x i16], align 16
-  %arraydecay5 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 0
+  %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
   call void @Gsm_Coder(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i16* undef, i16* null, i16* undef, i16* undef, i16* undef, i16* %arraydecay5) nounwind
-  %incdec.ptr136 = getelementptr inbounds i8* %c, i64 10
-  %incdec.ptr157 = getelementptr inbounds i8* %c, i64 11
+  %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
+  %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
   store i8 0, i8* %incdec.ptr136, align 1
-  %arrayidx162 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 11
-  %0 = load i16* %arrayidx162, align 2
+  %arrayidx162 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 11
+  %0 = load i16, i16* %arrayidx162, align 2
   %conv1631 = trunc i16 %0 to i8
   %and164 = shl i8 %conv1631, 3
   %shl165 = and i8 %and164, 56
-  %incdec.ptr172 = getelementptr inbounds i8* %c, i64 12
+  %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
   store i8 %shl165, i8* %incdec.ptr157, align 1
-  %1 = load i16* inttoptr (i64 2 to i16*), align 2
+  %1 = load i16, i16* inttoptr (i64 2 to i16*), align 2
   %conv1742 = trunc i16 %1 to i8
   %and175 = shl i8 %conv1742, 1
-  %incdec.ptr183 = getelementptr inbounds i8* %c, i64 13
+  %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
   store i8 %and175, i8* %incdec.ptr172, align 1
-  %incdec.ptr199 = getelementptr inbounds i8* %c, i64 14
+  %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
   store i8 0, i8* %incdec.ptr183, align 1
-  %arrayidx214 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 15
-  %incdec.ptr220 = getelementptr inbounds i8* %c, i64 15
+  %arrayidx214 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 15
+  %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
   store i8 0, i8* %incdec.ptr199, align 1
-  %2 = load i16* %arrayidx214, align 2
+  %2 = load i16, i16* %arrayidx214, align 2
   %conv2223 = trunc i16 %2 to i8
   %and223 = shl i8 %conv2223, 6
-  %incdec.ptr235 = getelementptr inbounds i8* %c, i64 16
+  %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
   store i8 %and223, i8* %incdec.ptr220, align 1
-  %arrayidx240 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 19
-  %3 = load i16* %arrayidx240, align 2
+  %arrayidx240 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 19
+  %3 = load i16, i16* %arrayidx240, align 2
   %conv2414 = trunc i16 %3 to i8
   %and242 = shl i8 %conv2414, 2
   %shl243 = and i8 %and242, 28
-  %incdec.ptr251 = getelementptr inbounds i8* %c, i64 17
+  %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
   store i8 %shl243, i8* %incdec.ptr235, align 1
-  %incdec.ptr272 = getelementptr inbounds i8* %c, i64 18
+  %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
   store i8 0, i8* %incdec.ptr251, align 1
-  %arrayidx282 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 25
-  %4 = load i16* %arrayidx282, align 2
+  %arrayidx282 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 25
+  %4 = load i16, i16* %arrayidx282, align 2
   %conv2835 = trunc i16 %4 to i8
   %and284 = and i8 %conv2835, 7
-  %incdec.ptr287 = getelementptr inbounds i8* %c, i64 19
+  %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
   store i8 %and284, i8* %incdec.ptr272, align 1
-  %incdec.ptr298 = getelementptr inbounds i8* %c, i64 20
+  %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
   store i8 0, i8* %incdec.ptr287, align 1
-  %incdec.ptr314 = getelementptr inbounds i8* %c, i64 21
+  %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
   store i8 0, i8* %incdec.ptr298, align 1
-  %arrayidx319 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 26
-  %5 = load i16* %arrayidx319, align 4
+  %arrayidx319 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 26
+  %5 = load i16, i16* %arrayidx319, align 4
   %conv3206 = trunc i16 %5 to i8
   %and321 = shl i8 %conv3206, 4
   %shl322 = and i8 %and321, 112
-  %incdec.ptr335 = getelementptr inbounds i8* %c, i64 22
+  %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
   store i8 %shl322, i8* %incdec.ptr314, align 1
-  %arrayidx340 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 29
-  %6 = load i16* %arrayidx340, align 2
+  %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
+  %6 = load i16, i16* %arrayidx340, align 2
   %conv3417 = trunc i16 %6 to i8
   %and342 = shl i8 %conv3417, 3
   %shl343 = and i8 %and342, 56
-  %incdec.ptr350 = getelementptr inbounds i8* %c, i64 23
+  %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
   store i8 %shl343, i8* %incdec.ptr335, align 1
-  %incdec.ptr366 = getelementptr inbounds i8* %c, i64 24
+  %incdec.ptr366 = getelementptr inbounds i8, i8* %c, i64 24
   store i8 0, i8* %incdec.ptr350, align 1
-  %arrayidx381 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 36
-  %incdec.ptr387 = getelementptr inbounds i8* %c, i64 25
+  %arrayidx381 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 36
+  %incdec.ptr387 = getelementptr inbounds i8, i8* %c, i64 25
   store i8 0, i8* %incdec.ptr366, align 1
-  %7 = load i16* %arrayidx381, align 8
+  %7 = load i16, i16* %arrayidx381, align 8
   %conv3898 = trunc i16 %7 to i8
   %and390 = shl i8 %conv3898, 6
   store i8 %and390, i8* %incdec.ptr387, align 1
diff --git a/test/Transforms/BBVectorize/X86/sh-rec3.ll b/test/Transforms/BBVectorize/X86/sh-rec3.ll
index ad880ed..2096deb 100644
--- a/test/Transforms/BBVectorize/X86/sh-rec3.ll
+++ b/test/Transforms/BBVectorize/X86/sh-rec3.ll
@@ -7,153 +7,153 @@ target triple = "x86_64-unknown-linux-gnu"
 define void @gsm_encode(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i8* %c) nounwind uwtable {
 entry:
   %LARc28 = alloca [2 x i64], align 16
-  %LARc28.sub = getelementptr inbounds [2 x i64]* %LARc28, i64 0, i64 0
+  %LARc28.sub = getelementptr inbounds [2 x i64], [2 x i64]* %LARc28, i64 0, i64 0
   %tmpcast = bitcast [2 x i64]* %LARc28 to [8 x i16]*
   %Nc = alloca [4 x i16], align 2
   %Mc = alloca [4 x i16], align 2
   %bc = alloca [4 x i16], align 2
   %xmc = alloca [52 x i16], align 16
   %arraydecay = bitcast [2 x i64]* %LARc28 to i16*
-  %arraydecay1 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 0
-  %arraydecay2 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 0
-  %arraydecay3 = getelementptr inbounds [4 x i16]* %Mc, i64 0, i64 0
-  %arraydecay5 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 0
+  %arraydecay1 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 0
+  %arraydecay2 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 0
+  %arraydecay3 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 0
+  %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
   call void @Gsm_Coder(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i16* %arraydecay, i16* %arraydecay1, i16* %arraydecay2, i16* %arraydecay3, i16* undef, i16* %arraydecay5) nounwind
-  %0 = load i64* %LARc28.sub, align 16
+  %0 = load i64, i64* %LARc28.sub, align 16
   %1 = trunc i64 %0 to i32
   %conv1 = lshr i32 %1, 2
   %and = and i32 %conv1, 15
   %or = or i32 %and, 208
   %conv6 = trunc i32 %or to i8
-  %incdec.ptr = getelementptr inbounds i8* %c, i64 1
+  %incdec.ptr = getelementptr inbounds i8, i8* %c, i64 1
   store i8 %conv6, i8* %c, align 1
   %conv84 = trunc i64 %0 to i8
   %and9 = shl i8 %conv84, 6
-  %incdec.ptr15 = getelementptr inbounds i8* %c, i64 2
+  %incdec.ptr15 = getelementptr inbounds i8, i8* %c, i64 2
   store i8 %and9, i8* %incdec.ptr, align 1
   %2 = lshr i64 %0, 50
   %shr226.tr = trunc i64 %2 to i8
   %conv25 = and i8 %shr226.tr, 7
-  %incdec.ptr26 = getelementptr inbounds i8* %c, i64 3
+  %incdec.ptr26 = getelementptr inbounds i8, i8* %c, i64 3
   store i8 %conv25, i8* %incdec.ptr15, align 1
-  %incdec.ptr42 = getelementptr inbounds i8* %c, i64 4
+  %incdec.ptr42 = getelementptr inbounds i8, i8* %c, i64 4
   store i8 0, i8* %incdec.ptr26, align 1
-  %arrayidx52 = getelementptr inbounds [8 x i16]* %tmpcast, i64 0, i64 7
-  %3 = load i16* %arrayidx52, align 2
+  %arrayidx52 = getelementptr inbounds [8 x i16], [8 x i16]* %tmpcast, i64 0, i64 7
+  %3 = load i16, i16* %arrayidx52, align 2
   %conv537 = trunc i16 %3 to i8
   %and54 = and i8 %conv537, 7
-  %incdec.ptr57 = getelementptr inbounds i8* %c, i64 5
+  %incdec.ptr57 = getelementptr inbounds i8, i8* %c, i64 5
   store i8 %and54, i8* %incdec.ptr42, align 1
-  %incdec.ptr68 = getelementptr inbounds i8* %c, i64 6
+  %incdec.ptr68 = getelementptr inbounds i8, i8* %c, i64 6
   store i8 0, i8* %incdec.ptr57, align 1
-  %4 = load i16* %arraydecay3, align 2
+  %4 = load i16, i16* %arraydecay3, align 2
   %conv748 = trunc i16 %4 to i8
   %and75 = shl i8 %conv748, 5
   %shl76 = and i8 %and75, 96
-  %incdec.ptr84 = getelementptr inbounds i8* %c, i64 7
+  %incdec.ptr84 = getelementptr inbounds i8, i8* %c, i64 7
   store i8 %shl76, i8* %incdec.ptr68, align 1
-  %arrayidx94 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 1
-  %5 = load i16* %arrayidx94, align 2
+  %arrayidx94 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 1
+  %5 = load i16, i16* %arrayidx94, align 2
   %conv959 = trunc i16 %5 to i8
   %and96 = shl i8 %conv959, 1
   %shl97 = and i8 %and96, 14
   %or103 = or i8 %shl97, 1
-  %incdec.ptr105 = getelementptr inbounds i8* %c, i64 8
+  %incdec.ptr105 = getelementptr inbounds i8, i8* %c, i64 8
   store i8 %or103, i8* %incdec.ptr84, align 1
-  %arrayidx115 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 4
+  %arrayidx115 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 4
   %6 = bitcast i16* %arrayidx115 to i32*
-  %7 = load i32* %6, align 8
+  %7 = load i32, i32* %6, align 8
   %conv11610 = trunc i32 %7 to i8
   %and117 = and i8 %conv11610, 7
-  %incdec.ptr120 = getelementptr inbounds i8* %c, i64 9
+  %incdec.ptr120 = getelementptr inbounds i8, i8* %c, i64 9
   store i8 %and117, i8* %incdec.ptr105, align 1
   %8 = lshr i32 %7, 16
   %and12330 = shl nuw nsw i32 %8, 5
   %and123 = trunc i32 %and12330 to i8
-  %incdec.ptr136 = getelementptr inbounds i8* %c, i64 10
+  %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
   store i8 %and123, i8* %incdec.ptr120, align 1
-  %incdec.ptr157 = getelementptr inbounds i8* %c, i64 11
+  %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
   store i8 0, i8* %incdec.ptr136, align 1
-  %incdec.ptr172 = getelementptr inbounds i8* %c, i64 12
+  %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
   store i8 0, i8* %incdec.ptr157, align 1
-  %arrayidx173 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 1
-  %9 = load i16* %arrayidx173, align 2
+  %arrayidx173 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 1
+  %9 = load i16, i16* %arrayidx173, align 2
   %conv17412 = zext i16 %9 to i32
   %and175 = shl nuw nsw i32 %conv17412, 1
-  %arrayidx177 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 1
-  %10 = load i16* %arrayidx177, align 2
+  %arrayidx177 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 1
+  %10 = load i16, i16* %arrayidx177, align 2
   %conv17826 = zext i16 %10 to i32
   %shr17913 = lshr i32 %conv17826, 1
   %and180 = and i32 %shr17913, 1
   %or181 = or i32 %and175, %and180
   %conv182 = trunc i32 %or181 to i8
-  %incdec.ptr183 = getelementptr inbounds i8* %c, i64 13
+  %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
   store i8 %conv182, i8* %incdec.ptr172, align 1
-  %arrayidx188 = getelementptr inbounds [4 x i16]* %Mc, i64 0, i64 1
-  %11 = load i16* %arrayidx188, align 2
+  %arrayidx188 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 1
+  %11 = load i16, i16* %arrayidx188, align 2
   %conv18914 = trunc i16 %11 to i8
   %and190 = shl i8 %conv18914, 5
   %shl191 = and i8 %and190, 96
-  %incdec.ptr199 = getelementptr inbounds i8* %c, i64 14
+  %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
   store i8 %shl191, i8* %incdec.ptr183, align 1
-  %arrayidx209 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 14
-  %12 = load i16* %arrayidx209, align 4
+  %arrayidx209 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 14
+  %12 = load i16, i16* %arrayidx209, align 4
   %conv21015 = trunc i16 %12 to i8
   %and211 = shl i8 %conv21015, 1
   %shl212 = and i8 %and211, 14
   %or218 = or i8 %shl212, 1
-  %incdec.ptr220 = getelementptr inbounds i8* %c, i64 15
+  %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
   store i8 %or218, i8* %incdec.ptr199, align 1
-  %arrayidx225 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 16
+  %arrayidx225 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 16
   %13 = bitcast i16* %arrayidx225 to i64*
-  %14 = load i64* %13, align 16
+  %14 = load i64, i64* %13, align 16
   %conv22616 = trunc i64 %14 to i8
   %and227 = shl i8 %conv22616, 3
   %shl228 = and i8 %and227, 56
-  %incdec.ptr235 = getelementptr inbounds i8* %c, i64 16
+  %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
   store i8 %shl228, i8* %incdec.ptr220, align 1
   %15 = lshr i64 %14, 32
   %and23832 = shl nuw nsw i64 %15, 5
   %and238 = trunc i64 %and23832 to i8
-  %incdec.ptr251 = getelementptr inbounds i8* %c, i64 17
+  %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
   store i8 %and238, i8* %incdec.ptr235, align 1
-  %arrayidx266 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 23
-  %incdec.ptr272 = getelementptr inbounds i8* %c, i64 18
+  %arrayidx266 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 23
+  %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
   store i8 0, i8* %incdec.ptr251, align 1
-  %16 = load i16* %arrayidx266, align 2
+  %16 = load i16, i16* %arrayidx266, align 2
   %conv27418 = trunc i16 %16 to i8
   %and275 = shl i8 %conv27418, 6
-  %incdec.ptr287 = getelementptr inbounds i8* %c, i64 19
+  %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
   store i8 %and275, i8* %incdec.ptr272, align 1
-  %arrayidx288 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 2
-  %17 = load i16* %arrayidx288, align 2
+  %arrayidx288 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 2
+  %17 = load i16, i16* %arrayidx288, align 2
   %conv28919 = zext i16 %17 to i32
   %and290 = shl nuw nsw i32 %conv28919, 1
-  %arrayidx292 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 2
-  %18 = load i16* %arrayidx292, align 2
+  %arrayidx292 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 2
+  %18 = load i16, i16* %arrayidx292, align 2
   %conv29327 = zext i16 %18 to i32
   %shr29420 = lshr i32 %conv29327, 1
   %and295 = and i32 %shr29420, 1
   %or296 = or i32 %and290, %and295
   %conv297 = trunc i32 %or296 to i8
-  %incdec.ptr298 = getelementptr inbounds i8* %c, i64 20
+  %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
   store i8 %conv297, i8* %incdec.ptr287, align 1
   %conv30021 = trunc i16 %18 to i8
   %and301 = shl i8 %conv30021, 7
-  %incdec.ptr314 = getelementptr inbounds i8* %c, i64 21
+  %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
   store i8 %and301, i8* %incdec.ptr298, align 1
-  %incdec.ptr335 = getelementptr inbounds i8* %c, i64 22
+  %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
   store i8 0, i8* %incdec.ptr314, align 1
-  %arrayidx340 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 29
-  %19 = load i16* %arrayidx340, align 2
+  %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
+  %19 = load i16, i16* %arrayidx340, align 2
   %conv34122 = trunc i16 %19 to i8
   %and342 = shl i8 %conv34122, 3
   %shl343 = and i8 %and342, 56
-  %incdec.ptr350 = getelementptr inbounds i8* %c, i64 23
+  %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
   store i8 %shl343, i8* %incdec.ptr335, align 1
-  %arrayidx355 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 32
+  %arrayidx355 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 32
   %20 = bitcast i16* %arrayidx355 to i32*
-  %21 = load i32* %20, align 16
+  %21 = load i32, i32* %20, align 16
   %conv35623 = shl i32 %21, 2
   %shl358 = and i32 %conv35623, 28
   %22 = lshr i32 %21, 17
diff --git a/test/Transforms/BBVectorize/X86/simple-ldstr.ll b/test/Transforms/BBVectorize/X86/simple-ldstr.ll
index 1abbc34..2c05f30 100644
--- a/test/Transforms/BBVectorize/X86/simple-ldstr.ll
+++ b/test/Transforms/BBVectorize/X86/simple-ldstr.ll
@@ -4,23 +4,23 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 ; Simple 3-pair chain with loads and stores
 define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
 entry:
-  %i0 = load double* %a, align 8
-  %i1 = load double* %b, align 8
+  %i0 = load double, double* %a, align 8
+  %i1 = load double, double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
-  %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
-  %i4 = load double* %arrayidx4, align 8
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+  %i3 = load double, double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+  %i4 = load double, double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 ; CHECK-LABEL: @test1(
 ; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
 ; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
 ; CHECK: %mul = fmul <2 x double> %i0, %i1
 ; CHECK: %0 = bitcast double* %c to <2 x double>*
 ; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
diff --git a/test/Transforms/BBVectorize/X86/wr-aliases.ll b/test/Transforms/BBVectorize/X86/wr-aliases.ll
index 34b1d4e..56448c0 100644
--- a/test/Transforms/BBVectorize/X86/wr-aliases.ll
+++ b/test/Transforms/BBVectorize/X86/wr-aliases.ll
@@ -27,27 +27,27 @@ arrayctor.cont.ret.exitStub: ; preds = %arrayctor.cont
 ; CHECK: <2 x double>
 ; CHECK: @_ZL12printQBezier7QBezier
 ; CHECK: store double %mul8.i, double* %x3.i, align 16
-; CHECK: load double* %x3.i, align 16
+; CHECK: load double, double* %x3.i, align 16
 ; CHECK: ret

 arrayctor.cont: ; preds = %newFuncRoot
-  %ref.tmp.sroa.0.0.idx = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
+  %ref.tmp.sroa.0.0.idx = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
   store double 1.000000e+01, double* %ref.tmp.sroa.0.0.idx, align 16
-  %ref.tmp.sroa.2.0.idx1 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
+  %ref.tmp.sroa.2.0.idx1 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
   store double 2.000000e+01, double* %ref.tmp.sroa.2.0.idx1, align 8
-  %ref.tmp.sroa.3.0.idx2 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
+  %ref.tmp.sroa.3.0.idx2 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
   store double 3.000000e+01, double* %ref.tmp.sroa.3.0.idx2, align 16
-  %ref.tmp.sroa.4.0.idx3 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
+  %ref.tmp.sroa.4.0.idx3 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
   store double 4.000000e+01, double* %ref.tmp.sroa.4.0.idx3, align 8
-  %ref.tmp.sroa.5.0.idx4 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
+  %ref.tmp.sroa.5.0.idx4 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
   store double 5.000000e+01, double* %ref.tmp.sroa.5.0.idx4, align 16
-  %ref.tmp.sroa.6.0.idx5 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
+  %ref.tmp.sroa.6.0.idx5 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
   store double 6.000000e+01, double* %ref.tmp.sroa.6.0.idx5, align 8
-  %ref.tmp.sroa.7.0.idx6 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
+  %ref.tmp.sroa.7.0.idx6 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
   store double 7.000000e+01, double* %ref.tmp.sroa.7.0.idx6, align 16
-  %ref.tmp.sroa.8.0.idx7 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
+  %ref.tmp.sroa.8.0.idx7 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
   store double 8.000000e+01, double* %ref.tmp.sroa.8.0.idx7, align 8
-  %add.ptr = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1
+  %add.ptr = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1
   %v0 = bitcast %class.QBezier.15* %agg.tmp.i to i8*
   call void @llvm.lifetime.start(i64 64, i8* %v0)
   %v1 = bitcast %class.QBezier.15* %agg.tmp55.i to i8*
@@ -57,77 +57,77 @@ arrayctor.cont: ; preds = %newFuncRoot
   %v3 = bitcast [10 x %class.QBezier.15]* %beziers to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v0, i8* %v3, i64 64, i32 8, i1 false)
   call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp.i)
-  %x2.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
-  %v4 = load double* %x2.i, align 16
-  %x3.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
-  %v5 = load double* %x3.i, align 16
+  %x2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
+  %v4 = load double, double* %x2.i, align 16
+  %x3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
+  %v5 = load double, double* %x3.i, align 16
   %add.i = fadd double %v4, %v5
   %mul.i = fmul double 5.000000e-01, %add.i
-  %x1.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
-  %v6 = load double* %x1.i, align 16
+  %x1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
+  %v6 = load double, double* %x1.i, align 16
   %add3.i = fadd double %v4, %v6
   %mul4.i = fmul double 5.000000e-01, %add3.i
-  %x25.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
+  %x25.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
   store double %mul4.i, double* %x25.i, align 16
-  %v7 = load double* %x3.i, align 16
-  %x4.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
-  %v8 = load double* %x4.i, align 16
+  %v7 = load double, double* %x3.i, align 16
+  %x4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
+  %v8 = load double, double* %x4.i, align 16
   %add7.i = fadd double %v7, %v8
   %mul8.i = fmul double 5.000000e-01, %add7.i
   store double %mul8.i, double* %x3.i, align 16
-  %v9 = load double* %x1.i, align 16
-  %x111.i = getelementptr inbounds %class.QBezier.15* %add.ptr, i64 0, i32 0
+  %v9 = load double, double* %x1.i, align 16
+  %x111.i = getelementptr inbounds %class.QBezier.15, %class.QBezier.15* %add.ptr, i64 0, i32 0
   store double %v9, double* %x111.i, align 16
-  %v10 = load double* %x25.i, align 16
+  %v10 = load double, double* %x25.i, align 16
   %add15.i = fadd double %mul.i, %v10
   %mul16.i = fmul double 5.000000e-01, %add15.i
-  %x317.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
+  %x317.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
   store double %mul16.i, double* %x317.i, align 16
-  %v11 = load double* %x3.i, align 16
+  %v11 = load double, double* %x3.i, align 16
   %add19.i = fadd double %mul.i, %v11
   %mul20.i = fmul double 5.000000e-01, %add19.i
   store double %mul20.i, double* %x2.i, align 16
-  %v12 = load double* %x317.i, align 16
+  %v12 = load double, double* %x317.i, align 16
   %add24.i = fadd double %v12, %mul20.i
   %mul25.i = fmul double 5.000000e-01, %add24.i
   store double %mul25.i, double* %x1.i, align 16
-  %x427.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
+  %x427.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
   store double %mul25.i, double* %x427.i, align 16
-  %y2.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
-  %v13 = load double* %y2.i, align 8
-  %y3.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
-  %v14 = load double* %y3.i, align 8
+  %y2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
+  %v13 = load double, double* %y2.i, align 8
+  %y3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
+  %v14 = load double, double* %y3.i, align 8
   %add28.i = fadd double %v13, %v14
   %div.i = fmul double 5.000000e-01, %add28.i
-  %y1.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
-  %v15 = load double* %y1.i, align 8
+  %y1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
+  %v15 = load double, double* %y1.i, align 8
   %add30.i = fadd double %v13, %v15
   %mul31.i = fmul double 5.000000e-01, %add30.i
-  %y232.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
+  %y232.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
   store double %mul31.i, double* %y232.i, align 8
-  %v16 = load double* %y3.i, align 8
-  %y4.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
-  %v17 = load double* %y4.i, align 8
+  %v16 = load double, double* %y3.i, align 8
+  %y4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
+  %v17 = load double, double* %y4.i, align 8
   %add34.i = fadd double %v16, %v17
   %mul35.i = fmul double 5.000000e-01, %add34.i
   store double %mul35.i, double* %y3.i, align 8
-  %v18 = load double* %y1.i, align 8
-  %y138.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
+  %v18 = load double, double* %y1.i, align 8
+  %y138.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
   store double %v18, double* %y138.i, align 8
-  %v19 = load double* %y232.i, align 8
+  %v19 = load double, double* %y232.i, align 8
   %add42.i = fadd double %div.i, %v19
   %mul43.i = fmul double 5.000000e-01, %add42.i
-  %y344.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
+  %y344.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
   store double %mul43.i, double* %y344.i, align 8
-  %v20 = load double* %y3.i, align 8
+  %v20 = load double, double* %y3.i, align 8
   %add46.i = fadd double %div.i, %v20
   %mul47.i = fmul double 5.000000e-01, %add46.i
   store double %mul47.i, double* %y2.i, align 8
-  %v21 = load double* %y344.i, align 8
+  %v21 = load double, double* %y344.i, align 8
   %add51.i = fadd double %v21, %mul47.i
   %mul52.i = fmul double 5.000000e-01, %add51.i
   store double %mul52.i, double* %y1.i, align 8
-  %y454.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 7
+  %y454.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 7
   store double %mul52.i, double* %y454.i, align 8
   %v22 = bitcast %class.QBezier.15* %add.ptr to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v1, i8* %v22, i64 64, i32 8, i1 false)