Diffstat (limited to 'test/CodeGen/X86/mmx-arith.ll')
-rw-r--r-- | test/CodeGen/X86/mmx-arith.ll | 70
1 file changed, 35 insertions, 35 deletions
diff --git a/test/CodeGen/X86/mmx-arith.ll b/test/CodeGen/X86/mmx-arith.ll
index d9d1fbf..114d253 100644
--- a/test/CodeGen/X86/mmx-arith.ll
+++ b/test/CodeGen/X86/mmx-arith.ll
@@ -8,48 +8,48 @@
 ; X64-LABEL: test0
 define void @test0(x86_mmx* %A, x86_mmx* %B) {
 entry:
-  %tmp1 = load x86_mmx* %A
-  %tmp3 = load x86_mmx* %B
+  %tmp1 = load x86_mmx, x86_mmx* %A
+  %tmp3 = load x86_mmx, x86_mmx* %B
   %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
   %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
   %tmp4 = add <8 x i8> %tmp1a, %tmp3a
   %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
   store x86_mmx %tmp4a, x86_mmx* %A
-  %tmp7 = load x86_mmx* %B
+  %tmp7 = load x86_mmx, x86_mmx* %B
   %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
   store x86_mmx %tmp12, x86_mmx* %A
-  %tmp16 = load x86_mmx* %B
+  %tmp16 = load x86_mmx, x86_mmx* %B
   %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
   store x86_mmx %tmp21, x86_mmx* %A
-  %tmp27 = load x86_mmx* %B
+  %tmp27 = load x86_mmx, x86_mmx* %B
   %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
   %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
   %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
   %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
   store x86_mmx %tmp28a, x86_mmx* %A
-  %tmp31 = load x86_mmx* %B
+  %tmp31 = load x86_mmx, x86_mmx* %B
   %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
   store x86_mmx %tmp36, x86_mmx* %A
-  %tmp40 = load x86_mmx* %B
+  %tmp40 = load x86_mmx, x86_mmx* %B
   %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
   store x86_mmx %tmp45, x86_mmx* %A
-  %tmp51 = load x86_mmx* %B
+  %tmp51 = load x86_mmx, x86_mmx* %B
   %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
   %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
   %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
   %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
   store x86_mmx %tmp52a, x86_mmx* %A
-  %tmp57 = load x86_mmx* %B
+  %tmp57 = load x86_mmx, x86_mmx* %B
   %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
   %tmp58 = and <8 x i8> %tmp52, %tmp57a
   %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
   store x86_mmx %tmp58a, x86_mmx* %A
-  %tmp63 = load x86_mmx* %B
+  %tmp63 = load x86_mmx, x86_mmx* %B
   %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
   %tmp64 = or <8 x i8> %tmp58, %tmp63a
   %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
   store x86_mmx %tmp64a, x86_mmx* %A
-  %tmp69 = load x86_mmx* %B
+  %tmp69 = load x86_mmx, x86_mmx* %B
   %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
   %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
   %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
@@ -63,37 +63,37 @@ entry:
 ; X64-LABEL: test1
 define void @test1(x86_mmx* %A, x86_mmx* %B) {
 entry:
-  %tmp1 = load x86_mmx* %A
-  %tmp3 = load x86_mmx* %B
+  %tmp1 = load x86_mmx, x86_mmx* %A
+  %tmp3 = load x86_mmx, x86_mmx* %B
   %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
   %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
   %tmp4 = add <2 x i32> %tmp1a, %tmp3a
   %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
   store x86_mmx %tmp4a, x86_mmx* %A
-  %tmp9 = load x86_mmx* %B
+  %tmp9 = load x86_mmx, x86_mmx* %B
   %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
   %tmp10 = sub <2 x i32> %tmp4, %tmp9a
   %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
   store x86_mmx %tmp10a, x86_mmx* %A
-  %tmp15 = load x86_mmx* %B
+  %tmp15 = load x86_mmx, x86_mmx* %B
   %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
   %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
   %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
   %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
   store x86_mmx %tmp16a, x86_mmx* %A
-  %tmp21 = load x86_mmx* %B
+  %tmp21 = load x86_mmx, x86_mmx* %B
   %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
   %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
   %tmp22 = and <2 x i32> %tmp16b, %tmp21a
   %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
   store x86_mmx %tmp22a, x86_mmx* %A
-  %tmp27 = load x86_mmx* %B
+  %tmp27 = load x86_mmx, x86_mmx* %B
   %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
   %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
   %tmp28 = or <2 x i32> %tmp22b, %tmp27a
   %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
   store x86_mmx %tmp28a, x86_mmx* %A
-  %tmp33 = load x86_mmx* %B
+  %tmp33 = load x86_mmx, x86_mmx* %B
   %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
   %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
   %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
@@ -107,57 +107,57 @@ entry:
 ; X64-LABEL: test2
 define void @test2(x86_mmx* %A, x86_mmx* %B) {
 entry:
-  %tmp1 = load x86_mmx* %A
-  %tmp3 = load x86_mmx* %B
+  %tmp1 = load x86_mmx, x86_mmx* %A
+  %tmp3 = load x86_mmx, x86_mmx* %B
   %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
   %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
   %tmp4 = add <4 x i16> %tmp1a, %tmp3a
   %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
   store x86_mmx %tmp4a, x86_mmx* %A
-  %tmp7 = load x86_mmx* %B
+  %tmp7 = load x86_mmx, x86_mmx* %B
   %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
   store x86_mmx %tmp12, x86_mmx* %A
-  %tmp16 = load x86_mmx* %B
+  %tmp16 = load x86_mmx, x86_mmx* %B
   %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
   store x86_mmx %tmp21, x86_mmx* %A
-  %tmp27 = load x86_mmx* %B
+  %tmp27 = load x86_mmx, x86_mmx* %B
   %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
   %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
   %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
   %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
   store x86_mmx %tmp28a, x86_mmx* %A
-  %tmp31 = load x86_mmx* %B
+  %tmp31 = load x86_mmx, x86_mmx* %B
   %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
   store x86_mmx %tmp36, x86_mmx* %A
-  %tmp40 = load x86_mmx* %B
+  %tmp40 = load x86_mmx, x86_mmx* %B
   %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
   store x86_mmx %tmp45, x86_mmx* %A
-  %tmp51 = load x86_mmx* %B
+  %tmp51 = load x86_mmx, x86_mmx* %B
   %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
   %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
   %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
   %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
   store x86_mmx %tmp52a, x86_mmx* %A
-  %tmp55 = load x86_mmx* %B
+  %tmp55 = load x86_mmx, x86_mmx* %B
   %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
   store x86_mmx %tmp60, x86_mmx* %A
-  %tmp64 = load x86_mmx* %B
+  %tmp64 = load x86_mmx, x86_mmx* %B
   %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
   %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
   store x86_mmx %tmp70, x86_mmx* %A
-  %tmp75 = load x86_mmx* %B
+  %tmp75 = load x86_mmx, x86_mmx* %B
   %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
   %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
   %tmp76 = and <4 x i16> %tmp70a, %tmp75a
   %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
   store x86_mmx %tmp76a, x86_mmx* %A
-  %tmp81 = load x86_mmx* %B
+  %tmp81 = load x86_mmx, x86_mmx* %B
   %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
   %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
   %tmp82 = or <4 x i16> %tmp76b, %tmp81a
   %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
   store x86_mmx %tmp82a, x86_mmx* %A
-  %tmp87 = load x86_mmx* %B
+  %tmp87 = load x86_mmx, x86_mmx* %B
   %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
   %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
   %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
@@ -178,10 +178,10 @@ bb26:
 ; X32: adcl
   %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
   %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
-  %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0
-  %tmp14 = load <1 x i64>* %tmp13
-  %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0
-  %tmp19 = load <1 x i64>* %tmp18
+  %tmp13 = getelementptr <1 x i64>, <1 x i64>* %b, i32 %i.037.0
+  %tmp14 = load <1 x i64>, <1 x i64>* %tmp13
+  %tmp18 = getelementptr <1 x i64>, <1 x i64>* %a, i32 %i.037.0
+  %tmp19 = load <1 x i64>, <1 x i64>* %tmp18
   %tmp21 = add <1 x i64> %tmp19, %tmp14
   %tmp22 = add <1 x i64> %tmp21, %sum.035.0
   %tmp25 = add i32 %i.037.0, 1