Diffstat (limited to 'test/CodeGen/PowerPC/vsx-infl-copy1.ll')
-rw-r--r--  test/CodeGen/PowerPC/vsx-infl-copy1.ll | 68
1 file changed, 34 insertions(+), 34 deletions(-)
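
This diff mechanically updates the test to the explicit pointee-type syntax for load and getelementptr adopted in LLVM 3.7 as a step toward opaque pointer types: the element type becomes an explicit first operand instead of being derived from the pointer operand's type. A minimal sketch of the migration (the names %p, %i, and @arr are illustrative, not taken from this test):

    ; Old syntax: the loaded/indexed type was implied by the pointer operand.
    %v = load <4 x i32>* %p, align 4
    %e = getelementptr [1024 x i32]* @arr, i64 0, i64 %i

    ; New syntax: the element type is spelled out explicitly, so it no
    ; longer has to be recovered from the pointer's type.
    %v = load <4 x i32>, <4 x i32>* %p, align 4
    %e = getelementptr [1024 x i32], [1024 x i32]* @arr, i64 0, i64 %i

Every hunk below is an instance of this rewrite; the IR is otherwise unchanged.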
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
index cff7f8f..531e3ad 100644
--- a/test/CodeGen/PowerPC/vsx-infl-copy1.ll
+++ b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -26,69 +26,69 @@ vector.body: ; preds = %vector.body, %entry
%vec.phi28 = phi <4 x i32> [ zeroinitializer, %entry ], [ %51, %vector.body ]
%vec.phi29 = phi <4 x i32> [ zeroinitializer, %entry ], [ %52, %vector.body ]
%vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
- %wide.load32 = load <4 x i32>* null, align 4
+ %wide.load32 = load <4 x i32>, <4 x i32>* null, align 4
%.sum82 = add i64 %index, 24
- %0 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum82
+ %0 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum82
%1 = bitcast i32* %0 to <4 x i32>*
- %wide.load36 = load <4 x i32>* %1, align 4
- %wide.load37 = load <4 x i32>* undef, align 4
+ %wide.load36 = load <4 x i32>, <4 x i32>* %1, align 4
+ %wide.load37 = load <4 x i32>, <4 x i32>* undef, align 4
%.sum84 = add i64 %index, 32
- %2 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum84
+ %2 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum84
%3 = bitcast i32* %2 to <4 x i32>*
- %wide.load38 = load <4 x i32>* %3, align 4
+ %wide.load38 = load <4 x i32>, <4 x i32>* %3, align 4
%.sum85 = add i64 %index, 36
- %4 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum85
+ %4 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum85
%5 = bitcast i32* %4 to <4 x i32>*
- %wide.load39 = load <4 x i32>* %5, align 4
- %6 = getelementptr [1024 x i32]* @ub, i64 0, i64 undef
+ %wide.load39 = load <4 x i32>, <4 x i32>* %5, align 4
+ %6 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 undef
%7 = bitcast i32* %6 to <4 x i32>*
- %wide.load40 = load <4 x i32>* %7, align 4
+ %wide.load40 = load <4 x i32>, <4 x i32>* %7, align 4
%.sum87 = add i64 %index, 44
- %8 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum87
+ %8 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum87
%9 = bitcast i32* %8 to <4 x i32>*
- %wide.load41 = load <4 x i32>* %9, align 4
- %10 = getelementptr inbounds [1024 x i32]* @uc, i64 0, i64 %index
+ %wide.load41 = load <4 x i32>, <4 x i32>* %9, align 4
+ %10 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %index
%11 = bitcast i32* %10 to <4 x i32>*
- %wide.load42 = load <4 x i32>* %11, align 4
+ %wide.load42 = load <4 x i32>, <4 x i32>* %11, align 4
%.sum8889 = or i64 %index, 4
- %12 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum8889
+ %12 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum8889
%13 = bitcast i32* %12 to <4 x i32>*
- %wide.load43 = load <4 x i32>* %13, align 4
+ %wide.load43 = load <4 x i32>, <4 x i32>* %13, align 4
%.sum9091 = or i64 %index, 8
- %14 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum9091
+ %14 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum9091
%15 = bitcast i32* %14 to <4 x i32>*
- %wide.load44 = load <4 x i32>* %15, align 4
+ %wide.load44 = load <4 x i32>, <4 x i32>* %15, align 4
%.sum94 = add i64 %index, 16
- %16 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum94
+ %16 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum94
%17 = bitcast i32* %16 to <4 x i32>*
- %wide.load46 = load <4 x i32>* %17, align 4
+ %wide.load46 = load <4 x i32>, <4 x i32>* %17, align 4
%.sum95 = add i64 %index, 20
- %18 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum95
+ %18 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum95
%19 = bitcast i32* %18 to <4 x i32>*
- %wide.load47 = load <4 x i32>* %19, align 4
- %20 = getelementptr [1024 x i32]* @uc, i64 0, i64 undef
+ %wide.load47 = load <4 x i32>, <4 x i32>* %19, align 4
+ %20 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 undef
%21 = bitcast i32* %20 to <4 x i32>*
- %wide.load48 = load <4 x i32>* %21, align 4
+ %wide.load48 = load <4 x i32>, <4 x i32>* %21, align 4
%.sum97 = add i64 %index, 28
- %22 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum97
+ %22 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum97
%23 = bitcast i32* %22 to <4 x i32>*
- %wide.load49 = load <4 x i32>* %23, align 4
+ %wide.load49 = load <4 x i32>, <4 x i32>* %23, align 4
%.sum98 = add i64 %index, 32
- %24 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum98
+ %24 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum98
%25 = bitcast i32* %24 to <4 x i32>*
- %wide.load50 = load <4 x i32>* %25, align 4
+ %wide.load50 = load <4 x i32>, <4 x i32>* %25, align 4
%.sum99 = add i64 %index, 36
- %26 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum99
+ %26 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum99
%27 = bitcast i32* %26 to <4 x i32>*
- %wide.load51 = load <4 x i32>* %27, align 4
+ %wide.load51 = load <4 x i32>, <4 x i32>* %27, align 4
%.sum100 = add i64 %index, 40
- %28 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum100
+ %28 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum100
%29 = bitcast i32* %28 to <4 x i32>*
- %wide.load52 = load <4 x i32>* %29, align 4
+ %wide.load52 = load <4 x i32>, <4 x i32>* %29, align 4
%.sum101 = add i64 %index, 44
- %30 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum101
+ %30 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum101
%31 = bitcast i32* %30 to <4 x i32>*
- %wide.load53 = load <4 x i32>* %31, align 4
+ %wide.load53 = load <4 x i32>, <4 x i32>* %31, align 4
%32 = add <4 x i32> zeroinitializer, %vec.phi
%33 = add <4 x i32> zeroinitializer, %vec.phi20
%34 = add <4 x i32> %wide.load32, %vec.phi21