Diffstat (limited to 'test/Transforms/LoopStrengthReduce/X86')
5 files changed, 501 insertions, 5 deletions
diff --git a/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll b/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll new file mode 100644 index 0000000..2dcaab8 --- /dev/null +++ b/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll @@ -0,0 +1,92 @@ +; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s + +declare i1 @check() nounwind +declare i1 @foo(i8*, i8*, i8*) nounwind + +; Check that redundant phi elimination ran +; CHECK: @test +; CHECK: %while.body.i +; CHECK: movs +; CHECK-NOT: movs +; CHECK: %for.end.i +define i32 @test(i8* %base) nounwind uwtable ssp { +entry: + br label %while.body.lr.ph.i + +while.body.lr.ph.i: ; preds = %cond.true.i + br label %while.body.i + +while.body.i: ; preds = %cond.true29.i, %while.body.lr.ph.i + %indvars.iv7.i = phi i64 [ 16, %while.body.lr.ph.i ], [ %indvars.iv.next8.i, %cond.true29.i ] + %i.05.i = phi i64 [ 0, %while.body.lr.ph.i ], [ %indvars.iv7.i, %cond.true29.i ] + %sext.i = shl i64 %i.05.i, 32 + %idx.ext.i = ashr exact i64 %sext.i, 32 + %add.ptr.sum.i = add i64 %idx.ext.i, 16 + br label %for.body.i + +for.body.i: ; preds = %for.body.i, %while.body.i + %indvars.iv.i = phi i64 [ 0, %while.body.i ], [ %indvars.iv.next.i, %for.body.i ] + %add.ptr.sum = add i64 %add.ptr.sum.i, %indvars.iv.i + %arrayidx22.i = getelementptr inbounds i8* %base, i64 %add.ptr.sum + %0 = load i8* %arrayidx22.i, align 1 + %indvars.iv.next.i = add i64 %indvars.iv.i, 1 + %cmp = call i1 @check() nounwind + br i1 %cmp, label %for.end.i, label %for.body.i + +for.end.i: ; preds = %for.body.i + %add.ptr.i144 = getelementptr inbounds i8* %base, i64 %add.ptr.sum.i + %cmp2 = tail call i1 @foo(i8* %add.ptr.i144, i8* %add.ptr.i144, i8* undef) nounwind + br i1 %cmp2, label %cond.true29.i, label %cond.false35.i + +cond.true29.i: ; preds = %for.end.i + %indvars.iv.next8.i = add i64 %indvars.iv7.i, 16 + br i1 false, label %exit, label %while.body.i + +cond.false35.i: ; preds = %for.end.i + unreachable + +exit: ; preds = %cond.true29.i, %cond.true.i + ret i32 0 +} + +%struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771 = type { i32, i32, i32 } + +@tags = external global [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], align 16 + +; PR11782: SCEVExpander assert +; +; Test phi reuse after LSR that requires SCEVExpander to hoist an +; interesting GEP. 
+; +; CHECK: @test2 +; CHECK: %entry +; CHECK-NOT: mov +; CHECK: jne +define void @test2(i32 %n) nounwind uwtable { +entry: + br i1 undef, label %while.end, label %for.cond468 + +for.cond468: ; preds = %if.then477, %entry + %indvars.iv1163 = phi i64 [ %indvars.iv.next1164, %if.then477 ], [ 1, %entry ] + %k.0.in = phi i32* [ %last, %if.then477 ], [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 0, i32 2), %entry ] + %k.0 = load i32* %k.0.in, align 4 + %0 = trunc i64 %indvars.iv1163 to i32 + %cmp469 = icmp slt i32 %0, %n + br i1 %cmp469, label %for.body471, label %for.inc498 + +for.body471: ; preds = %for.cond468 + %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1 + %1 = load i32* %first, align 4 + br i1 undef, label %if.then477, label %for.inc498 + +if.then477: ; preds = %for.body471 + %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 2 + %indvars.iv.next1164 = add i64 %indvars.iv1163, 1 + br label %for.cond468 + +for.inc498: ; preds = %for.inc498, %for.body471, %for.cond468 + br label %for.inc498 + +while.end: ; preds = %entry + ret void +} diff --git a/test/Transforms/LoopStrengthReduce/X86/dg.exp b/test/Transforms/LoopStrengthReduce/X86/dg.exp deleted file mode 100644 index 7b7bd4e..0000000 --- a/test/Transforms/LoopStrengthReduce/X86/dg.exp +++ /dev/null @@ -1,5 +0,0 @@ -load_lib llvm.exp - -if { [llvm_supports_target X86] } { - RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll}]] -} diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll new file mode 100644 index 0000000..e42b67f --- /dev/null +++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll @@ -0,0 +1,300 @@ +; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64 +; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32 + +; @simple is the most basic chain of address induction variables. Chaining +; saves at least one register and avoids complex addressing and setup +; code. +; +; X64: @simple +; %x * 4 +; X64: shlq $2 +; no other address computation in the preheader +; X64-NEXT: xorl +; X64-NEXT: .align +; X64: %loop +; no complex address modes +; X64-NOT: (%{{[^)]+}},%{{[^)]+}}, +; +; X32: @simple +; no expensive address computation in the preheader +; X32-NOT: imul +; X32: %loop +; no complex address modes +; X32-NOT: (%{{[^)]+}},%{{[^)]+}}, +define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind { +entry: + br label %loop +loop: + %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ] + %s = phi i32 [ 0, %entry ], [ %s4, %loop ] + %v = load i32* %iv + %iv1 = getelementptr inbounds i32* %iv, i32 %x + %v1 = load i32* %iv1 + %iv2 = getelementptr inbounds i32* %iv1, i32 %x + %v2 = load i32* %iv2 + %iv3 = getelementptr inbounds i32* %iv2, i32 %x + %v3 = load i32* %iv3 + %s1 = add i32 %s, %v + %s2 = add i32 %s1, %v1 + %s3 = add i32 %s2, %v2 + %s4 = add i32 %s3, %v3 + %iv4 = getelementptr inbounds i32* %iv3, i32 %x + %cmp = icmp eq i32* %iv4, %b + br i1 %cmp, label %exit, label %loop +exit: + ret i32 %s4 +} + +; @user is not currently chained because the IV is live across memory ops. 
+; +; X64: @user +; X64: shlq $4 +; X64: lea +; X64: lea +; X64: %loop +; complex address modes +; X64: (%{{[^)]+}},%{{[^)]+}}, +; +; X32: @user +; expensive address computation in the preheader +; X32: imul +; X32: %loop +; complex address modes +; X32: (%{{[^)]+}},%{{[^)]+}}, +define i32 @user(i32* %a, i32* %b, i32 %x) nounwind { +entry: + br label %loop +loop: + %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ] + %s = phi i32 [ 0, %entry ], [ %s4, %loop ] + %v = load i32* %iv + %iv1 = getelementptr inbounds i32* %iv, i32 %x + %v1 = load i32* %iv1 + %iv2 = getelementptr inbounds i32* %iv1, i32 %x + %v2 = load i32* %iv2 + %iv3 = getelementptr inbounds i32* %iv2, i32 %x + %v3 = load i32* %iv3 + %s1 = add i32 %s, %v + %s2 = add i32 %s1, %v1 + %s3 = add i32 %s2, %v2 + %s4 = add i32 %s3, %v3 + %iv4 = getelementptr inbounds i32* %iv3, i32 %x + store i32 %s4, i32* %iv + %cmp = icmp eq i32* %iv4, %b + br i1 %cmp, label %exit, label %loop +exit: + ret i32 %s4 +} + +; @extrastride is a slightly more interesting case of a single +; complete chain with multiple strides. The test case IR is what LSR +; used to do, and exactly what we don't want to do. LSR's new IV +; chaining feature should now undo the damage. +; +; X64: extrastride: +; We currently don't handle this on X64 because the sexts cause +; strange increment expressions like this: +; IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) +; +; X32: extrastride: +; no spills in the preheader +; X32-NOT: mov{{.*}}(%esp){{$}} +; X32: %for.body{{$}} +; no complex address modes +; X32-NOT: (%{{[^)]+}},%{{[^)]+}}, +; no reloads +; X32-NOT: (%esp) +define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind { +entry: + %cmp8 = icmp eq i32 %z, 0 + br i1 %cmp8, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: ; preds = %entry + %add.ptr.sum = shl i32 %main_stride, 1 ; s*2 + %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3 + %add.ptr2.sum = add i32 %x, %main_stride ; s + x + %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4 + %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ] + %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ] + %0 = bitcast i8* %main.addr.011 to i32* + %1 = load i32* %0, align 4 + %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride + %2 = bitcast i8* %add.ptr to i32* + %3 = load i32* %2, align 4 + %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum + %4 = bitcast i8* %add.ptr1 to i32* + %5 = load i32* %4, align 4 + %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum + %6 = bitcast i8* %add.ptr2 to i32* + %7 = load i32* %6, align 4 + %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum + %8 = bitcast i8* %add.ptr3 to i32* + %9 = load i32* %8, align 4 + %add = add i32 %3, %1 + %add4 = add i32 %add, %5 + %add5 = add i32 %add4, %7 + %add6 = add i32 %add5, %9 + store i32 %add6, i32* %res.addr.09, align 4 + %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum + %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y + %inc = add i32 %i.010, 1 + %cmp = icmp eq i32 %inc, %z + br i1 %cmp, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +; @foldedidx is an 
unrolled variant of this loop: +; for (unsigned long i = 0; i < len; i += s) { +; c[i] = a[i] + b[i]; +; } +; where 's' can be folded into the addressing mode. +; Consequently, we should *not* form any chains. +; +; X64: foldedidx: +; X64: movzbl -3( +; +; X32: foldedidx: +; X32: movzbl -3( +define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ] + %arrayidx = getelementptr inbounds i8* %a, i32 %i.07 + %0 = load i8* %arrayidx, align 1 + %conv5 = zext i8 %0 to i32 + %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07 + %1 = load i8* %arrayidx1, align 1 + %conv26 = zext i8 %1 to i32 + %add = add nsw i32 %conv26, %conv5 + %conv3 = trunc i32 %add to i8 + %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07 + store i8 %conv3, i8* %arrayidx4, align 1 + %inc1 = or i32 %i.07, 1 + %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1 + %2 = load i8* %arrayidx.1, align 1 + %conv5.1 = zext i8 %2 to i32 + %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1 + %3 = load i8* %arrayidx1.1, align 1 + %conv26.1 = zext i8 %3 to i32 + %add.1 = add nsw i32 %conv26.1, %conv5.1 + %conv3.1 = trunc i32 %add.1 to i8 + %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1 + store i8 %conv3.1, i8* %arrayidx4.1, align 1 + %inc.12 = or i32 %i.07, 2 + %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12 + %4 = load i8* %arrayidx.2, align 1 + %conv5.2 = zext i8 %4 to i32 + %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12 + %5 = load i8* %arrayidx1.2, align 1 + %conv26.2 = zext i8 %5 to i32 + %add.2 = add nsw i32 %conv26.2, %conv5.2 + %conv3.2 = trunc i32 %add.2 to i8 + %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12 + store i8 %conv3.2, i8* %arrayidx4.2, align 1 + %inc.23 = or i32 %i.07, 3 + %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23 + %6 = load i8* %arrayidx.3, align 1 + %conv5.3 = zext i8 %6 to i32 + %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23 + %7 = load i8* %arrayidx1.3, align 1 + %conv26.3 = zext i8 %7 to i32 + %add.3 = add nsw i32 %conv26.3, %conv5.3 + %conv3.3 = trunc i32 %add.3 to i8 + %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23 + store i8 %conv3.3, i8* %arrayidx4.3, align 1 + %inc.3 = add nsw i32 %i.07, 4 + %exitcond.3 = icmp eq i32 %inc.3, 400 + br i1 %exitcond.3, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret void +} + +; @multioper tests instructions with multiple IV user operands. We +; should be able to chain them independent of each other. 
+; +; X64: @multioper +; X64: %for.body +; X64: movl %{{.*}},4) +; X64-NEXT: leal 1( +; X64-NEXT: movl %{{.*}},4) +; X64-NEXT: leal 2( +; X64-NEXT: movl %{{.*}},4) +; X64-NEXT: leal 3( +; X64-NEXT: movl %{{.*}},4) +; +; X32: @multioper +; X32: %for.body +; X32: movl %{{.*}},4) +; X32-NEXT: leal 1( +; X32-NEXT: movl %{{.*}},4) +; X32-NEXT: leal 2( +; X32-NEXT: movl %{{.*}},4) +; X32-NEXT: leal 3( +; X32-NEXT: movl %{{.*}},4) +define void @multioper(i32* %a, i32 %n) nounwind { +entry: + br label %for.body + +for.body: + %p = phi i32* [ %p.next, %for.body ], [ %a, %entry ] + %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ] + store i32 %i, i32* %p, align 4 + %inc1 = or i32 %i, 1 + %add.ptr.i1 = getelementptr inbounds i32* %p, i32 1 + store i32 %inc1, i32* %add.ptr.i1, align 4 + %inc2 = add nsw i32 %i, 2 + %add.ptr.i2 = getelementptr inbounds i32* %p, i32 2 + store i32 %inc2, i32* %add.ptr.i2, align 4 + %inc3 = add nsw i32 %i, 3 + %add.ptr.i3 = getelementptr inbounds i32* %p, i32 3 + store i32 %inc3, i32* %add.ptr.i3, align 4 + %p.next = getelementptr inbounds i32* %p, i32 4 + %inc4 = add nsw i32 %i, 4 + %cmp = icmp slt i32 %inc4, %n + br i1 %cmp, label %for.body, label %exit + +exit: + ret void +} + +; @testCmpZero has a ICmpZero LSR use that should not be hidden from +; LSR. Profitable chains should have more than one nonzero increment +; anyway. +; +; X32: @testCmpZero +; X32: %for.body82.us +; X32: dec +; X32: jne +define void @testCmpZero(i8* %src, i8* %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp { +entry: + %dest0 = getelementptr inbounds i8* %src, i32 %srcidx + %source0 = getelementptr inbounds i8* %dst, i32 %dstidx + %add.ptr79.us.sum = add i32 %srcidx, %len + %lftr.limit = getelementptr i8* %src, i32 %add.ptr79.us.sum + br label %for.body82.us + +for.body82.us: + %dest = phi i8* [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ] + %source = phi i8* [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ] + %0 = bitcast i8* %source to i32* + %1 = load i32* %0, align 4 + %trunc = trunc i32 %1 to i8 + %add.ptr83.us = getelementptr inbounds i8* %source, i32 4 + %incdec.ptr91.us = getelementptr inbounds i8* %dest, i32 1 + store i8 %trunc, i8* %dest, align 1 + %exitcond = icmp eq i8* %incdec.ptr91.us, %lftr.limit + br i1 %exitcond, label %return, label %for.body82.us + +return: + ret void +} diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll new file mode 100644 index 0000000..d8e0aa9 --- /dev/null +++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll @@ -0,0 +1,96 @@ +; REQUIRES: asserts +; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -stress-ivchain | FileCheck %s -check-prefix=X64 +; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -stress-ivchain | FileCheck %s -check-prefix=X32 + +; @sharedidx is an unrolled variant of this loop: +; for (unsigned long i = 0; i < len; i += s) { +; c[i] = a[i] + b[i]; +; } +; where 's' cannot be folded into the addressing mode. +; +; This is not quite profitable to chain. But with -stress-ivchain, we +; can form three address chains in place of the shared induction +; variable. 
+ +; X64: sharedidx: +; X64: %for.body.preheader +; X64-NOT: leal ({{.*}},4) +; X64: %for.body.1 + +; X32: sharedidx: +; X32: %for.body.2 +; X32: add +; X32: add +; X32: add +; X32: add +; X32: add +; X32: %for.body.3 +define void @sharedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c, i32 %s, i32 %len) nounwind ssp { +entry: + %cmp8 = icmp eq i32 %len, 0 + br i1 %cmp8, label %for.end, label %for.body + +for.body: ; preds = %entry, %for.body.3 + %i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i8* %a, i32 %i.09 + %0 = load i8* %arrayidx, align 1 + %conv6 = zext i8 %0 to i32 + %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.09 + %1 = load i8* %arrayidx1, align 1 + %conv27 = zext i8 %1 to i32 + %add = add nsw i32 %conv27, %conv6 + %conv3 = trunc i32 %add to i8 + %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.09 + store i8 %conv3, i8* %arrayidx4, align 1 + %add5 = add i32 %i.09, %s + %cmp = icmp ult i32 %add5, %len + br i1 %cmp, label %for.body.1, label %for.end + +for.end: ; preds = %for.body, %for.body.1, %for.body.2, %for.body.3, %entry + ret void + +for.body.1: ; preds = %for.body + %arrayidx.1 = getelementptr inbounds i8* %a, i32 %add5 + %2 = load i8* %arrayidx.1, align 1 + %conv6.1 = zext i8 %2 to i32 + %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %add5 + %3 = load i8* %arrayidx1.1, align 1 + %conv27.1 = zext i8 %3 to i32 + %add.1 = add nsw i32 %conv27.1, %conv6.1 + %conv3.1 = trunc i32 %add.1 to i8 + %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %add5 + store i8 %conv3.1, i8* %arrayidx4.1, align 1 + %add5.1 = add i32 %add5, %s + %cmp.1 = icmp ult i32 %add5.1, %len + br i1 %cmp.1, label %for.body.2, label %for.end + +for.body.2: ; preds = %for.body.1 + %arrayidx.2 = getelementptr inbounds i8* %a, i32 %add5.1 + %4 = load i8* %arrayidx.2, align 1 + %conv6.2 = zext i8 %4 to i32 + %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %add5.1 + %5 = load i8* %arrayidx1.2, align 1 + %conv27.2 = zext i8 %5 to i32 + %add.2 = add nsw i32 %conv27.2, %conv6.2 + %conv3.2 = trunc i32 %add.2 to i8 + %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %add5.1 + store i8 %conv3.2, i8* %arrayidx4.2, align 1 + %add5.2 = add i32 %add5.1, %s + %cmp.2 = icmp ult i32 %add5.2, %len + br i1 %cmp.2, label %for.body.3, label %for.end + +for.body.3: ; preds = %for.body.2 + %arrayidx.3 = getelementptr inbounds i8* %a, i32 %add5.2 + %6 = load i8* %arrayidx.3, align 1 + %conv6.3 = zext i8 %6 to i32 + %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %add5.2 + %7 = load i8* %arrayidx1.3, align 1 + %conv27.3 = zext i8 %7 to i32 + %add.3 = add nsw i32 %conv27.3, %conv6.3 + %conv3.3 = trunc i32 %add.3 to i8 + %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %add5.2 + store i8 %conv3.3, i8* %arrayidx4.3, align 1 + %add5.3 = add i32 %add5.2, %s + %cmp.3 = icmp ult i32 %add5.3, %len + br i1 %cmp.3, label %for.body, label %for.end +} diff --git a/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg new file mode 100644 index 0000000..84bd88c --- /dev/null +++ b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg @@ -0,0 +1,13 @@ +config.suffixes = ['.ll'] + +def getRoot(config): + if not config.parent: + return config + return getRoot(config.parent) + +root = getRoot(config) + +targets = set(root.targets_to_build.split()) +if not 'X86' in targets: + config.unsupported = True + |
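For readers skimming the diff, here is a rough C analogue of the loop that @simple in ivchain-X86.ll is derived from. The function name, variable names, and exact types are guesses for illustration only; the test itself checks nothing beyond the x86 assembly produced for the IR shown above.

/* Sketch of the pattern @simple exercises: four loads whose addresses
 * differ by a common stride x, walking a single pointer from a to b.
 * With IV chaining, LSR keeps one running pointer and advances it
 * between uses instead of materializing p, p+x, p+2x, and p+3x
 * as separate address computations. */
int simple(int *a, int *b, int x) {
  int s = 0;
  for (int *p = a; p != b; p += 4 * x) {
    s += p[0];        /* corresponds to %v  */
    s += p[x];        /* corresponds to %v1 */
    s += p[2 * x];    /* corresponds to %v2 */
    s += p[3 * x];    /* corresponds to %v3 */
  }
  return s;
}

The @user variant is the same loop with an extra store of the running sum through the base pointer (store i32 %s4, i32* %iv), which keeps that IV live across the memory operations; as the comment in the test notes, that is why @user currently expects complex address modes rather than a chain.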