author:    Stephen Hines <srhines@google.com>   2014-07-21 00:45:20 -0700
committer: Stephen Hines <srhines@google.com>   2014-07-21 00:45:20 -0700
commit:    c6a4f5e819217e1e12c458aed8e7b122e23a3a58 (patch)
tree:      81b7dd2bb4370a392f31d332a566c903b5744764 /test/Transforms/SeparateConstOffsetFromGEP
parent:    19c6fbb3e8aaf74093afa08013134b61fa08f245 (diff)
Update LLVM for rebase to r212749.
Includes a cherry-pick of:
r212948 - fixes a small issue with atomic calls
Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
Diffstat (limited to 'test/Transforms/SeparateConstOffsetFromGEP')
 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
3 files changed, 260 insertions(+), 55 deletions(-)
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
index 40532cd..a5e90f8 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'NVPTX' in targets:
+if not 'NVPTX' in config.root.targets:
     config.unsupported = True
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
index 850fc4c..c07440c 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -1,4 +1,3 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
 ; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
@@ -20,6 +19,90 @@ target triple = "nvptx64-unknown-unknown"
 define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
 .preheader:
+  %0 = sext i32 %y to i64
+  %1 = sext i32 %x to i64
+  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast float addrspace(3)* %2 to float*
+  %4 = load float* %3, align 4
+  %5 = fadd float %4, 0.000000e+00
+  %6 = add i32 %y, 1
+  %7 = sext i32 %6 to i64
+  %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+  %9 = addrspacecast float addrspace(3)* %8 to float*
+  %10 = load float* %9, align 4
+  %11 = fadd float %5, %10
+  %12 = add i32 %x, 1
+  %13 = sext i32 %12 to i64
+  %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+  %15 = addrspacecast float addrspace(3)* %14 to float*
+  %16 = load float* %15, align 4
+  %17 = fadd float %11, %16
+  %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+  %19 = addrspacecast float addrspace(3)* %18 to float*
+  %20 = load float* %19, align 4
+  %21 = fadd float %17, %20
+  store float %21, float* %output, align 4
+  ret void
+}
+; PTX-LABEL: sum_of_array(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
+; the order of "sext" and "add" when computing the array indices. @sum_of_array
+; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
+; @sum_of_array2 computes sext before add,
+; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
+; able to extract constant offsets from both forms.
+define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+  %0 = sext i32 %y to i64
+  %1 = sext i32 %x to i64
+  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast float addrspace(3)* %2 to float*
+  %4 = load float* %3, align 4
+  %5 = fadd float %4, 0.000000e+00
+  %6 = add i64 %0, 1
+  %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+  %8 = addrspacecast float addrspace(3)* %7 to float*
+  %9 = load float* %8, align 4
+  %10 = fadd float %5, %9
+  %11 = add i64 %1, 1
+  %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+  %13 = addrspacecast float addrspace(3)* %12 to float*
+  %14 = load float* %13, align 4
+  %15 = fadd float %10, %14
+  %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+  %17 = addrspacecast float addrspace(3)* %16 to float*
+  %18 = load float* %17, align 4
+  %19 = fadd float %15, %18
+  store float %19, float* %output, align 4
+  ret void
+}
+; PTX-LABEL: sum_of_array2(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array2(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+; Similar to @sum_of_array3, but extends array indices using zext instead of
+; sext. e.g., array[zext(x + 1)][zext(y + 1)].
+define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
   %0 = zext i32 %y to i64
   %1 = zext i32 %x to i64
   %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
@@ -45,15 +128,14 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
   store float %21, float* %output, align 4
   ret void
 }
-
-; PTX-LABEL: sum_of_array(
+; PTX-LABEL: sum_of_array3(
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
 
-; IR-LABEL: @sum_of_array(
-; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y
+; IR-LABEL: @sum_of_array3(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
 ; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
 ; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
 ; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
index 2e50f5f..ed40c7e 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -23,71 +23,94 @@ entry:
   %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
   ret double* %p
 }
-; CHECK-LABEL: @struct
-; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1
+; CHECK-LABEL: @struct(
+; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
 
-; We should be able to trace into sext/zext if it's directly used as a GEP
-; index.
-define float* @sext_zext(i32 %i, i32 %j) {
+; We should be able to trace into sext(a + b) if a + b is non-negative
+; (e.g., used as an index of an inbounds GEP) and one of a and b is
+; non-negative.
+define float* @sext_add(i32 %i, i32 %j) {
 entry:
-  %i1 = add i32 %i, 1
-  %j2 = add i32 %j, 2
-  %i1.ext = sext i32 %i1 to i64
-  %j2.ext = zext i32 %j2 to i64
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
+  %0 = add i32 %i, 1
+  %1 = sext i32 %0 to i64 ; inbound sext(i + 1) = sext(i) + 1
+  %2 = add i32 %j, -2
+  ; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
+  %3 = sext i32 %2 to i64
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
   ret float* %p
 }
-; CHECK-LABEL: @sext_zext
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
-; CHECK: getelementptr float* %{{[0-9]+}}, i64 34
+; CHECK-LABEL: @sext_add(
+; CHECK-NOT: = add
+; CHECK: add i32 %j, -2
+; CHECK: sext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
 
 ; We should be able to trace into sext/zext if it can be distributed to both
 ; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
+;
+; This test verifies we can transform
+; gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
+; to
+; gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
 define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
   %b1 = add nsw i32 %b, 1
   %b2 = sext i32 %b1 to i64
-  %i = add i64 %a, %b2
+  %i = add i64 %a, %b2 ; i = a + sext(b +nsw 1)
   %d1 = add nuw i32 %d, 1
   %d2 = zext i32 %d1 to i64
-  %j = add i64 %c, %d2
+  %j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
   %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
   ret float* %p
 }
-; CHECK-LABEL: @ext_add_no_overflow
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK-LABEL: @ext_add_no_overflow(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
 ; CHECK: getelementptr float* [[BASE_PTR]], i64 33
 
-; Similar to @ext_add_no_overflow, we should be able to trace into sext/zext if
-; its operand is an "or" instruction.
-define float* @ext_or(i64 %a, i32 %b) {
+; Verifies we handle nested sext/zext correctly.
+define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
+entry:
+  %0 = add nsw nuw i32 %a, 1
+  %1 = sext i32 %0 to i48
+  %2 = zext i48 %1 to i64 ; zext(sext(a +nsw nuw 1)) = zext(sext(a)) + 1
+  %3 = add nsw i32 %b, 2
+  %4 = sext i32 %3 to i48
+  %5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
+  %p1 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
+  store float* %p1, float** %out1
+  %6 = add nuw i32 %a, 3
+  %7 = zext i32 %6 to i48
+  %8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
+  %9 = add nsw i32 %b, 4
+  %10 = zext i32 %9 to i48
+  %11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
+  %p2 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
+  store float* %p2, float** %out2
+  ret void
+}
+; CHECK-LABEL: @sext_zext(
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_2]], i64 96
+
+; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
+; its operand is an OR and the two operands of the OR have no common bits.
+define float* @sext_or(i64 %a, i32 %b) {
 entry:
   %b1 = shl i32 %b, 2
-  %b2 = or i32 %b1, 1
-  %b3 = or i32 %b1, 2
-  %b2.ext = sext i32 %b2 to i64
+  %b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
+  %b3 = or i32 %b1, 4 ; (b << 2) and 4 may have common bits
+  %b2.ext = zext i32 %b2 to i64
   %b3.ext = sext i32 %b3 to i64
   %i = add i64 %a, %b2.ext
   %j = add i64 %a, %b3.ext
   %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
   ret float* %p
 }
-; CHECK-LABEL: @ext_or
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 34
-
-; We should treat "or" with no common bits (%k) as "add", and leave "or" with
-; potentially common bits (%l) as is.
-define float* @or(i64 %i) {
-entry:
-  %j = shl i64 %i, 2
-  %k = or i64 %j, 3 ; no common bits
-  %l = or i64 %j, 4 ; potentially common bits
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
-  ret float* %p
-}
-; CHECK-LABEL: @or
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
-; CHECK: getelementptr float* [[BASE_PTR]], i64 96
+; CHECK-LABEL: @sext_or(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 32
 
 ; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
 ; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
@@ -100,11 +123,28 @@ entry:
   store i64 %b5, i64* %out
   ret float* %p
 }
-; CHECK-LABEL: @expr
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
+; CHECK-LABEL: @expr(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
 ; CHECK: getelementptr float* [[BASE_PTR]], i64 160
 ; CHECK: store i64 %b5, i64* %out
 
+; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
+define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
+entry:
+  %0 = add nsw i32 %c, 8
+  %1 = add nsw i32 %b, %0
+  %2 = add nsw i32 %a, %1
+  %3 = sext i32 %2 to i64
+  %i = add i64 %d, %3
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+  ret float* %p
+}
+; CHECK-LABEL: @sext_expr(
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 8
+
 ; Verifies we handle "sub" correctly.
 define float* @sub(i64 %i, i64 %j) {
   %i2 = sub i64 %i, 5 ; i - 5
@@ -112,9 +152,9 @@ define float* @sub(i64 %i, i64 %j) {
   %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
   ret float* %p
 }
-; CHECK-LABEL: @sub
-; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK-LABEL: @sub(
+; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
 ; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
 
 %struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
@@ -130,8 +170,92 @@ entry:
   %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
   ret i64* %arrayidx3
 }
-; CHECK-LABEL: @packed_struct
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i32 %i, i32 1, i32 %j
-; CHECK: [[CASTED_PTR:%[0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
+; CHECK-LABEL: @packed_struct(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
 ; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
 ; CHECK: bitcast i8* %uglygep to i64*
+
+; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
+; because "zext(b + 8) != zext(b) + 8"
+define float* @zext_expr(i32 %a, i32 %b) {
+entry:
+  %0 = add i32 %b, 8
+  %1 = add nuw i32 %a, %0
+  %i = zext i32 %1 to i64
+  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+  ret float* %p
+}
+; CHECK-LABEL: zext_expr(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+
+; Per http://llvm.org/docs/LangRef.html#id181, the indices of a off-bound gep
+; should be considered sign-extended to the pointer size. Therefore,
+; gep base, (add i32 a, b) != gep (gep base, i32 a), i32 b
+; because
+; sext(a + b) != sext(a) + sext(b)
+;
+; This test verifies we do not illegitimately extract the 8 from
+; gep base, (i32 a + 8)
+define float* @i32_add(i32 %a) {
+entry:
+  %i = add i32 %a, 8
+  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
+  ret float* %p
+}
+; CHECK-LABEL: @i32_add(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK-NOT: getelementptr
+
+; Verifies that we compute the correct constant offset when the index is
+; sign-extended and then zero-extended. The old version of our code failed to
+; handle this case because it simply computed the constant offset as the
+; sign-extended value of the constant part of the GEP index.
+define float* @apint(i1 %a) {
+entry:
+  %0 = add nsw nuw i1 %a, 1
+  %1 = sext i1 %0 to i4
+  %2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
+  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
+  ret float* %p
+}
+; CHECK-LABEL: @apint(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 15
+
+; Do not trace into binary operators other than ADD, SUB, and OR.
+define float* @and(i64 %a) {
+entry:
+  %0 = shl i64 %a, 2
+  %1 = and i64 %0, 1
+  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
+  ret float* %p
+}
+; CHECK-LABEL: @and(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
+; CHECK-NOT: getelementptr
+
+; if zext(a + b) <= max signed value of typeof(a + b), then we can prove
+; a + b >= 0 and zext(a + b) == sext(a + b). If we can prove further a or b is
+; non-negative, we have zext(a + b) == sext(a) + sext(b).
+define float* @inbounds_zext_add(i32 %i, i4 %j) {
+entry:
+  %0 = add i32 %i, 1
+  %1 = zext i32 %0 to i64
+  ; Because zext(i + 1) is an index of an in bounds GEP based on
+  ; float_2d_array, zext(i + 1) <= sizeof(float_2d_array) = 4096.
+  ; Furthermore, since typeof(i + 1) is i32 and 4096 < 2^31, we are sure the
+  ; sign bit of i + 1 is 0. This implies zext(i + 1) = sext(i + 1).
+  %2 = add i4 %j, 2
+  %3 = zext i4 %2 to i64
+  ; In this case, typeof(j + 2) is i4, so zext(j + 2) <= 4096 does not imply
+  ; the sign bit of j + 2 is 0.
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
+  ret float* %p
+}
+; CHECK-LABEL: @inbounds_zext_add(
+; CHECK-NOT: add
+; CHECK: add i4 %j, 2
+; CHECK: sext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
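
Every test in this patch exercises the same shape of transformation: SeparateConstOffsetFromGEP peels compile-time-constant offsets out of a GEP's variable indices and reapplies them in a single trailing flat GEP, so that the later passes in the RUN lines (-gvn, -dce) can reuse one variable base address across neighbouring accesses. The sketch below is illustrative only and is not part of the commit: @example_array and the function names are hypothetical, the IR uses the same pre-3.7 typed-pointer GEP syntax as the tests, and the "after" function is hand-written to mirror the pattern the IR/CHECK lines match (a constant-free base GEP followed by a flat "i64 33" GEP), not the pass's verbatim output.

; Hypothetical before/after sketch -- not taken from the patch.
@example_array = global [32 x [32 x float]] zeroinitializer, align 4

; Before: both indices carry a "+ 1" constant.
define float* @example(i64 %x, i64 %y) {
  %x1 = add i64 %x, 1
  %y1 = add i64 %y, 1
  %p = getelementptr inbounds [32 x [32 x float]]* @example_array, i64 0, i64 %x1, i64 %y1
  ret float* %p
}

; After (roughly): the constants are accumulated into one trailing GEP of
; 1 * 32 + 1 = 33 floats past a constant-free base GEP, so accesses that differ
; only in the constant part can share %base.
define float* @example.split(i64 %x, i64 %y) {
  %base = getelementptr [32 x [32 x float]]* @example_array, i64 0, i64 %x, i64 %y
  %p = getelementptr float* %base, i64 33
  ret float* %p
}

Feeding the "before" function through the opt invocation from the RUN lines above (opt -S -separate-const-offset-from-gep) should produce something of the second shape, modulo value names and extra sext/zext instructions when the indices start out narrower than i64.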