diff options
Diffstat (limited to 'test/CodeGen/R600/and.ll')
-rw-r--r-- | test/CodeGen/R600/and.ll | 100 |
1 file changed, 74 insertions, 26 deletions
diff --git a/test/CodeGen/R600/and.ll b/test/CodeGen/R600/and.ll index cf11481..9a76fce 100644 --- a/test/CodeGen/R600/and.ll +++ b/test/CodeGen/R600/and.ll @@ -1,12 +1,12 @@ ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s ; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -; FUNC-LABEL: @test2 +; FUNC-LABEL: {{^}}test2: ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) { %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1 @@ -17,16 +17,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) { ret void } -; FUNC-LABEL: @test4 +; FUNC-LABEL: {{^}}test4: ; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} ; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} ; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} ; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} -; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} +; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) { %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1 @@ -37,24 +37,24 @@ define void @test4(<4 x i32> 
addrspace(1)* %out, <4 x i32> addrspace(1)* %in) { ret void } -; FUNC-LABEL: @s_and_i32 -; SI: S_AND_B32 +; FUNC-LABEL: {{^}}s_and_i32: +; SI: s_and_b32 define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) { %and = and i32 %a, %b store i32 %and, i32 addrspace(1)* %out, align 4 ret void } -; FUNC-LABEL: @s_and_constant_i32 -; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687 +; FUNC-LABEL: {{^}}s_and_constant_i32: +; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687 define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) { %and = and i32 %a, 1234567 store i32 %and, i32 addrspace(1)* %out, align 4 ret void } -; FUNC-LABEL: @v_and_i32 -; SI: V_AND_B32 +; FUNC-LABEL: {{^}}v_and_i32: +; SI: v_and_b32 define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) { %a = load i32 addrspace(1)* %aptr, align 4 %b = load i32 addrspace(1)* %bptr, align 4 @@ -63,8 +63,8 @@ define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addr ret void } -; FUNC-LABEL: @v_and_constant_i32 -; SI: V_AND_B32 +; FUNC-LABEL: {{^}}v_and_constant_i32: +; SI: v_and_b32 define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) { %a = load i32 addrspace(1)* %aptr, align 4 %and = and i32 %a, 1234567 @@ -72,25 +72,34 @@ define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) ret void } -; FUNC-LABEL: @s_and_i64 -; SI: S_AND_B64 +; FUNC-LABEL: {{^}}s_and_i64: +; SI: s_and_b64 define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) { %and = and i64 %a, %b store i64 %and, i64 addrspace(1)* %out, align 8 ret void } -; FUNC-LABEL: @s_and_constant_i64 -; SI: S_AND_B64 +; FIXME: Should use SGPRs +; FUNC-LABEL: {{^}}s_and_i1: +; SI: v_and_b32 +define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) { + %and = and i1 %a, %b + store i1 %and, i1 addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}s_and_constant_i64: +; SI: s_and_b64 define void @s_and_constant_i64(i64 addrspace(1)* 
%out, i64 %a) { %and = and i64 %a, 281474976710655 store i64 %and, i64 addrspace(1)* %out, align 8 ret void } -; FUNC-LABEL: @v_and_i64 -; SI: V_AND_B32 -; SI: V_AND_B32 +; FUNC-LABEL: {{^}}v_and_i64: +; SI: v_and_b32 +; SI: v_and_b32 define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) { %a = load i64 addrspace(1)* %aptr, align 8 %b = load i64 addrspace(1)* %bptr, align 8 @@ -99,12 +108,51 @@ define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addr ret void } -; FUNC-LABEL: @v_and_constant_i64 -; SI: V_AND_B32 -; SI: V_AND_B32 +; FUNC-LABEL: {{^}}v_and_i64_br: +; SI: v_and_b32 +; SI: v_and_b32 +define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i32 %cond) { +entry: + %tmp0 = icmp eq i32 %cond, 0 + br i1 %tmp0, label %if, label %endif + +if: + %a = load i64 addrspace(1)* %aptr, align 8 + %b = load i64 addrspace(1)* %bptr, align 8 + %and = and i64 %a, %b + br label %endif + +endif: + %tmp1 = phi i64 [%and, %if], [0, %entry] + store i64 %tmp1, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: {{^}}v_and_constant_i64: +; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}} +; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}} define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) { %a = load i64 addrspace(1)* %aptr, align 8 %and = and i64 %a, 1234567 store i64 %and, i64 addrspace(1)* %out, align 8 ret void } + +; FIXME: Replace and 0 with mov 0 +; FUNC-LABEL: {{^}}v_and_inline_imm_i64: +; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}} +; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}} +define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) { + %a = load i64 addrspace(1)* %aptr, align 8 + %and = and i64 %a, 64 + store i64 %and, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: {{^}}s_and_inline_imm_i64: +; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64 
+define void @s_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) { + %and = and i64 %a, 64 + store i64 %and, i64 addrspace(1)* %out, align 8 + ret void +} |