Diffstat (limited to 'test/CodeGen/R600/uint_to_fp.f64.ll')
-rw-r--r--  test/CodeGen/R600/uint_to_fp.f64.ll | 111
1 file changed, 67 insertions(+), 44 deletions(-)
diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll
index bddf700..f715243 100644
--- a/test/CodeGen/R600/uint_to_fp.f64.ll
+++ b/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -1,47 +1,12 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
 
 declare i32 @llvm.r600.read.tidig.x() nounwind readnone
 
-; SI-LABEL: {{^}}uint_to_fp_f64_i32
-; SI: v_cvt_f64_u32_e32
-; SI: s_endpgm
-define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
-  %cast = uitofp i32 %in to double
-  store double %cast, double addrspace(1)* %out, align 8
-  ret void
-}
-
-; SI-LABEL: {{^}}uint_to_fp_i1_f64:
-; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
-; FIXME: We should the VGPR sources for V_CNDMASK are copied from SGPRs,
-; we should be able to fold the SGPRs into the V_CNDMASK instructions.
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
-; SI: buffer_store_dwordx2
-; SI: s_endpgm
-define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
-  %cmp = icmp eq i32 %in, 0
-  %fp = uitofp i1 %cmp to double
-  store double %fp, double addrspace(1)* %out, align 4
-  ret void
-}
-
-; SI-LABEL: {{^}}uint_to_fp_i1_f64_load:
-; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
-; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
-; SI: buffer_store_dwordx2 [[RESULT]]
-; SI: s_endpgm
-define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
-  %fp = uitofp i1 %in to double
-  store double %fp, double addrspace(1)* %out, align 8
-  ret void
-}
-
 ; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
 ; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; SI-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
+; SI: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
 ; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
+; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
 ; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
 define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -53,23 +18,81 @@ define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)
   ret void
 }
 
-; SI-LABEL: {{^}}s_uint_to_fp_f64_i64
-define void @s_uint_to_fp_f64_i64(double addrspace(1)* %out, i64 %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
+define void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
   %cast = uitofp i64 %in to double
   store double %cast, double addrspace(1)* %out, align 8
   ret void
 }
 
-; SI-LABEL: {{^}}s_uint_to_fp_v2f64_v2i64
-define void @s_uint_to_fp_v2f64_v2i64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
+define void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
   %cast = uitofp <2 x i64> %in to <2 x double>
   store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; SI-LABEL: {{^}}s_uint_to_fp_v4f64_v4i64
-define void @s_uint_to_fp_v4f64_v4i64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
+define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
   %cast = uitofp <4 x i64> %in to <4 x double>
   store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
   ret void
 }
+
+; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+  %cast = uitofp i32 %in to double
+  store double %cast, double addrspace(1)* %out, align 8
+  ret void
+}
+
+; SI-LABEL: {{^}}s_uint_to_fp_v2i32_to_v2f64
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
+  %cast = uitofp <2 x i32> %in to <2 x double>
+  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
+  ret void
+}
+
+; SI-LABEL: {{^}}s_uint_to_fp_v4i32_to_v4f64
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
+  %cast = uitofp <4 x i32> %in to <4 x double>
+  store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
+  ret void
+}
+
+; FIXME: select on 0, 0
+; SI-LABEL: {{^}}uint_to_fp_i1_to_f64:
+; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; We can't fold the SGPRs into v_cndmask_b32_e64, because it already
+; uses an SGPR for [[CMP]]
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, v{{[0-9]+}}, [[CMP]]
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 0, [[CMP]]
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
+  %cmp = icmp eq i32 %in, 0
+  %fp = uitofp i1 %cmp to double
+  store double %fp, double addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}uint_to_fp_i1_to_f64_load:
+; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
+; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+; SI: s_endpgm
+define void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
+  %fp = uitofp i1 %in to double
+  store double %fp, double addrspace(1)* %out, align 8
+  ret void
+}
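The checks for v_uint_to_fp_i64_to_f64 pin down the expansion this test exists to verify: the i64 is split into 32-bit halves, each half is converted exactly by v_cvt_f64_u32, the high half is scaled by 2^32 with v_ldexp_f64, and the two doubles are added. Below is a minimal C sketch of that arithmetic, for illustration only; it is not part of the commit, and the function name is made up. The comments map each step to the SI instruction the CHECK lines match.

/* Illustrative sketch of the SI i64 -> f64 expansion checked above. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static double uint64_to_f64_via_halves(uint64_t x) {
    uint32_t lo = (uint32_t)x;          /* v[[LO]] from buffer_load_dwordx2 */
    uint32_t hi = (uint32_t)(x >> 32);  /* v[[HI]]                          */
    double hi_conv = (double)hi;        /* v_cvt_f64_u32_e32 [[HI_CONV]]    */
    double ldx = ldexp(hi_conv, 32);    /* v_ldexp_f64 [[LDEXP]], 32        */
    double lo_conv = (double)lo;        /* v_cvt_f64_u32_e32 [[LO_CONV]]    */
    return ldx + lo_conv;               /* v_add_f64 [[RESULT]]             */
}

int main(void) {
    uint64_t x = 0xdeadbeefcafebabeULL;
    /* Both values agree: each half converts exactly (fits in 53 bits),
     * ldexp is exact, so the final add performs the single rounding,
     * just like a direct uint64 -> double conversion. */
    printf("%.17g\n%.17g\n", uint64_to_f64_via_halves(x), (double)x);
    return 0;
}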