author | Bill Schmidt <wschmidt@linux.vnet.ibm.com> | 2013-08-30 15:18:11 +0000 |
---|---|---|
committer | Bill Schmidt <wschmidt@linux.vnet.ibm.com> | 2013-08-30 15:18:11 +0000 |
commit | 9bc94276e796d644cb425a7c7d38cc44dbf4e9c1 (patch) | |
tree | d294503b8a5692ef83cd100c8133c4fb39f8e10c /test/CodeGen/PowerPC/fast-isel-conversion.ll | |
parent | 2cc396bfb06a8e4df2fae6450277e7a05ebf6e4b (diff) | |
[PowerPC] Add handling for conversions to fast-isel.
Yet another chunk of fast-isel code. This one handles various
conversions involving floating-point. (It also includes some
miscellaneous handling throughout the back end for LWA_32 and LWAX_32
that should have been part of the load-store patch.)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189677 91177308-0d34-0410-b5e6-96231b3b80d8
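For context on the checks below: with -fast-isel-abort, each conversion in the new test must be selected by fast-isel itself (the option aborts on a fallback to SelectionDAG), and on pwr7 the integer-to-FP cases are expected to go through a GPR store and FP-register reload followed by an fcfid-family convert. The following is a minimal sketch of that pattern, assuming the same triple and CPU as the test's RUN line; the function and file names are illustrative, not part of the patch.

; llc -O0 -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 conv.ll
define float @i32_to_float(i32 %a) nounwind {
entry:
  ; Per the sitofp_single_i32 checks in the test below, pwr7 should emit
  ; std + lfiwax (to move the integer into an FPR) and then fcfids for the
  ; single-precision result.
  %conv = sitofp i32 %a to float
  ret float %conv
}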
Diffstat (limited to 'test/CodeGen/PowerPC/fast-isel-conversion.ll')
-rw-r--r-- | test/CodeGen/PowerPC/fast-isel-conversion.ll | 305 |
1 file changed, 305 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion.ll b/test/CodeGen/PowerPC/fast-isel-conversion.ll
new file mode 100644
index 0000000..a31c312
--- /dev/null
+++ b/test/CodeGen/PowerPC/fast-isel-conversion.ll
@@ -0,0 +1,305 @@
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+
+; Test sitofp
+
+define void @sitofp_single_i64(i64 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i64
+  %b.addr = alloca float, align 4
+  %conv = sitofp i64 %a to float
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i32
+  %b.addr = alloca float, align 4
+  %conv = sitofp i32 %a to float
+; ELF64: std
+; ELF64: lfiwax
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i16
+  %b.addr = alloca float, align 4
+  %conv = sitofp i16 %a to float
+; ELF64: extsh
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_single_i8(i8 %a) nounwind ssp {
+entry:
+; ELF64: sitofp_single_i8
+  %b.addr = alloca float, align 4
+  %conv = sitofp i8 %a to float
+; ELF64: extsb
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfids
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i32
+  %b.addr = alloca double, align 8
+  %conv = sitofp i32 %a to double
+; ELF64: std
+; ELF64: lfiwax
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i64
+  %b.addr = alloca double, align 8
+  %conv = sitofp i64 %a to double
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i16
+  %b.addr = alloca double, align 8
+  %conv = sitofp i16 %a to double
+; ELF64: extsh
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
+entry:
+; ELF64: sitofp_double_i8
+  %b.addr = alloca double, align 8
+  %conv = sitofp i8 %a to double
+; ELF64: extsb
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfid
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+; Test uitofp
+
+define void @uitofp_single_i64(i64 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i64
+  %b.addr = alloca float, align 4
+  %conv = uitofp i64 %a to float
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i32
+  %b.addr = alloca float, align 4
+  %conv = uitofp i32 %a to float
+; ELF64: std
+; ELF64: lfiwzx
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i16
+  %b.addr = alloca float, align 4
+  %conv = uitofp i16 %a to float
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_single_i8(i8 %a) nounwind ssp {
+entry:
+; ELF64: uitofp_single_i8
+  %b.addr = alloca float, align 4
+  %conv = uitofp i8 %a to float
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidus
+  store float %conv, float* %b.addr, align 4
+  ret void
+}
+
+define void @uitofp_double_i64(i64 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i64
+  %b.addr = alloca double, align 8
+  %conv = uitofp i64 %a to double
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i32
+  %b.addr = alloca double, align 8
+  %conv = uitofp i32 %a to double
+; ELF64: std
+; ELF64: lfiwzx
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i16
+  %b.addr = alloca double, align 8
+  %conv = uitofp i16 %a to double
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
+entry:
+; ELF64: uitofp_double_i8
+  %b.addr = alloca double, align 8
+  %conv = uitofp i8 %a to double
+; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
+; ELF64: std
+; ELF64: lfd
+; ELF64: fcfidu
+  store double %conv, double* %b.addr, align 8
+  ret void
+}
+
+; Test fptosi
+
+define void @fptosi_float_i32(float %a) nounwind ssp {
+entry:
+; ELF64: fptosi_float_i32
+  %b.addr = alloca i32, align 4
+  %conv = fptosi float %a to i32
+; ELF64: fctiwz
+; ELF64: stfd
+; ELF64: lwa
+  store i32 %conv, i32* %b.addr, align 4
+  ret void
+}
+
+define void @fptosi_float_i64(float %a) nounwind ssp {
+entry:
+; ELF64: fptosi_float_i64
+  %b.addr = alloca i64, align 4
+  %conv = fptosi float %a to i64
+; ELF64: fctidz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 4
+  ret void
+}
+
+define void @fptosi_double_i32(double %a) nounwind ssp {
+entry:
+; ELF64: fptosi_double_i32
+  %b.addr = alloca i32, align 8
+  %conv = fptosi double %a to i32
+; ELF64: fctiwz
+; ELF64: stfd
+; ELF64: lwa
+  store i32 %conv, i32* %b.addr, align 8
+  ret void
+}
+
+define void @fptosi_double_i64(double %a) nounwind ssp {
+entry:
+; ELF64: fptosi_double_i64
+  %b.addr = alloca i64, align 8
+  %conv = fptosi double %a to i64
+; ELF64: fctidz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 8
+  ret void
+}
+
+; Test fptoui
+
+define void @fptoui_float_i32(float %a) nounwind ssp {
+entry:
+; ELF64: fptoui_float_i32
+  %b.addr = alloca i32, align 4
+  %conv = fptoui float %a to i32
+; ELF64: fctiwuz
+; ELF64: stfd
+; ELF64: lwz
+  store i32 %conv, i32* %b.addr, align 4
+  ret void
+}
+
+define void @fptoui_float_i64(float %a) nounwind ssp {
+entry:
+; ELF64: fptoui_float_i64
+  %b.addr = alloca i64, align 4
+  %conv = fptoui float %a to i64
+; ELF64: fctiduz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 4
+  ret void
+}
+
+define void @fptoui_double_i32(double %a) nounwind ssp {
+entry:
+; ELF64: fptoui_double_i32
+  %b.addr = alloca i32, align 8
+  %conv = fptoui double %a to i32
+; ELF64: fctiwuz
+; ELF64: stfd
+; ELF64: lwz
+  store i32 %conv, i32* %b.addr, align 8
+  ret void
+}
+
+define void @fptoui_double_i64(double %a) nounwind ssp {
+entry:
+; ELF64: fptoui_double_i64
+  %b.addr = alloca i64, align 8
+  %conv = fptoui double %a to i64
+; ELF64: fctiduz
+; ELF64: stfd
+; ELF64: ld
+  store i64 %conv, i64* %b.addr, align 8
+  ret void
+}
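To run the new test by hand, the RUN line above expands to roughly the following (lit substitutes %s with the test's path):

llc < test/CodeGen/PowerPC/fast-isel-conversion.ll -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck test/CodeGen/PowerPC/fast-isel-conversion.ll --check-prefix=ELF64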