From 7f47ce966219b8dbc37cf8c289660dd83923289f Mon Sep 17 00:00:00 2001 From: Richard Osborne Date: Thu, 16 Jul 2009 10:21:18 +0000 Subject: Custom lower unaligned 32 bit stores and loads into libcalls. This is a big code size win since before they were expanding to up to 16 instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75901 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/XCore/unaligned_load.ll | 9 +++++++++ test/CodeGen/XCore/unaligned_store.ll | 9 +++++++++ 2 files changed, 18 insertions(+) create mode 100644 test/CodeGen/XCore/unaligned_load.ll create mode 100644 test/CodeGen/XCore/unaligned_store.ll (limited to 'test/CodeGen/XCore') diff --git a/test/CodeGen/XCore/unaligned_load.ll b/test/CodeGen/XCore/unaligned_load.ll new file mode 100644 index 0000000..a6a5089 --- /dev/null +++ b/test/CodeGen/XCore/unaligned_load.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc -march=xcore > %t1.s +; RUN: grep "bl __misaligned_load" %t1.s | count 1 + +; Byte aligned load. Expands to call to __misaligned_load. +define i32 @align1(i32* %p) nounwind { +entry: + %0 = load i32* %p, align 1 ; [#uses=1] + ret i32 %0 +} diff --git a/test/CodeGen/XCore/unaligned_store.ll b/test/CodeGen/XCore/unaligned_store.ll new file mode 100644 index 0000000..b7a5192 --- /dev/null +++ b/test/CodeGen/XCore/unaligned_store.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc -march=xcore > %t1.s +; RUN: grep "bl __misaligned_store" %t1.s | count 1 + +; Byte aligned store. Expands to call to __misaligned_store. +define void @align1(i32* %p, i32 %val) nounwind { +entry: + store i32 %val, i32* %p, align 1 + ret void +} -- cgit v1.1