aboutsummaryrefslogtreecommitdiffstats
path: root/test/CodeGen/XCore
diff options
context:
space:
mode:
authorRichard Osborne <richard@xmos.com>2009-07-16 10:21:18 +0000
committerRichard Osborne <richard@xmos.com>2009-07-16 10:21:18 +0000
commit7f47ce966219b8dbc37cf8c289660dd83923289f (patch)
treea5b8d856a8df2137c97b554026a6ff7ffadb6877 /test/CodeGen/XCore
parent378445303b10b092a898a75131141a8259cff50b (diff)
downloadexternal_llvm-7f47ce966219b8dbc37cf8c289660dd83923289f.zip
external_llvm-7f47ce966219b8dbc37cf8c289660dd83923289f.tar.gz
external_llvm-7f47ce966219b8dbc37cf8c289660dd83923289f.tar.bz2
Custom lower unaligned 32 bit stores and loads into libcalls. This is
a big code size win since before they were expanding to up to 16 instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75901 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/XCore')
-rw-r--r--test/CodeGen/XCore/unaligned_load.ll9
-rw-r--r--test/CodeGen/XCore/unaligned_store.ll9
2 files changed, 18 insertions, 0 deletions
diff --git a/test/CodeGen/XCore/unaligned_load.ll b/test/CodeGen/XCore/unaligned_load.ll
new file mode 100644
index 0000000..a6a5089
--- /dev/null
+++ b/test/CodeGen/XCore/unaligned_load.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | llc -march=xcore > %t1.s
+; RUN: grep "bl __misaligned_load" %t1.s | count 1
+
+; Byte aligned load. Expands to call to __misaligned_load.
+define i32 @align1(i32* %p) nounwind {
+entry:
+ %0 = load i32* %p, align 1 ; <i32> [#uses=1]
+ ret i32 %0
+}
diff --git a/test/CodeGen/XCore/unaligned_store.ll b/test/CodeGen/XCore/unaligned_store.ll
new file mode 100644
index 0000000..b7a5192
--- /dev/null
+++ b/test/CodeGen/XCore/unaligned_store.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | llc -march=xcore > %t1.s
+; RUN: grep "bl __misaligned_store" %t1.s | count 1
+
+; Byte aligned store. Expands to call to __misaligned_store.
+define void @align1(i32* %p, i32 %val) nounwind {
+entry:
+ store i32 %val, i32* %p, align 1
+ ret void
+}