author | Weiming Zhao <weimingz@codeaurora.org> | 2013-02-13 21:43:02 +0000 |
---|---|---|
committer | Weiming Zhao <weimingz@codeaurora.org> | 2013-02-13 21:43:02 +0000 |
commit | 3019fbbe6ab4c23a5a580f0cc6ba1ba1b124e1da (patch) | |
tree | 27fdebec48e730b8a6632d801ef6845617bd29ad /test | |
parent | 5867302b167a7cec0a6d282e2a72412e9b488a56 (diff) | |
Bug fix 13622: Add paired register support for inline asm with 64-bit data on ARM
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175088 91177308-0d34-0410-b5e6-96231b3b80d8
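For background: on ARM, the exclusive doubleword instructions ldrexd/strexd take their 64-bit data in a consecutive even/odd register pair, and the ${N:H} inline-asm operand modifier prints the high (odd) register of that pair. The sketch below is a minimal, hypothetical IR example of the kind of 64-bit inline asm this change is intended to support; it mirrors the f4 test in arm-modifier.ll, and the names (@load64, %addr, %val) are illustrative, not part of the commit.

```llvm
; Minimal sketch (illustrative, not from this commit): a 64-bit exclusive
; load via inline asm. With paired-register support, the i64 output $0 is
; allocated to an even/odd GPR pair, so $0 prints the even register and
; ${0:H} its odd partner, as ldrexd requires.
define i64 @load64(i64* %addr) nounwind {
entry:
  %val = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %addr, i64* %addr) nounwind
  ret i64 %val
}
```

Compiled with llc -march=arm, this should emit an ldrexd whose two destination registers form such a pair, which is what regexes like {{r[0-9]?[02468]}} and {{r[0-9]?[13579]}} in the tests below verify.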
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/ARM/arm-modifier.ll | 3
-rw-r--r-- | test/CodeGen/ARM/inlineasm-64bit.ll | 54
2 files changed, 55 insertions, 2 deletions
diff --git a/test/CodeGen/ARM/arm-modifier.ll b/test/CodeGen/ARM/arm-modifier.ll
index 5e12d8e..c747016 100644
--- a/test/CodeGen/ARM/arm-modifier.ll
+++ b/test/CodeGen/ARM/arm-modifier.ll
@@ -61,8 +61,7 @@ ret void
 define i64 @f4(i64* %val) nounwind {
 entry:
 ;CHECK: f4
-;CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r0]
-;CHECK: mov r0, [[REG1]]
+;CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
 %0 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %val, i64* %val) nounwind
 ret i64 %0
 }
diff --git a/test/CodeGen/ARM/inlineasm-64bit.ll b/test/CodeGen/ARM/inlineasm-64bit.ll
new file mode 100644
index 0000000..df5d440
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-64bit.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -O3 -march=arm | FileCheck %s
+
+; check if regs are passing correctly
+define void @i64_write(i64* %p, i64 %val) nounwind {
+; CHECK: i64_write:
+; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
+  %1 = tail call i64 asm sideeffect "1: ldrexd $0, ${0:H}, [$2]\0A strexd $0, $3, ${3:H}, [$2]\0A teq $0, #0\0A bne 1b", "=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %val) nounwind
+  ret void
+}
+
+; check if register allocation can reuse the registers
+define void @multi_writes(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind {
+entry:
+; CHECK: multi_writes:
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+
+  tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
+  %incdec.ptr = getelementptr inbounds i64* %p, i32 1
+  tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
+  tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
+  ret void
+}
+
+
+; check if callee-saved registers used by inline asm are saved/restored
+define void @foo(i64* %p, i64 %i) nounwind {
+; CHECK:foo:
+; CHECK: push {{{r[4-9]|r10|r11}}
+; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
+; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
+; CHECK: pop {{{r[4-9]|r10|r11}}
+  %1 = tail call { i64, i64 } asm sideeffect "@ atomic64_set\0A1: ldrexd $0, ${0:H}, [$3]\0Aldrexd $1, ${1:H}, [$3]\0A strexd $0, $4, ${4:H}, [$3]\0A teq $0, #0\0A bne 1b", "=&r,=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %i) nounwind
+  ret void
+}