author     Jakob Stoklund Olesen <stoklund@2pi.dk>  2011-09-01 17:18:50 +0000
committer  Jakob Stoklund Olesen <stoklund@2pi.dk>  2011-09-01 17:18:50 +0000
commit     9d548d0343774636e72713d678a078c8e808ed29 (patch)
tree       04e15f64a3c3532669e1f5ed31c5129ae5fbc6bc /test/CodeGen/ARM
parent     61560e205a7997749f066dcceaadd5f4b9b5e1be (diff)
Prevent remat of partial register redefinitions.
An instruction that redefines only part of a larger register can never be rematerialized, since the virtual register value depends on the old value in other parts of the register.

This was fixed for the inline spiller in r138794. This patch fixes the problem for all register allocators, and includes a small test case.

<rdar://problem/10032939>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@138944 91177308-0d34-0410-b5e6-96231b3b80d8
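For illustration only, here is a minimal C++ sketch of the kind of guard the message describes; the helper name defIsFullRedef and its use of the current MachineInstr/MachineOperand iteration API are assumptions for the example, not the code actually committed in r138944:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"

using namespace llvm;

// Hypothetical helper (a sketch, not the actual r138944 change): a candidate
// for rematerialization must fully define the virtual register Reg. A def
// operand carrying a sub-register index, such as
//   %vreg6:ssub_0<def> = VLDRS <cp#0>, ...
// leaves the other lanes (ssub_1 here) untouched, so the instruction's
// result depends on the register's previous value and cannot simply be
// recomputed from scratch at the reload point.
static bool defIsFullRedef(const MachineInstr &MI, unsigned Reg) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
      continue;
    if (MO.getSubReg())
      return false; // partial redef: implicitly reads the old value
  }
  return true; // Reg is fully redefined; remat is not ruled out on this count
}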
Diffstat (limited to 'test/CodeGen/ARM')
 test/CodeGen/ARM/subreg-remat.ll | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
new file mode 100644
index 0000000..cf45c03
--- /dev/null
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source | FileCheck %s
+target triple = "thumbv7-apple-ios"
+; <rdar://problem/10032939>
+;
+; The vector %v2 is built like this:
+;
+; %vreg6:ssub_1<def> = VMOVSR %vreg0<kill>, pred:14, pred:%noreg, %vreg6<imp-def>; DPR_VFP2:%vreg6 GPR:%vreg0
+; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
+;
+; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
+; since it implicitly reads the ssub_1 sub-register.
+;
+; CHECK: f1
+; CHECK: vmov s1, r0
+; CHECK: vldr.32 s0, LCPI
+; The vector must be spilled:
+; CHECK: vstr.64 d0,
+; CHECK: asm clobber d0
+; And reloaded after the asm:
+; CHECK: vldr.64 [[D16:d[0-9]+]],
+; CHECK: vstr.64 [[D16]], [r1]
+define void @f1(float %x, <2 x float>* %p) {
+ %v1 = insertelement <2 x float> undef, float %x, i32 1
+ %v2 = insertelement <2 x float> %v1, float 0x400921FB60000000, i32 0
+ %y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
+ store <2 x float> %v2, <2 x float>* %p, align 8
+ ret void
+}