author     Chandler Carruth <chandlerc@gmail.com>  2013-02-25 14:20:21 +0000
committer  Chandler Carruth <chandlerc@gmail.com>  2013-02-25 14:20:21 +0000
commit     af23f8e403d68e3f96eb5eb63e50e3aec4ea01c9 (patch)
tree       6eb71c8e84ed3a57ae9901d1a123c8d6c98367ea /test
parent     de89ecd011c453108c7641f44360f3a93af90206 (diff)
Fix the root cause of PR15348 by correctly handling alignment 0 on
memory intrinsics in the SDAG builder.

When alignment is zero, the lang ref says that *no* alignment
assumptions can be made. This is the exact opposite of the internal API
contracts of the DAG, where alignment 0 indicates that the alignment can
be made to be anything desired. There is another, more explicit
alignment that is better suited for the role of "no alignment at all":
an alignment of 1. Map the intrinsic alignment to this early so that we
don't end up generating aligned DAGs.

It is really terrifying that we've never seen this before, but we
suddenly started generating a large number of alignment 0 memcpys due
to the new code to do memcpy-based copying of POD class members. That
patch contains a bug that rounds bitfield alignments down when they are
the first field. This can in turn produce zero alignments. This fixes
weird crashes I've seen in library users of LLVM on 32-bit hosts, etc.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@176022 91177308-0d34-0410-b5e6-96231b3b80d8
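The code change itself lives in the SDAG builder and is outside this
test-only view of the commit. As context, here is a minimal C++ sketch
of the clamping the message describes; the helper name is hypothetical
and the operand layout assumes the five-operand intrinsic form visible
in the tests below, so treat this as an illustration rather than the
verbatim patch:

    #include "llvm/IR/Constants.h"    // llvm::ConstantInt
    #include "llvm/IR/Instructions.h" // llvm::CallInst

    // Hypothetical helper: read the alignment operand of a
    // memcpy/memset/memmove call (operand 3 in the five-operand form
    // used in the tests below) and map 0 to 1. In IR, align 0 means
    // "assume nothing about alignment"; in the DAG, align 0 means
    // "choose any alignment you like", so the explicit "unaligned"
    // value of 1 is the correct translation.
    static unsigned getMemIntrinsicAlignment(const llvm::CallInst &I) {
      unsigned Align = (unsigned)
          llvm::cast<llvm::ConstantInt>(I.getArgOperand(3))->getZExtValue();
      if (Align == 0)
        Align = 1; // No alignment assumptions at all.
      return Align;
    }

Doing this mapping early, before any DAG nodes are built, is what keeps
the backend from emitting the aligned moves the new tests guard against.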
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/X86/memcpy.ll | 13
-rw-r--r--  test/CodeGen/X86/memset.ll | 16
2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/X86/memcpy.ll b/test/CodeGen/X86/memcpy.ll
index 2e02e45..3372a4a 100644
--- a/test/CodeGen/X86/memcpy.ll
+++ b/test/CodeGen/X86/memcpy.ll
@@ -105,3 +105,16 @@ entry:
ret void
}
+define void @PR15348(i8* %a, i8* %b) {
+; Ensure that alignment of '0' in an @llvm.memcpy intrinsic results in
+; unaligned loads and stores.
+; LINUX: PR15348
+; LINUX: movb
+; LINUX: movb
+; LINUX: movq
+; LINUX: movq
+; LINUX: movq
+; LINUX: movq
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 17, i32 0, i1 false)
+ ret void
+}
diff --git a/test/CodeGen/X86/memset.ll b/test/CodeGen/X86/memset.ll
index b35f261..0d479f0 100644
--- a/test/CodeGen/X86/memset.ll
+++ b/test/CodeGen/X86/memset.ll
@@ -20,15 +20,18 @@ entry:
; X86: movl $0,
; X86: movl $0,
; X86-NOT: movl $0,
+; X86: ret
; XMM: xorps %xmm{{[0-9]+}}, [[Z:%xmm[0-9]+]]
; XMM: movaps [[Z]],
; XMM: movaps [[Z]],
; XMM-NOT: movaps
+; XMM: ret
; YMM: vxorps %ymm{{[0-9]+}}, %ymm{{[0-9]+}}, [[Z:%ymm[0-9]+]]
; YMM: vmovaps [[Z]],
; YMM-NOT: movaps
+; YMM: ret
call void @foo( %struct.x* %up_mvd116 ) nounwind
ret void
@@ -37,3 +40,16 @@ entry:
declare void @foo(%struct.x*)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+define void @PR15348(i8* %a) {
+; Ensure that alignment of '0' in an @llvm.memset intrinsic results in
+; unaligned stores.
+; XMM: PR15348
+; XMM: movb $0,
+; XMM: movl $0,
+; XMM: movl $0,
+; XMM: movl $0,
+; XMM: movl $0,
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i32 0, i1 false)
+ ret void
+}