author	Luck, Tony <tony.luck@intel.com>	2012-04-16 16:28:01 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-04-22 16:21:23 -0700
commit	9a8bf5fd4a27ee78730355f732f5bd8eaa5349f9 (patch)
tree	785e1f1d7cc0cc9afd407c4214ab9e73102fc64b /arch/ia64/include
parent	9c81dd8dd9d72712daf3d0ba7a587ae4a46c57b3 (diff)
ia64: fix futex_atomic_cmpxchg_inatomic()
commit c76f39bddb84f93f70a5520d9253ec0317bec216 upstream.

Michel Lespinasse cleaned up the futex calling conventions in commit
37a9d912b24f ("futex: Sanitize cmpxchg_futex_value_locked API").

But the ia64 implementation was subtly broken. Gcc does not know that
register "r8" will be updated by the fault handler if the cmpxchg
instruction takes an exception, so it feels safe in letting the
initialization of r8 slide to after the cmpxchg. Result: we always
return 0, whether the user address faulted or not.

Fix by moving the initialization of r8 into the __asm__ code so gcc
won't move it.

Reported-by: <emeric.maschino@gmail.com>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=42757
Tested-by: <emeric.maschino@gmail.com>
Acked-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
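To see the constraint pattern at work, here is a minimal, self-contained
sketch. It is hypothetical and for illustration only: it uses x86-64
(which also has an "r8") rather than ia64, there is no real fault
handler behind the asm, and the names cmpxchg_user/err/prev are made up.
The point it demonstrates is the fixed shape: the register-pinned error
variable is an asm *output* and is zeroed inside the asm, so the
compiler cannot sink or drop the initialization, exactly as the patch
below does with "mov %0=r0". The buggy shape instead initialized the
variable in C ("= 0") and never told gcc the asm could write it.

/*
 * Hypothetical x86-64 sketch of the fixed pattern; not kernel code.
 * Build with: gcc -O2 sketch.c
 */
#include <stdio.h>

static inline int cmpxchg_user(unsigned int *uaddr, unsigned int oldval,
                               unsigned int newval, unsigned int *prev)
{
	/* Pinned to r8 to mirror ia64, where the fault handler writes r8. */
	register unsigned long err __asm__("r8");
	unsigned int old = oldval;

	__asm__ __volatile__(
		"xor %0, %0\n\t"           /* err = 0, done INSIDE the asm   */
		"lock cmpxchgl %3, %1\n\t" /* stand-in for the faulting insn */
		: "=r" (err), "+m" (*uaddr), "+a" (old)
		: "r" (newval)
		: "memory", "cc");

	*prev = old;
	return (int) err;
}

int main(void)
{
	unsigned int val = 5, prev;
	int err = cmpxchg_user(&val, 5, 7, &prev);
	printf("err=%d prev=%u val=%u\n", err, prev, val); /* err=0 prev=5 val=7 */
	return 0;
}

Because err appears in the output list ("=r"), gcc must treat the asm as
the point where the value is produced; with the old "= 0" initializer
and no output constraint, nothing stopped the optimizer from reordering
or eliminating the store.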
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--	arch/ia64/include/asm/futex.h	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 8428525..21ab376 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -107,15 +107,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8") = 0;
+		register unsigned long r8 __asm ("r8");
 		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;					\n"
-			"	mov ar.ccv=%3;;				\n"
-			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
+			"	mov %0=r0				\n"
+			"	mov ar.ccv=%4;;				\n"
+			"[1:]	cmpxchg4.acq %1=[%2],%3,ar.ccv		\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (prev)
+			: "=r" (r8), "=r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");