author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-03-05 15:03:27 -0800
committer | Ingo Molnar <mingo@elte.hu> | 2010-03-11 13:38:02 +0100
commit | f56e8a0765cc4374e02f4e3a79e2427b5096b075 (patch)
tree | 2139fc459bfe2d7d72843d4fa0fa946f0cd33cd4 /arch/x86
parent | 007b09243b099811124f69d492adeebe9e439f96 (diff)
x86/mce: Fix RCU lockdep splats
Create an rcu_dereference_check_mce() macro that checks for either
an RCU-sched read-side critical section (on the read side) or
mce_read_mutex being held (on the update side). Replace the uses of
rcu_dereference() in arch/x86/kernel/cpu/mcheck/mce.c with this new
macro.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267830207-9474-3-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
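[Editor's note: the key primitive here is rcu_dereference_check(), whose second argument is a lockdep expression describing when the dereference is legitimate; if the expression is false and no ordinary RCU read-side protection is detected, lockdep emits a splat. As a rough illustration of that contract, here is a minimal userspace C sketch. All names are hypothetical: a plain assertion stands in for lockdep, and thread-local flags stand in for the read-side and update-side lock state that lockdep tracks automatically.]

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for lockdep's automatically tracked state. */
static _Thread_local bool reader_lock_held;  /* cf. rcu_read_lock_sched_held() */
static _Thread_local bool update_lock_held;  /* cf. lockdep_is_held(&mce_read_mutex) */

static pthread_mutex_t update_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Sketch of the rcu_dereference_check() contract: dereferencing the
 * protected pointer is legal only if the caller is on the read side
 * or holds the update-side mutex; anything else trips the assertion
 * (where the kernel would instead emit a lockdep splat).
 */
#define deref_check(p) \
	(assert(reader_lock_held || update_lock_held), (p))

static int shared_value = 42;
static int *shared_ptr = &shared_value;

int main(void)
{
	/* Reader path: enter the read-side critical section first. */
	reader_lock_held = true;
	printf("reader sees %d\n", *deref_check(shared_ptr));
	reader_lock_held = false;

	/* Updater path: hold the update-side mutex instead. */
	pthread_mutex_lock(&update_mutex);
	update_lock_held = true;
	printf("updater sees %d\n", *deref_check(shared_ptr));
	update_lock_held = false;
	pthread_mutex_unlock(&update_mutex);

	return 0;
}

[Unlike this sketch, the real macro needs no manual flags: lockdep tracks which locks and read-side sections each task holds, and the whole check compiles away when lockdep checking is disabled.]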
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a8aacd4..4442e9e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,11 @@
 
 #include "mce-internal.h"
 
+#define rcu_dereference_check_mce(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +163,7 @@ void mce_log(struct mce *mce)
 	mce->finished = 0;
 	wmb();
 	for (;;) {
-		entry = rcu_dereference(mcelog.next);
+		entry = rcu_dereference_check_mce(mcelog.next);
 		for (;;) {
 			/*
 			 * When the buffer fills up discard new entries.
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 		return -ENOMEM;
 
 	mutex_lock(&mce_read_mutex);
-	next = rcu_dereference(mcelog.next);
+	next = rcu_dereference_check_mce(mcelog.next);
 
 	/* Only supports full reads right now */
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ timeout:
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
 	poll_wait(file, &mce_wait, wait);
-	if (rcu_dereference(mcelog.next))
+	if (rcu_dereference_check_mce(mcelog.next))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
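[Editor's note: read together with the macro definition above, each converted call site now documents its own protection, and the macro avoids repeating the lockdep expression at all three sites. At the mce_read() call site, for example, the macro expands (modulo line breaks) to:

	next = rcu_dereference_check(mcelog.next,
				     rcu_read_lock_sched_held() ||
				     lockdep_is_held(&mce_read_mutex));

Since mce_read() has just taken mce_read_mutex, the lockdep_is_held() half of the condition is satisfied there, while mce_log(), which is invoked from machine-check handling paths where preemption is disabled, is expected to satisfy rcu_read_lock_sched_held() instead.]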