diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-23 09:38:55 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-23 09:38:55 -0700 |
commit | f2e4bd2b37bf072babc6a1f6c2a7ef53b7b046ad (patch) | |
tree | 65d0ab5002c5f748a3fbca803e99b2f271072021 /kernel | |
parent | 133e887f90208d339088dd60cb1d08a72ba27288 (diff) | |
parent | 5f86515158ca86182c1dbecd546f1848121ba135 (diff) | |
download | kernel_samsung_tuna-f2e4bd2b37bf072babc6a1f6c2a7ef53b7b046ad.zip kernel_samsung_tuna-f2e4bd2b37bf072babc6a1f6c2a7ef53b7b046ad.tar.gz kernel_samsung_tuna-f2e4bd2b37bf072babc6a1f6c2a7ef53b7b046ad.tar.bz2 |
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
rcupdate: fix bug of rcu_barrier*()
profiling: fix !procfs build
Fixed trivial conflicts in 'include/linux/profile.h'
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rcupdate.c | 19 |
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 467d594..ad63af8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -119,18 +119,19 @@ static void _rcu_barrier(enum rcu_barrier type)
 	/* Take cpucontrol mutex to protect against CPU hotplug */
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
-	atomic_set(&rcu_barrier_cpu_count, 0);
 	/*
-	 * The queueing of callbacks in all CPUs must be atomic with
-	 * respect to RCU, otherwise one CPU may queue a callback,
-	 * wait for a grace period, decrement barrier count and call
-	 * complete(), while other CPUs have not yet queued anything.
-	 * So, we need to make sure that grace periods cannot complete
-	 * until all the callbacks are queued.
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
 	 */
-	rcu_read_lock();
+	atomic_set(&rcu_barrier_cpu_count, 1);
 	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	rcu_read_unlock();
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }