author		Christoph Lameter <cl@linux-foundation.org>	2009-10-03 19:48:23 +0900
committer	Tejun Heo <tj@kernel.org>			2009-10-03 19:48:23 +0900
commit		e800879d50c5a528d40191528557b1bdfbccbd42 (patch)
tree		5cf00a68df5c0b63dcb8c84f00b0ecb48910c0fa /kernel
parent		4dac3e98840f11bb2d8d52fd375150c7c1912117 (diff)
this_cpu: Use this_cpu operations in RCU
RCU does not do dynamic allocations, but it increments per-CPU variables a lot. These instructions result in a move to a register and then back to memory. This patch makes RCU use the inc/dec instructions on x86 that do not need a register.

Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
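To illustrate what the commit message describes, here is a minimal sketch that is not part of the patch: demo_count is a hypothetical per-CPU counter standing in for rcu_torture_count, and the exact spelling of the accessors (for instance whether per_cpu_var() is required) depends on the kernel version.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU counter, standing in for rcu_torture_count. */
static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_increment(void)
{
	preempt_disable();

	/*
	 * Old style: __get_cpu_var() yields an lvalue, so the ++ typically
	 * compiles on x86 to a load into a register, an increment, and a
	 * store back to the per-CPU slot.
	 */
	++__get_cpu_var(demo_count);

	/*
	 * New style: __this_cpu_inc() lets x86 emit a single
	 * segment-prefixed read-modify-write (e.g. incq %gs:...) with no
	 * temporary register.  Whether the name must be wrapped in
	 * per_cpu_var(), as in the hunks below, depends on the kernel
	 * version.
	 */
	__this_cpu_inc(per_cpu_var(demo_count));

	preempt_enable();
}

The double-underscore __this_cpu_inc() variant, like __get_cpu_var(), leaves preemption handling to the caller, which is why the surrounding preempt_disable()/preempt_enable() in rcutorture.c is kept unchanged by the hunks below.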
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutorture.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 233768f..178967b 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -731,13 +731,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -786,13 +786,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();