author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-03-05 15:03:25 -0800 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-03-11 13:38:01 +0100 |
commit | 3f379b03fbfddd20536389a85c6456f8233d1f8d | |
tree | 2847590a23aa0d72e799bd6d65a8a794abb565ac | |
parent | 54dbf96c921513bf98484a20ef366d51944a4c4d | |
ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
Replace the calls to read_barrier_depends() in ftrace_list_func()
with rcu_dereference_raw() to improve readability. The reason that we
use rcu_dereference_raw() here is that removed entries are never
freed; instead, they are simply leaked. This is one of a very few
cases where use of rcu_dereference_raw() is the long-term right
answer. And I don't yet know of any others. ;-)
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267830207-9474-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
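The pattern the commit message relies on can be sketched outside the kernel. The program below is an illustration only, not ftrace code: every name in it (my_ops, my_list, my_register, and so on) is invented for the sketch, a C11 release store stands in for rcu_assign_pointer(), and acquire loads are used as a deliberately conservative userspace stand-in for rcu_dereference_raw(), which needs only the dependency ordering that read_barrier_depends() used to supply. The property that makes rcu_dereference_raw() sufficient is the one Paul describes: unregistered entries are unlinked but never freed, so readers never follow a pointer into freed memory and no grace period is required.

```c
/*
 * Illustration only (not kernel code): a callback list whose removed
 * entries are unlinked but never freed, so readers need no grace period.
 * All names here are invented for the sketch.  The release store plays
 * the role of rcu_assign_pointer(); the acquire loads are a conservative
 * userspace stand-in for rcu_dereference_raw(), which needs only the
 * dependency ordering that read_barrier_depends() used to supply.
 */
#include <stdatomic.h>
#include <stdio.h>

struct my_ops {
	void (*func)(unsigned long ip, unsigned long parent_ip);
	struct my_ops *_Atomic next;
};

/* List terminator, playing the role of ftrace_list_end. */
static struct my_ops my_list_end;
static struct my_ops *_Atomic my_list = &my_list_end;

/* Reader side, analogous to ftrace_list_func(): walk and invoke. */
static void my_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct my_ops *op = atomic_load_explicit(&my_list, memory_order_acquire);

	while (op != &my_list_end) {
		op->func(ip, parent_ip);
		op = atomic_load_explicit(&op->next, memory_order_acquire);
	}
}

/*
 * Writer side; writers are assumed to be serialized by the caller, as
 * ftrace serializes registration with a lock.  Fill in the new entry
 * first, then publish it with a release store.
 */
static void my_register(struct my_ops *ops)
{
	struct my_ops *head = atomic_load_explicit(&my_list, memory_order_relaxed);

	atomic_store_explicit(&ops->next, head, memory_order_relaxed);
	atomic_store_explicit(&my_list, ops, memory_order_release);
}

/* Unlink an entry but never free it; a reader that already holds a
 * pointer to it may invoke it once more, which is harmless. */
static void my_unregister(struct my_ops *ops)
{
	struct my_ops *_Atomic *link = &my_list;
	struct my_ops *cur = atomic_load_explicit(link, memory_order_relaxed);

	while (cur != &my_list_end) {
		if (cur == ops) {
			struct my_ops *nxt =
				atomic_load_explicit(&cur->next, memory_order_relaxed);
			atomic_store_explicit(link, nxt, memory_order_release);
			return;	/* intentionally not freed: "simply leaked" */
		}
		link = &cur->next;
		cur = atomic_load_explicit(link, memory_order_relaxed);
	}
}

static void hello_func(unsigned long ip, unsigned long parent_ip)
{
	printf("hello: ip=%lu parent_ip=%lu\n", ip, parent_ip);
}

static void world_func(unsigned long ip, unsigned long parent_ip)
{
	printf("world: ip=%lu parent_ip=%lu\n", ip, parent_ip);
}

int main(void)
{
	static struct my_ops a = { .func = hello_func };
	static struct my_ops b = { .func = world_func };

	my_register(&a);
	my_register(&b);
	my_list_func(1, 2);	/* runs world_func, then hello_func */

	my_unregister(&b);
	my_list_func(3, 4);	/* runs only hello_func; b is left in place */
	return 0;
}
```

Built with any C11 compiler (e.g. cc -std=c11 sketch.c, where sketch.c is just a placeholder name), the second my_list_func() call shows the unregistered entry being skipped while it stays allocated, rather than being freed behind a possible reader's back.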
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ftrace.c | 22
1 file changed, 13 insertions, 9 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8378357..8c5adc0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>

 #include <trace/events/sched.h>

@@ -88,18 +89,22 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
 #endif

+/*
+ * Traverse the ftrace_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/

 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }

@@ -154,8 +159,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);

 	if (ftrace_enabled) {
 		ftrace_func_t func;
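Note how the ordering responsibilities pair up after this change: rcu_assign_pointer() in __register_ftrace_function() supplies the release ordering that the open-coded smp_wmb() used to provide, and rcu_dereference_raw() in ftrace_list_func() supplies the dependency ordering that read_barrier_depends() used to provide. A CPU that observes the newly published ftrace_list head is therefore guaranteed to also observe that entry's initialized ->next (and the other fields the caller set up), which is exactly what the surviving comment in __register_ftrace_function() asks for; the Alpha and pointer-speculation concerns now live inside the RCU primitives rather than in hand-placed barriers, while the generated code stays essentially the same on most architectures.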