| author | Steven Rostedt <srostedt@redhat.com> | 2010-10-19 13:17:08 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2010-10-20 15:17:57 -0400 |
| commit | d9abde2138e0a00a0d7e44676928efa0ef629d48 (patch) | |
| tree | 29b75afdf39664debd21c50af5d882856f7fa2a9 /kernel | |
| parent | 140ff89127c74b1b1c1b0152a36ea3720ccf6bc3 (diff) | |
ring-buffer: Micro-optimize with some strategic inlining
By using inline and noinline, we are able to make the fast path of
recording an event 4% faster.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
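The split the changelog describes — keep the depth check small enough to inline into the caller, and push the rarely taken failure handling into a separate `noinline` function — can be sketched outside the kernel. The snippet below is a minimal, hypothetical illustration of that pattern, not the kernel code itself: `recursion_depth`, `MAX_DEPTH`, `report_recursion()`, `recursive_lock()` and `recursive_unlock()` are made-up stand-ins for `current->trace_recursion`, `TRACE_RECURSIVE_DEPTH`, `trace_recursive_fail()`, `trace_recursive_lock()` and `trace_recursive_unlock()`, and GCC's `__attribute__((noinline))` and `__builtin_expect()` stand in for the kernel's `noinline` and `likely()`.

```c
#include <stdio.h>

#define MAX_DEPTH 16

static unsigned long recursion_depth;

/* Cold failure path: forced out of line so its instructions stay out of
 * the caller's cache footprint (the role trace_recursive_fail() plays). */
static __attribute__((noinline)) void report_recursion(void)
{
	fprintf(stderr, "recursion too deep: %lu\n", recursion_depth);
}

/* Hot path: one increment and a predictable branch, cheap to inline
 * into every caller (the role trace_recursive_lock() plays). */
static inline int recursive_lock(void)
{
	recursion_depth++;

	if (__builtin_expect(recursion_depth < MAX_DEPTH, 1))
		return 0;

	report_recursion();
	return -1;
}

static inline void recursive_unlock(void)
{
	recursion_depth--;
}

int main(void)
{
	/* Typical caller: take the lock, do the work, release it. */
	if (recursive_lock())
		return 1;

	/* ... record an event here ... */

	recursive_unlock();
	return 0;
}
```

Built with optimization (e.g. `gcc -O2`), the depth check should collapse into the caller while `report_recursion()` remains a separate out-of-line function.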
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ring_buffer.c | 23 |
1 file changed, 15 insertions(+), 8 deletions(-)
```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9f3e7a..f5007d0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 	local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
 
@@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-	current->trace_recursion++;
-
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-		return 0;
-
 	/* Disable all tracing before we do anything else */
 	tracing_off_permanent();
 
@@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void)
 		    in_nmi());
 
 	WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+	current->trace_recursion++;
+
+	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+		return 0;
+
+	trace_recursive_fail();
+
 	return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
 	WARN_ON_ONCE(!current->trace_recursion);
 
```
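The comment added in the patch, `/* Keep this code out of the fast path cache */`, spells out the intent: the `tracing_off_permanent()`/`printk_once()`/`WARN_ON_ONCE()` sequence only runs once the recursion limit is exceeded, so moving it into the out-of-line `trace_recursive_fail()` keeps those instructions off the hot path's cache footprint, while the now-trivial `trace_recursive_lock()`, `trace_recursive_unlock()` and `rb_end_commit()` are marked `inline` so the recording path avoids call overhead. That combination is what the changelog measures as the 4% fast-path improvement.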