author     Steven Rostedt <srostedt@redhat.com>   2010-04-23 10:38:03 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2010-05-14 14:33:15 -0400
commit     32c0edaeaad74a7883e736ae0f3798784cfc2a80 (patch)
tree       e70784ed690172cb0f1b4365b93aa077d40219c9 /include/trace/ftrace.h
parent     80decc70afc57c87eee9d6b836aec2ecacba3457 (diff)
tracing: Remove duplicate id information in event structure
Now that the trace_event structure is embedded in the ftrace_event_call
structure, there is no need for the ftrace_event_call id field. The id
field is the same as the trace_event type field. Removing the id and
re-arranging the structure brings down the tracepoint footprint by
another 5K.

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4895024 1023812  861512 6780348  6775bc vmlinux.print
4894944 1018052  861512 6774508  675eec vmlinux.id

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
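For context, a minimal sketch of the structure relationship the message describes (field layout assumed from the commit message, not copied from the tree at this revision): the integer that used to live in ftrace_event_call as "id" is now reached through the embedded trace_event, so every former event_call->id access becomes event_call->event.type.

	/* Sketch only; unrelated members omitted, layout assumed. */
	struct trace_event {
		/* ... */
		int			type;	/* ring-buffer entry type id, assigned at registration */
	};

	struct ftrace_event_call {
		/* ... */
		struct trace_event	event;	/* embedded; its 'type' makes the old 'id' field redundant */
	};

	/* old lookup: event_call->id        new lookup: event_call->event.type */

The diff below applies exactly that substitution throughout the generated-code macros in ftrace.h.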
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h |  12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 824141d..4866c10 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -150,7 +150,7 @@
*
* entry = iter->ent;
*
- * if (entry->type != event_<call>.id) {
+ * if (entry->type != event_<call>->event.type) {
* WARN_ON_ONCE(1);
* return TRACE_TYPE_UNHANDLED;
* }
@@ -221,7 +221,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
entry = iter->ent; \
\
- if (entry->type != event->id) { \
+ if (entry->type != event->event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -257,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
entry = iter->ent; \
\
- if (entry->type != event_##call.id) { \
+ if (entry->type != event_##call.event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -409,7 +409,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
* event = trace_current_buffer_lock_reserve(&buffer,
- * event_<call>.id,
+ * event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
@@ -510,7 +510,7 @@ ftrace_raw_event_##call(void *__data, proto) \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
event = trace_current_buffer_lock_reserve(&buffer, \
- event_call->id, \
+ event_call->event.type, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
@@ -711,7 +711,7 @@ perf_trace_##call(void *__data, proto) \
"profile buffer not large enough")) \
return; \
entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
- __entry_size, event_call->id, &rctx, &irq_flags); \
+ __entry_size, event_call->event.type, &rctx, &irq_flags); \
if (!entry) \
return; \
tstruct \