author      Jiri Olsa <jolsa@redhat.com>      2013-05-06 18:27:17 +0200
committer   Ingo Molnar <mingo@kernel.org>    2013-05-07 13:17:28 +0200
commit      524eff183f51d080a83b348d0ea97c08b3607b9a (patch)
tree        f82d50daa9e5cc77152f78c08d25ba92cc7b32de /kernel/events
parent      534c97b0950b1967bca1c753aeaed32f5db40264 (diff)
perf: Fix EXIT event notification
The perf_event_task_ctx() function needs to be called with
preemption disabled, since it checks the currently scheduled
CPU against the event's CPU.

Disable preemption while processing the task-related perf event
context, if one is defined, leaving it to chance which CPU the
task is scheduled on at that point.
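
For background, a minimal, hypothetical kernel-style sketch (not part of
this patch; struct foo_event and deliver_if_local() are made-up names)
of why a "current CPU vs. event CPU" check is only meaningful while
preemption is disabled:

#include <linux/preempt.h>
#include <linux/smp.h>

struct foo_event {
	int cpu;			/* target CPU, or -1 for "any CPU" */
};

static void deliver_if_local(struct foo_event *event)
{
	/*
	 * preempt_disable() pins the task to the current CPU for the
	 * duration of the check, so the value returned by
	 * smp_processor_id() stays valid until preempt_enable().
	 */
	preempt_disable();
	if (event->cpu == -1 || event->cpu == smp_processor_id()) {
		/* ... deliver the event on this CPU ... */
	}
	preempt_enable();
}

Without the preempt_disable()/preempt_enable() pair, the task could be
migrated right after smp_processor_id() is read, so the comparison
against event->cpu could act on a stale CPU number; this is the same
class of problem perf_event_task_ctx() has when called with preemption
enabled.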
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1367857638-27631-2-git-send-email-jolsa@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--   kernel/events/core.c   26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6b41c18..38b68a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4474,7 +4474,7 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 static void perf_event_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *task_ctx = task_event->task_ctx;
 	struct pmu *pmu;
 	int ctxn;
 
@@ -4485,20 +4485,22 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
-		ctx = task_event->task_ctx;
-		if (!ctx) {
-			ctxn = pmu->task_ctx_nr;
-			if (ctxn < 0)
-				goto next;
-			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-			if (ctx)
-				perf_event_task_ctx(ctx, task_event);
-		}
+		if (task_ctx)
+			goto next;
+		ctxn = pmu->task_ctx_nr;
+		if (ctxn < 0)
+			goto next;
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (ctx)
+			perf_event_task_ctx(ctx, task_event);
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
-	if (task_event->task_ctx)
-		perf_event_task_ctx(task_event->task_ctx, task_event);
+	if (task_ctx) {
+		preempt_disable();
+		perf_event_task_ctx(task_ctx, task_event);
+		preempt_enable();
+	}
 
 	rcu_read_unlock();
 }