author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2010-01-27 23:07:47 +0100
committer  Ingo Molnar <mingo@elte.hu>               2010-01-29 09:01:47 +0100
commit     1a6e21f791fe85b40a9ddbafe999ab8ccffc3f78 (patch)
tree       2224e77f3b346e588e42b2e097abcc48ad6bf68c /arch/x86
parent     ed8777fc132e589d48a0ba854fdbb5d8203b58e5 (diff)
perf_events, x86: Clean up hw_perf_*_all() implementation
Put the recursion avoidance code in the generic hook instead of
replicating it in each implementation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221122.057507285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
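The diff below is the kernel change itself. As a quick illustration of the idea, here is a minimal, self-contained userspace sketch of the pattern the patch moves to: the recursion-avoidance guard (the cpuc->enabled check plus barrier) lives once in the generic hw_perf_disable() hook, and the vendor-specific ->disable_all() callback only touches the hardware. The struct layout, the fake_msr_write() helper, and the main() driver are simplified stand-ins for illustration, not the real kernel definitions.

#include <stdio.h>

/* Simplified stand-in for the per-CPU cpu_hw_events state. */
struct cpu_hw_events {
	int enabled;
	int n_added;
};

static struct cpu_hw_events cpuc = { .enabled = 1, .n_added = 3 };

/* Stand-in for wrmsrl(): just log what the real callback would write. */
static void fake_msr_write(const char *msr, unsigned long long val)
{
	printf("wrmsrl(%s, 0x%llx)\n", msr, val);
}

/* Vendor-specific callback: no enabled check, no barrier, MSR writes only. */
static void intel_pmu_disable_all(void)
{
	fake_msr_write("MSR_CORE_PERF_GLOBAL_CTRL", 0);
}

static struct {
	void (*disable_all)(void);
} x86_pmu = { .disable_all = intel_pmu_disable_all };

/*
 * Generic hook: the guard sits here once, so every implementation
 * (p6, intel, amd) gets the recursion avoidance for free.
 */
static void hw_perf_disable(void)
{
	if (!cpuc.enabled)
		return;

	cpuc.n_added = 0;
	cpuc.enabled = 0;
	/*
	 * The kernel issues barrier() here so the flag write is ordered
	 * before the hardware writes done by ->disable_all().
	 */

	x86_pmu.disable_all();
}

int main(void)
{
	hw_perf_disable();	/* writes the (fake) MSR once */
	hw_perf_disable();	/* no-op: the guard in the generic hook short-circuits */
	return 0;
}

The hw_perf_enable() side of the patch is symmetric: the enabled check, flag update, and barrier move into the generic enable hook, and the per-vendor enable_all() callbacks shrink to pure hardware programming.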
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 59
1 file changed, 14 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 951213a..cf10839 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event)
static void p6_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
- if (!cpuc->enabled)
- return;
-
- cpuc->enabled = 0;
- barrier();
-
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (!cpuc->enabled)
- return;
-
- cpuc->enabled = 0;
- barrier();
-
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
@@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- if (!cpuc->enabled)
- return;
-
- cpuc->enabled = 0;
- /*
- * ensure we write the disable before we start disabling the
- * events proper, so that amd_pmu_enable_event() does the
- * right thing.
- */
- barrier();
-
for (idx = 0; idx < x86_pmu.num_events; idx++) {
u64 val;
@@ -1166,23 +1142,20 @@ void hw_perf_disable(void)
if (!x86_pmu_initialized())
return;
- if (cpuc->enabled)
- cpuc->n_added = 0;
+ if (!cpuc->enabled)
+ return;
+
+ cpuc->n_added = 0;
+ cpuc->enabled = 0;
+ barrier();
x86_pmu.disable_all();
}
static void p6_pmu_enable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long val;
- if (cpuc->enabled)
- return;
-
- cpuc->enabled = 1;
- barrier();
-
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (cpuc->enabled)
- return;
-
- cpuc->enabled = 1;
- barrier();
-
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- if (cpuc->enabled)
- return;
-
- cpuc->enabled = 1;
- barrier();
-
for (idx = 0; idx < x86_pmu.num_events; idx++) {
struct perf_event *event = cpuc->events[idx];
u64 val;
@@ -1417,6 +1378,10 @@ void hw_perf_enable(void)
if (!x86_pmu_initialized())
return;
+
+ if (cpuc->enabled)
+ return;
+
if (cpuc->n_added) {
/*
* apply assignment obtained either from
@@ -1461,6 +1426,10 @@ void hw_perf_enable(void)
cpuc->n_added = 0;
perf_events_lapic_init();
}
+
+ cpuc->enabled = 1;
+ barrier();
+
x86_pmu.enable_all();
}