author		Ingo Molnar <mingo@elte.hu>	2010-01-27 08:39:39 +0100
committer	Ingo Molnar <mingo@elte.hu>	2010-01-29 09:01:44 +0100
commit		184f412c3341cd24fbd26604634a5800b83dbdc3 (patch)
tree		c6e927f54a396d6affa56e84695d24ef01d107ad /arch/x86
parent		6c9687abeb24d5b7aae7db5be070c2139ad29e29 (diff)
perf, x86: Clean up event constraints code a bit
- Remove stray debug code
- Improve ugly macros a bit
- Remove some whitespace damage
- (Also fix up some accumulated damage in perf_event.h)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephane Eranian <eranian@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
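The macro consolidation this patch performs is easier to see in isolation. Below is a minimal, self-contained sketch, not the kernel code: struct event_constraint is simplified from the initializers in the first hunk, HWEIGHT64() is approximated with GCC's __builtin_popcountll(), and the event code and counter-mask values are made up for illustration.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/* Simplified stand-in for the kernel's struct event_constraint.
	 * The anonymous union (C11/GNU) mirrors the nested braces in the
	 * { .idxmsk64[0] = ... } initializer used by the macro. */
	struct event_constraint {
		union {
			u64 idxmsk64[1];	/* bitmask of permitted counters */
		};
		int code;			/* event code */
		int cmask;			/* config bits the code matches against */
		int weight;			/* popcount of idxmsk64 */
	};

	/* Stand-in for the kernel's HWEIGHT64() population count. */
	#define HWEIGHT64(n) __builtin_popcountll(n)

	#define EVENT_CONSTRAINT(c, n, m) {	\
		{ .idxmsk64[0] = (n) },		\
		.code	= (c),			\
		.cmask	= (m),			\
		.weight	= HWEIGHT64((u64)(n)),	\
	}

	#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0)

	#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)

	int main(void)
	{
		/* Made-up table: event 0x3c restricted to counters 0 and 1;
		 * the all-zero END entry terminates the iteration. */
		const struct event_constraint table[] = {
			EVENT_CONSTRAINT(0x3c, 0x3, 0xff),
			EVENT_CONSTRAINT_END,
		};
		const struct event_constraint *e;

		for_each_event_constraint(e, table)
			printf("code=0x%x weight=%d\n", (unsigned)e->code, e->weight);
		return 0;
	}

Note how the END sentinel works: for_each_event_constraint() stops on the first entry whose cmask is zero, so constraint tables need no explicit length.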
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	37
1 file changed, 8 insertions, 29 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 66de282..fdbe248 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -93,24 +93,19 @@ struct cpu_hw_events {
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};
-#define EVENT_CONSTRAINT(c, n, m) { \
+#define EVENT_CONSTRAINT(c, n, m) { \
{ .idxmsk64[0] = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = HWEIGHT64((u64)(n)), \
}
-#define INTEL_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
-#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0)
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
-
-#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->cmask; (e)++)
+#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)
/*
* struct x86_pmu - generic x86 pmu
@@ -1276,14 +1271,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (test_bit(hwc->idx, used_mask))
break;
-#if 0
- pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
- smp_processor_id(),
- hwc->config,
- hwc->idx,
- assign ? 'y' : 'n');
-#endif
-
set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
@@ -1333,14 +1320,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (j == X86_PMC_IDX_MAX)
break;
-#if 0
- pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
- smp_processor_id(),
- hwc->config,
- j,
- assign ? 'y' : 'n');
-#endif
-
set_bit(j, used_mask);
if (assign)
@@ -2596,9 +2575,9 @@ static const struct pmu pmu = {
* validate a single event group
*
* validation include:
- * - check events are compatible which each other
- * - events do not compete for the same counter
- * - number of events <= number of counters
+ * - check events are compatible which each other
+ * - events do not compete for the same counter
+ * - number of events <= number of counters
*
* validation ensures the group can be loaded onto the
* PMU if it was the only group available.
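The two #if 0 hunks removed above sit in the fast and slow scheduling paths of x86_schedule_events(). As a rough illustration of what the fast path around them does, and not the kernel's actual implementation: each event tries to keep the counter it already occupies, a used-counter bitmask catches collisions, and any collision falls back to the slower constraint-aware pass. All names below are simplified stand-ins.

	#include <stdio.h>
	#include <stdint.h>

	/* Toy stand-in for per-event hardware state; the real code uses
	 * struct hw_perf_event and a bitmap of X86_PMC_IDX_MAX bits. */
	struct toy_event {
		int idx;	/* counter the event last ran on, -1 if none */
	};

	/*
	 * Greedy fast path in the spirit of x86_schedule_events(): keep
	 * every event on its previous counter; bail out on the first
	 * conflict so a constraint-aware pass can redo the assignment.
	 */
	static int schedule_fast(struct toy_event *events, int n, int *assign)
	{
		uint64_t used_mask = 0;
		int i;

		for (i = 0; i < n; i++) {
			int idx = events[i].idx;

			if (idx < 0 || (used_mask & (1ULL << idx)))
				return -1;	/* collision: take the slow path */

			used_mask |= 1ULL << idx;
			if (assign)
				assign[i] = idx;
		}
		return 0;
	}

	int main(void)
	{
		struct toy_event ev[] = { { .idx = 0 }, { .idx = 2 } };
		int assign[2];

		if (schedule_fast(ev, 2, assign) == 0)
			printf("fast path: counters %d and %d\n",
			       assign[0], assign[1]);
		return 0;
	}

This also makes the group-validation comment in the last hunk concrete: a group loads only if every member can claim a distinct counter, which is exactly what the used_mask bookkeeping checks.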