Diffstat (limited to 'arch/x86/kernel')

 arch/x86/kernel/cpu/perf_event.c           | 412
 arch/x86/kernel/cpu/perf_event_amd.c       |   2
 arch/x86/kernel/cpu/perf_event_intel.c     | 237
 arch/x86/kernel/cpu/perf_event_intel_ds.c  | 673
 arch/x86/kernel/cpu/perf_event_intel_lbr.c | 221
 arch/x86/kernel/cpu/perf_event_p4.c        | 728
 arch/x86/kernel/cpu/perf_event_p6.c        |   2
 7 files changed, 1939 insertions(+), 336 deletions(-)
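
The patch below wires PEBS precise sampling and LBR-based IP fixup into the x86 perf_event core (note the new checks on event->attr.precise and the PERF_EFLAGS_EXACT flag set by the drain paths). As a rough illustration only, and not part of this patch, the following user-space sketch shows how a tool might request such a precise-IP sample via perf_event_open(). It assumes a <linux/perf_event.h> from a kernel carrying this series; the attribute bit is spelled 'precise' here, as referenced in this diff, while later mainline kernels renamed it 'precise_ip'.

/*
 * Illustrative sketch: open a CPU-cycles event with the 'precise' bit
 * set so the PMU uses PEBS and the kernel's LBR fixup can mark samples
 * with PERF_EFLAGS_EXACT.  Field names follow this patch series and
 * are an assumption for current headers.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.type           = PERF_TYPE_HARDWARE;
	attr.config         = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period  = 100000;
	attr.sample_type    = PERF_SAMPLE_IP;
	attr.exclude_kernel = 1;	/* precise/BTS sampling is more restrictive for kernel mode */
	attr.precise        = 1;	/* request PEBS-assisted, exact-IP samples */

	fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap the ring buffer and consume PERF_RECORD_SAMPLE records ... */
	close(fd);
	return 0;
}

On hardware where the PEBS assist is trap-like rather than fault-like, the kernel code added below uses the LBR stack to walk back to the instruction that actually caused the event, which is what allows it to set PERF_EFLAGS_EXACT on the sample.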
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 60398a0..f571f51 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -29,46 +29,51 @@ #include <asm/stacktrace.h> #include <asm/nmi.h> -static u64 perf_event_mask __read_mostly; +#if 0 +#undef wrmsrl +#define wrmsrl(msr, val) \ +do { \ + trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\ + (unsigned long)(val)); \ + native_write_msr((msr), (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)); \ +} while (0) +#endif -/* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 4 +/* + * best effort, GUP based copy_from_user() that assumes IRQ or NMI context + */ +static unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n) +{ + unsigned long offset, addr = (unsigned long)from; + int type = in_nmi() ? KM_NMI : KM_IRQ0; + unsigned long size, len = 0; + struct page *page; + void *map; + int ret; -/* The size of a BTS record in bytes: */ -#define BTS_RECORD_SIZE 24 + do { + ret = __get_user_pages_fast(addr, 1, 0, &page); + if (!ret) + break; -/* The size of a per-cpu BTS buffer in bytes: */ -#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048) + offset = addr & (PAGE_SIZE - 1); + size = min(PAGE_SIZE - offset, n - len); -/* The BTS overflow threshold in bytes from the end of the buffer: */ -#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128) + map = kmap_atomic(page, type); + memcpy(to, map+offset, size); + kunmap_atomic(map, type); + put_page(page); + len += size; + to += size; + addr += size; -/* - * Bits in the debugctlmsr controlling branch tracing. - */ -#define X86_DEBUGCTL_TR (1 << 6) -#define X86_DEBUGCTL_BTS (1 << 7) -#define X86_DEBUGCTL_BTINT (1 << 8) -#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) -#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) + } while (len < n); -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields. - */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; -}; + return len; +} struct event_constraint { union { @@ -87,18 +92,39 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; +#define MAX_LBR_ENTRIES 16 + struct cpu_hw_events { + /* + * Generic x86 PMC bits + */ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - unsigned long interrupts; int enabled; - struct debug_store *ds; int n_events; int n_added; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ + + /* + * Intel DebugStore bits + */ + struct debug_store *ds; + u64 pebs_enabled; + + /* + * Intel LBR bits + */ + int lbr_users; + void *lbr_context; + struct perf_branch_stack lbr_stack; + struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + + /* + * AMD specific bits + */ struct amd_nb *amd_nb; }; @@ -112,22 +138,48 @@ struct cpu_hw_events { #define EVENT_CONSTRAINT(c, n, m) \ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) +/* + * Constraint on the Event code. 
+ */ #define INTEL_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) +/* + * Constraint on the Event code + UMask + fixed-mask + */ #define FIXED_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK) +/* + * Constraint on the Event code + UMask + */ +#define PEBS_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) + #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) #define for_each_event_constraint(e, c) \ for ((e) = (c); (e)->cmask; (e)++) +union perf_capabilities { + struct { + u64 lbr_format : 6; + u64 pebs_trap : 1; + u64 pebs_arch_reg : 1; + u64 pebs_format : 4; + u64 smm_freeze : 1; + }; + u64 capabilities; +}; + /* * struct x86_pmu - generic x86 pmu */ struct x86_pmu { + /* + * Generic x86 PMC bits + */ const char *name; int version; int (*handle_irq)(struct pt_regs *); @@ -135,6 +187,8 @@ struct x86_pmu { void (*enable_all)(void); void (*enable)(struct perf_event *); void (*disable)(struct perf_event *); + int (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc); + int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); @@ -146,10 +200,6 @@ struct x86_pmu { u64 event_mask; int apic; u64 max_period; - u64 intel_ctrl; - void (*enable_bts)(u64 config); - void (*disable_bts)(void); - struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); @@ -157,11 +207,32 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; + void (*quirks)(void); void (*cpu_prepare)(int cpu); void (*cpu_starting)(int cpu); void (*cpu_dying)(int cpu); void (*cpu_dead)(int cpu); + + /* + * Intel Arch Perfmon v2+ + */ + u64 intel_ctrl; + union perf_capabilities intel_cap; + + /* + * Intel DebugStore bits + */ + int bts, pebs; + int pebs_record_size; + void (*drain_pebs)(struct pt_regs *regs); + struct event_constraint *pebs_constraints; + + /* + * Intel LBR + */ + unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ + int lbr_nr; /* hardware stack size */ }; static struct x86_pmu x86_pmu __read_mostly; @@ -239,9 +310,10 @@ again: static atomic_t active_events; static DEFINE_MUTEX(pmc_reserve_mutex); +#ifdef CONFIG_X86_LOCAL_APIC + static bool reserve_pmc_hardware(void) { -#ifdef CONFIG_X86_LOCAL_APIC int i; if (nmi_watchdog == NMI_LOCAL_APIC) @@ -256,11 +328,9 @@ static bool reserve_pmc_hardware(void) if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) goto eventsel_fail; } -#endif return true; -#ifdef CONFIG_X86_LOCAL_APIC eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu.eventsel + i); @@ -275,12 +345,10 @@ perfctr_fail: enable_lapic_nmi_watchdog(); return false; -#endif } static void release_pmc_hardware(void) { -#ifdef CONFIG_X86_LOCAL_APIC int i; for (i = 0; i < x86_pmu.num_events; i++) { @@ -290,113 +358,23 @@ static void release_pmc_hardware(void) if (nmi_watchdog == NMI_LOCAL_APIC) enable_lapic_nmi_watchdog(); -#endif -} - -static inline bool bts_available(void) -{ - return x86_pmu.enable_bts != NULL; } -static void init_debug_store_on_cpu(int cpu) -{ - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - return; - - wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, - (u32)((u64)(unsigned long)ds), - (u32)((u64)(unsigned long)ds >> 32)); -} - -static void fini_debug_store_on_cpu(int cpu) -{ - if (!per_cpu(cpu_hw_events, cpu).ds) - return; - - wrmsr_on_cpu(cpu, 
MSR_IA32_DS_AREA, 0, 0); -} - -static void release_bts_hardware(void) -{ - int cpu; - - if (!bts_available()) - return; - - get_online_cpus(); - - for_each_online_cpu(cpu) - fini_debug_store_on_cpu(cpu); - - for_each_possible_cpu(cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - continue; - - per_cpu(cpu_hw_events, cpu).ds = NULL; - - kfree((void *)(unsigned long)ds->bts_buffer_base); - kfree(ds); - } - - put_online_cpus(); -} - -static int reserve_bts_hardware(void) -{ - int cpu, err = 0; - - if (!bts_available()) - return 0; - - get_online_cpus(); - - for_each_possible_cpu(cpu) { - struct debug_store *ds; - void *buffer; - - err = -ENOMEM; - buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); - if (unlikely(!buffer)) - break; - - ds = kzalloc(sizeof(*ds), GFP_KERNEL); - if (unlikely(!ds)) { - kfree(buffer); - break; - } - - ds->bts_buffer_base = (u64)(unsigned long)buffer; - ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = - ds->bts_buffer_base + BTS_BUFFER_SIZE; - ds->bts_interrupt_threshold = - ds->bts_absolute_maximum - BTS_OVFL_TH; +#else - per_cpu(cpu_hw_events, cpu).ds = ds; - err = 0; - } +static bool reserve_pmc_hardware(void) { return true; } +static void release_pmc_hardware(void) {} - if (err) - release_bts_hardware(); - else { - for_each_online_cpu(cpu) - init_debug_store_on_cpu(cpu); - } - - put_online_cpus(); +#endif - return err; -} +static int reserve_ds_buffers(void); +static void release_ds_buffers(void); static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { release_pmc_hardware(); - release_bts_hardware(); + release_ds_buffers(); mutex_unlock(&pmc_reserve_mutex); } } @@ -439,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) return 0; } +static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc) +{ + /* + * Generate PMC IRQs: + * (keep 'enabled' bit clear for now) + */ + hwc->config = ARCH_PERFMON_EVENTSEL_INT; + + /* + * Count user and OS events unless requested not to + */ + if (!attr->exclude_user) + hwc->config |= ARCH_PERFMON_EVENTSEL_USR; + if (!attr->exclude_kernel) + hwc->config |= ARCH_PERFMON_EVENTSEL_OS; + + return 0; +} + /* * Setup the hardware configuration for a given attr_type */ @@ -458,8 +455,11 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&active_events) == 0) { if (!reserve_pmc_hardware()) err = -EBUSY; - else - err = reserve_bts_hardware(); + else { + err = reserve_ds_buffers(); + if (err) + release_pmc_hardware(); + } } if (!err) atomic_inc(&active_events); @@ -470,23 +470,14 @@ static int __hw_perf_event_init(struct perf_event *event) event->destroy = hw_perf_event_destroy; - /* - * Generate PMC IRQs: - * (keep 'enabled' bit clear for now) - */ - hwc->config = ARCH_PERFMON_EVENTSEL_INT; - hwc->idx = -1; hwc->last_cpu = -1; hwc->last_tag = ~0ULL; - /* - * Count user and OS events unless requested not to. - */ - if (!attr->exclude_user) - hwc->config |= ARCH_PERFMON_EVENTSEL_USR; - if (!attr->exclude_kernel) - hwc->config |= ARCH_PERFMON_EVENTSEL_OS; + /* Processor specifics */ + err = x86_pmu.hw_config(attr, hwc); + if (err) + return err; if (!hwc->sample_period) { hwc->sample_period = x86_pmu.max_period; @@ -537,11 +528,11 @@ static int __hw_perf_event_init(struct perf_event *event) if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && (hwc->sample_period == 1)) { /* BTS is not supported by this architecture. 
*/ - if (!bts_available()) + if (!x86_pmu.bts) return -EOPNOTSUPP; /* BTS is currently only allowed for user-mode. */ - if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) + if (!attr->exclude_kernel) return -EOPNOTSUPP; } @@ -850,14 +841,15 @@ void hw_perf_enable(void) static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc) { - (void)checking_wrmsrl(hwc->config_base + hwc->idx, + wrmsrl(hwc->config_base + hwc->idx, hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); } static inline void x86_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config); + + wrmsrl(hwc->config_base + hwc->idx, hwc->config); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); @@ -872,7 +864,7 @@ x86_perf_event_set_period(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; - int err, ret = 0, idx = hwc->idx; + int ret = 0, idx = hwc->idx; if (idx == X86_PMC_IDX_FIXED_BTS) return 0; @@ -910,8 +902,8 @@ x86_perf_event_set_period(struct perf_event *event) */ atomic64_set(&hwc->prev_count, (u64)-left); - err = checking_wrmsrl(hwc->event_base + idx, - (u64)(-left) & x86_pmu.event_mask); + wrmsrl(hwc->event_base + idx, + (u64)(-left) & x86_pmu.event_mask); perf_event_update_userpage(event); @@ -948,7 +940,7 @@ static int x86_pmu_enable(struct perf_event *event) if (n < 0) return n; - ret = x86_schedule_events(cpuc, n, assign); + ret = x86_pmu.schedule_events(cpuc, n, assign); if (ret) return ret; /* @@ -989,6 +981,7 @@ static void x86_pmu_unthrottle(struct perf_event *event) void perf_event_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; + u64 pebs; struct cpu_hw_events *cpuc; unsigned long flags; int cpu, idx; @@ -1006,14 +999,16 @@ void perf_event_print_debug(void) rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); + rdmsrl(MSR_IA32_PEBS_ENABLE, pebs); pr_info("\n"); pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); pr_info("CPU#%d: status: %016llx\n", cpu, status); pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); + pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs); } - pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); + pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); for (idx = 0; idx < x86_pmu.num_events; idx++) { rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); @@ -1144,7 +1139,6 @@ void set_perf_event_pending(void) void perf_events_lapic_init(void) { -#ifdef CONFIG_X86_LOCAL_APIC if (!x86_pmu.apic || !x86_pmu_initialized()) return; @@ -1152,7 +1146,6 @@ void perf_events_lapic_init(void) * Always use NMI for PMU */ apic_write(APIC_LVTPC, APIC_DM_NMI); -#endif } static int __kprobes @@ -1176,9 +1169,7 @@ perf_event_nmi_handler(struct notifier_block *self, regs = args->regs; -#ifdef CONFIG_X86_LOCAL_APIC apic_write(APIC_LVTPC, APIC_DM_NMI); -#endif /* * Can't rely on the handled return value to say it was our NMI, two * events could trigger 'simultaneously' raising two back-to-back NMIs. 
@@ -1272,12 +1263,15 @@ int hw_perf_group_sched_in(struct perf_event *leader, int assign[X86_PMC_IDX_MAX]; int n0, n1, ret; + if (!x86_pmu_initialized()) + return 0; + /* n0 = total number of events */ n0 = collect_events(cpuc, leader, true); if (n0 < 0) return n0; - ret = x86_schedule_events(cpuc, n0, assign); + ret = x86_pmu.schedule_events(cpuc, n0, assign); if (ret) return ret; @@ -1327,6 +1321,9 @@ undo: #include "perf_event_amd.c" #include "perf_event_p6.c" +#include "perf_event_p4.c" +#include "perf_event_intel_lbr.c" +#include "perf_event_intel_ds.c" #include "perf_event_intel.c" static int __cpuinit @@ -1398,12 +1395,15 @@ void __init init_hw_perf_events(void) pr_cont("%s PMU driver.\n", x86_pmu.name); + if (x86_pmu.quirks) + x86_pmu.quirks(); + if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", x86_pmu.num_events, X86_PMC_MAX_GENERIC); x86_pmu.num_events = X86_PMC_MAX_GENERIC; } - perf_event_mask = (1 << x86_pmu.num_events) - 1; + x86_pmu.intel_ctrl = (1 << x86_pmu.num_events) - 1; perf_max_events = x86_pmu.num_events; if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) { @@ -1412,9 +1412,8 @@ void __init init_hw_perf_events(void) x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED; } - perf_event_mask |= + x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED; - x86_pmu.intel_ctrl = perf_event_mask; perf_events_lapic_init(); register_die_notifier(&perf_event_nmi_notifier); @@ -1439,7 +1438,7 @@ void __init init_hw_perf_events(void) pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); pr_info("... max period: %016Lx\n", x86_pmu.max_period); pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); - pr_info("... event mask: %016Lx\n", perf_event_mask); + pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); perf_cpu_notifier(x86_pmu_notifier); } @@ -1459,6 +1458,32 @@ static const struct pmu pmu = { }; /* + * validate that we can schedule this event + */ +static int validate_event(struct perf_event *event) +{ + struct cpu_hw_events *fake_cpuc; + struct event_constraint *c; + int ret = 0; + + fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); + if (!fake_cpuc) + return -ENOMEM; + + c = x86_pmu.get_event_constraints(fake_cpuc, event); + + if (!c || !c->weight) + ret = -ENOSPC; + + if (x86_pmu.put_event_constraints) + x86_pmu.put_event_constraints(fake_cpuc, event); + + kfree(fake_cpuc); + + return ret; +} + +/* * validate a single event group * * validation include: @@ -1498,7 +1523,7 @@ static int validate_group(struct perf_event *event) fake_cpuc->n_events = n; - ret = x86_schedule_events(fake_cpuc, n, NULL); + ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); out_free: kfree(fake_cpuc); @@ -1523,6 +1548,8 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) if (event->group_leader != event) err = validate_group(event); + else + err = validate_event(event); event->pmu = tmp; } @@ -1593,41 +1620,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); } -/* - * best effort, GUP based copy_from_user() that assumes IRQ or NMI context - */ -static unsigned long -copy_from_user_nmi(void *to, const void __user *from, unsigned long n) -{ - unsigned long offset, addr = (unsigned long)from; - int type = in_nmi() ? 
KM_NMI : KM_IRQ0; - unsigned long size, len = 0; - struct page *page; - void *map; - int ret; - - do { - ret = __get_user_pages_fast(addr, 1, 0, &page); - if (!ret) - break; - - offset = addr & (PAGE_SIZE - 1); - size = min(PAGE_SIZE - offset, n - len); - - map = kmap_atomic(page, type); - memcpy(to, map+offset, size); - kunmap_atomic(map, type); - put_page(page); - - len += size; - to += size; - addr += size; - - } while (len < n); - - return len; -} - static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) { unsigned long bytes; diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 573458f..358a8e3 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -363,6 +363,8 @@ static __initconst struct x86_pmu amd_pmu = { .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, .disable = x86_pmu_disable_event, + .hw_config = x86_hw_config, + .schedule_events = x86_schedule_events, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .event_map = amd_pmu_event_map, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 84bfde6..044b843 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -470,42 +470,6 @@ static u64 intel_pmu_raw_event(u64 hw_event) return hw_event & CORE_EVNTSEL_MASK; } -static void intel_pmu_enable_bts(u64 config) -{ - unsigned long debugctlmsr; - - debugctlmsr = get_debugctlmsr(); - - debugctlmsr |= X86_DEBUGCTL_TR; - debugctlmsr |= X86_DEBUGCTL_BTS; - debugctlmsr |= X86_DEBUGCTL_BTINT; - - if (!(config & ARCH_PERFMON_EVENTSEL_OS)) - debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; - - if (!(config & ARCH_PERFMON_EVENTSEL_USR)) - debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; - - update_debugctlmsr(debugctlmsr); -} - -static void intel_pmu_disable_bts(void) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - unsigned long debugctlmsr; - - if (!cpuc->ds) - return; - - debugctlmsr = get_debugctlmsr(); - - debugctlmsr &= - ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | - X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); - - update_debugctlmsr(debugctlmsr); -} - static void intel_pmu_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -514,12 +478,17 @@ static void intel_pmu_disable_all(void) if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) intel_pmu_disable_bts(); + + intel_pmu_pebs_disable_all(); + intel_pmu_lbr_disable_all(); } static void intel_pmu_enable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + intel_pmu_pebs_enable_all(); + intel_pmu_lbr_enable_all(); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { @@ -547,8 +516,7 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static inline void -intel_pmu_disable_fixed(struct hw_perf_event *hwc) +static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) { int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; @@ -557,71 +525,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc) rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; - (void)checking_wrmsrl(hwc->config_base, ctrl_val); + wrmsrl(hwc->config_base, ctrl_val); } -static void intel_pmu_drain_bts_buffer(void) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct debug_store *ds = cpuc->ds; - struct bts_record { - u64 from; - u64 to; - u64 
flags; - }; - struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; - struct bts_record *at, *top; - struct perf_output_handle handle; - struct perf_event_header header; - struct perf_sample_data data; - struct pt_regs regs; - - if (!event) - return; - - if (!ds) - return; - - at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; - top = (struct bts_record *)(unsigned long)ds->bts_index; - - if (top <= at) - return; - - ds->bts_index = ds->bts_buffer_base; - - perf_sample_data_init(&data, 0); - - data.period = event->hw.last_period; - regs.ip = 0; - - /* - * Prepare a generic sample, i.e. fill in the invariant fields. - * We will overwrite the from and to address before we output - * the sample. - */ - perf_prepare_sample(&header, &data, event, ®s); - - if (perf_output_begin(&handle, event, - header.size * (top - at), 1, 1)) - return; - - for (; at < top; at++) { - data.ip = at->from; - data.addr = at->to; - - perf_output_sample(&handle, &header, &data, event); - } - - perf_output_end(&handle); - - /* There's new data available. */ - event->hw.interrupts++; - event->pending_kill = POLL_IN; -} - -static inline void -intel_pmu_disable_event(struct perf_event *event) +static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -637,14 +544,15 @@ intel_pmu_disable_event(struct perf_event *event) } x86_pmu_disable_event(event); + + if (unlikely(event->attr.precise)) + intel_pmu_pebs_disable(event); } -static inline void -intel_pmu_enable_fixed(struct hw_perf_event *hwc) +static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) { int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; - int err; /* * Enable IRQ generation (0x8), @@ -669,7 +577,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc) rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; ctrl_val |= bits; - err = checking_wrmsrl(hwc->config_base, ctrl_val); + wrmsrl(hwc->config_base, ctrl_val); } static void intel_pmu_enable_event(struct perf_event *event) @@ -689,6 +597,9 @@ static void intel_pmu_enable_event(struct perf_event *event) return; } + if (unlikely(event->attr.precise)) + intel_pmu_pebs_enable(event); + __x86_pmu_enable_event(hwc); } @@ -762,6 +673,15 @@ again: inc_irq_stat(apic_perf_irqs); ack = status; + + intel_pmu_lbr_read(); + + /* + * PEBS overflow sets bit 62 in the global status register + */ + if (__test_and_clear_bit(62, (unsigned long *)&status)) + x86_pmu.drain_pebs(regs); + for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; @@ -791,22 +711,18 @@ done: return 1; } -static struct event_constraint bts_constraint = - EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); - static struct event_constraint * -intel_special_constraints(struct perf_event *event) +intel_bts_constraints(struct perf_event *event) { - unsigned int hw_event; - - hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK; + struct hw_perf_event *hwc = &event->hw; + unsigned int hw_event, bts_event; - if (unlikely((hw_event == - x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && - (event->hw.sample_period == 1))) { + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; + bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); + if (unlikely(hw_event == bts_event && hwc->sample_period == 1)) return &bts_constraint; - } + return NULL; } @@ -815,7 +731,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event { struct event_constraint *c; - c = 
intel_special_constraints(event); + c = intel_bts_constraints(event); + if (c) + return c; + + c = intel_pebs_constraints(event); if (c) return c; @@ -829,6 +749,8 @@ static __initconst struct x86_pmu core_pmu = { .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, .disable = x86_pmu_disable_event, + .hw_config = x86_hw_config, + .schedule_events = x86_schedule_events, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, .event_map = intel_pmu_event_map, @@ -845,6 +767,20 @@ static __initconst struct x86_pmu core_pmu = { .event_constraints = intel_core_event_constraints, }; +static void intel_pmu_cpu_starting(int cpu) +{ + init_debug_store_on_cpu(cpu); + /* + * Deal with CPUs that don't clear their LBRs on power-up. + */ + intel_pmu_lbr_reset(); +} + +static void intel_pmu_cpu_dying(int cpu) +{ + fini_debug_store_on_cpu(cpu); +} + static __initconst struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -852,6 +788,8 @@ static __initconst struct x86_pmu intel_pmu = { .enable_all = intel_pmu_enable_all, .enable = intel_pmu_enable_event, .disable = intel_pmu_disable_event, + .hw_config = x86_hw_config, + .schedule_events = x86_schedule_events, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, .event_map = intel_pmu_event_map, @@ -864,14 +802,38 @@ static __initconst struct x86_pmu intel_pmu = { * the generic event period: */ .max_period = (1ULL << 31) - 1, - .enable_bts = intel_pmu_enable_bts, - .disable_bts = intel_pmu_disable_bts, .get_event_constraints = intel_get_event_constraints, - .cpu_starting = init_debug_store_on_cpu, - .cpu_dying = fini_debug_store_on_cpu, + .cpu_starting = intel_pmu_cpu_starting, + .cpu_dying = intel_pmu_cpu_dying, }; +static void intel_clovertown_quirks(void) +{ + /* + * PEBS is unreliable due to: + * + * AJ67 - PEBS may experience CPL leaks + * AJ68 - PEBS PMI may be delayed by one event + * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] + * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS + * + * AJ67 could be worked around by restricting the OS/USR flags. + * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. + * + * AJ106 could possibly be worked around by not allowing LBR + * usage from PEBS, including the fixup. + * AJ68 could possibly be worked around by always programming + * a pebs_event_reset[0] value and coping with the lost events. + * + * But taken together it might just make sense to not enable PEBS on + * these chips. 
+ */ + printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); + x86_pmu.pebs = 0; + x86_pmu.pebs_constraints = NULL; +} + static __init int intel_pmu_init(void) { union cpuid10_edx edx; @@ -881,12 +843,13 @@ static __init int intel_pmu_init(void) int version; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { - /* check for P6 processor family */ - if (boot_cpu_data.x86 == 6) { - return p6_pmu_init(); - } else { + switch (boot_cpu_data.x86) { + case 0x6: + return p6_pmu_init(); + case 0xf: + return p4_pmu_init(); + } return -ENODEV; - } } /* @@ -916,6 +879,18 @@ static __init int intel_pmu_init(void) x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); /* + * v2 and above have a perf capabilities MSR + */ + if (version > 1) { + u64 capabilities; + + rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); + x86_pmu.intel_cap.capabilities = capabilities; + } + + intel_ds_init(); + + /* * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_model) { @@ -924,12 +899,15 @@ static __init int intel_pmu_init(void) break; case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ + x86_pmu.quirks = intel_clovertown_quirks; case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ case 29: /* six-core 45 nm xeon "Dunnington" */ memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + intel_pmu_lbr_init_core(); + x86_pmu.event_constraints = intel_core2_event_constraints; pr_cont("Core2 events, "); break; @@ -939,13 +917,18 @@ static __init int intel_pmu_init(void) memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + intel_pmu_lbr_init_nhm(); + x86_pmu.event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; + case 28: /* Atom */ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + intel_pmu_lbr_init_atom(); + x86_pmu.event_constraints = intel_gen_event_constraints; pr_cont("Atom events, "); break; @@ -955,6 +938,8 @@ static __init int intel_pmu_init(void) memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + intel_pmu_lbr_init_nhm(); + x86_pmu.event_constraints = intel_westmere_event_constraints; pr_cont("Westmere events, "); break; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c new file mode 100644 index 0000000..c59678a --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -0,0 +1,673 @@ +#ifdef CONFIG_CPU_SUP_INTEL + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 4 + +/* The size of a BTS record in bytes: */ +#define BTS_RECORD_SIZE 24 + +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SIZE PAGE_SIZE + +/* + * pebs_record_32 for p4 and core not supported + +struct pebs_record_32 { + u32 flags, ip; + u32 ax, bc, cx, dx; + u32 si, di, bp, sp; +}; + + */ + +struct pebs_record_core { + u64 flags, ip; + u64 ax, bx, cx, dx; + u64 si, di, bp, sp; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; +}; + +struct pebs_record_nhm { + u64 flags, ip; + u64 ax, bx, cx, dx; + u64 si, di, bp, sp; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; + u64 status, dla, dse, lat; +}; + +/* + * Bits in the debugctlmsr controlling branch tracing. 
+ */ +#define X86_DEBUGCTL_TR (1 << 6) +#define X86_DEBUGCTL_BTS (1 << 7) +#define X86_DEBUGCTL_BTINT (1 << 8) +#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) +#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) + +/* + * A debug store configuration. + * + * We only support architectures that use 64bit fields. + */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +}; + +static void init_debug_store_on_cpu(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds) + return; + + wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, + (u32)((u64)(unsigned long)ds), + (u32)((u64)(unsigned long)ds >> 32)); +} + +static void fini_debug_store_on_cpu(int cpu) +{ + if (!per_cpu(cpu_hw_events, cpu).ds) + return; + + wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); +} + +static void release_ds_buffers(void) +{ + int cpu; + + if (!x86_pmu.bts && !x86_pmu.pebs) + return; + + get_online_cpus(); + + for_each_online_cpu(cpu) + fini_debug_store_on_cpu(cpu); + + for_each_possible_cpu(cpu) { + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds) + continue; + + per_cpu(cpu_hw_events, cpu).ds = NULL; + + kfree((void *)(unsigned long)ds->pebs_buffer_base); + kfree((void *)(unsigned long)ds->bts_buffer_base); + kfree(ds); + } + + put_online_cpus(); +} + +static int reserve_ds_buffers(void) +{ + int cpu, err = 0; + + if (!x86_pmu.bts && !x86_pmu.pebs) + return 0; + + get_online_cpus(); + + for_each_possible_cpu(cpu) { + struct debug_store *ds; + void *buffer; + int max, thresh; + + err = -ENOMEM; + ds = kzalloc(sizeof(*ds), GFP_KERNEL); + if (unlikely(!ds)) + break; + per_cpu(cpu_hw_events, cpu).ds = ds; + + if (x86_pmu.bts) { + buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); + if (unlikely(!buffer)) + break; + + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; + thresh = max / 16; + + ds->bts_buffer_base = (u64)(unsigned long)buffer; + ds->bts_index = ds->bts_buffer_base; + ds->bts_absolute_maximum = ds->bts_buffer_base + + max * BTS_RECORD_SIZE; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - + thresh * BTS_RECORD_SIZE; + } + + if (x86_pmu.pebs) { + buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL); + if (unlikely(!buffer)) + break; + + max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; + + ds->pebs_buffer_base = (u64)(unsigned long)buffer; + ds->pebs_index = ds->pebs_buffer_base; + ds->pebs_absolute_maximum = ds->pebs_buffer_base + + max * x86_pmu.pebs_record_size; + /* + * Always use single record PEBS + */ + ds->pebs_interrupt_threshold = ds->pebs_buffer_base + + x86_pmu.pebs_record_size; + } + + err = 0; + } + + if (err) + release_ds_buffers(); + else { + for_each_online_cpu(cpu) + init_debug_store_on_cpu(cpu); + } + + put_online_cpus(); + + return err; +} + +/* + * BTS + */ + +static struct event_constraint bts_constraint = + EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); + +static void intel_pmu_enable_bts(u64 config) +{ + unsigned long debugctlmsr; + + debugctlmsr = get_debugctlmsr(); + + debugctlmsr |= X86_DEBUGCTL_TR; + debugctlmsr |= X86_DEBUGCTL_BTS; + debugctlmsr |= X86_DEBUGCTL_BTINT; + + if (!(config & ARCH_PERFMON_EVENTSEL_OS)) + debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; + + if (!(config & ARCH_PERFMON_EVENTSEL_USR)) + debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; + + update_debugctlmsr(debugctlmsr); +} + +static void intel_pmu_disable_bts(void) +{ + struct cpu_hw_events *cpuc = 
&__get_cpu_var(cpu_hw_events); + unsigned long debugctlmsr; + + if (!cpuc->ds) + return; + + debugctlmsr = get_debugctlmsr(); + + debugctlmsr &= + ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | + X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); + + update_debugctlmsr(debugctlmsr); +} + +static void intel_pmu_drain_bts_buffer(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct debug_store *ds = cpuc->ds; + struct bts_record { + u64 from; + u64 to; + u64 flags; + }; + struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; + struct bts_record *at, *top; + struct perf_output_handle handle; + struct perf_event_header header; + struct perf_sample_data data; + struct pt_regs regs; + + if (!event) + return; + + if (!ds) + return; + + at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; + top = (struct bts_record *)(unsigned long)ds->bts_index; + + if (top <= at) + return; + + ds->bts_index = ds->bts_buffer_base; + + perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; + regs.ip = 0; + + /* + * Prepare a generic sample, i.e. fill in the invariant fields. + * We will overwrite the from and to address before we output + * the sample. + */ + perf_prepare_sample(&header, &data, event, ®s); + + if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) + return; + + for (; at < top; at++) { + data.ip = at->from; + data.addr = at->to; + + perf_output_sample(&handle, &header, &data, event); + } + + perf_output_end(&handle); + + /* There's new data available. */ + event->hw.interrupts++; + event->pending_kill = POLL_IN; +} + +/* + * PEBS + */ + +static struct event_constraint intel_core_pebs_events[] = { + PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */ + PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ + PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ + PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ + PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */ + PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ + PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */ + PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ + PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_nehalem_pebs_events[] = { + PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */ + PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */ + PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */ + PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */ + PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */ + PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ + PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */ + PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ + PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint * +intel_pebs_constraints(struct perf_event *event) +{ + struct event_constraint *c; + + if (!event->attr.precise) + return NULL; + + if (x86_pmu.pebs_constraints) { + for_each_event_constraint(c, x86_pmu.pebs_constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &emptyconstraint; +} + +static void intel_pmu_pebs_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 
+ struct hw_perf_event *hwc = &event->hw; + + hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; + + cpuc->pebs_enabled |= 1ULL << hwc->idx; + WARN_ON_ONCE(cpuc->enabled); + + if (x86_pmu.intel_cap.pebs_trap) + intel_pmu_lbr_enable(event); +} + +static void intel_pmu_pebs_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + cpuc->pebs_enabled &= ~(1ULL << hwc->idx); + if (cpuc->enabled) + wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); + + hwc->config |= ARCH_PERFMON_EVENTSEL_INT; + + if (x86_pmu.intel_cap.pebs_trap) + intel_pmu_lbr_disable(event); +} + +static void intel_pmu_pebs_enable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->pebs_enabled) + wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); +} + +static void intel_pmu_pebs_disable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->pebs_enabled) + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); +} + +#include <asm/insn.h> + +static inline bool kernel_ip(unsigned long ip) +{ +#ifdef CONFIG_X86_32 + return ip > PAGE_OFFSET; +#else + return (long)ip < 0; +#endif +} + +static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long from = cpuc->lbr_entries[0].from; + unsigned long old_to, to = cpuc->lbr_entries[0].to; + unsigned long ip = regs->ip; + + /* + * We don't need to fixup if the PEBS assist is fault like + */ + if (!x86_pmu.intel_cap.pebs_trap) + return 1; + + /* + * No LBR entry, no basic block, no rewinding + */ + if (!cpuc->lbr_stack.nr || !from || !to) + return 0; + + /* + * Basic blocks should never cross user/kernel boundaries + */ + if (kernel_ip(ip) != kernel_ip(to)) + return 0; + + /* + * unsigned math, either ip is before the start (impossible) or + * the basic block is larger than 1 page (sanity) + */ + if ((ip - to) > PAGE_SIZE) + return 0; + + /* + * We sampled a branch insn, rewind using the LBR stack + */ + if (ip == to) { + regs->ip = from; + return 1; + } + + do { + struct insn insn; + u8 buf[MAX_INSN_SIZE]; + void *kaddr; + + old_to = to; + if (!kernel_ip(ip)) { + int bytes, size = MAX_INSN_SIZE; + + bytes = copy_from_user_nmi(buf, (void __user *)to, size); + if (bytes != size) + return 0; + + kaddr = buf; + } else + kaddr = (void *)to; + + kernel_insn_init(&insn, kaddr); + insn_get_length(&insn); + to += insn.length; + } while (to < ip); + + if (to == ip) { + regs->ip = old_to; + return 1; + } + + /* + * Even though we decoded the basic block, the instruction stream + * never matched the given IP, either the TO or the IP got corrupted. 
+ */ + return 0; +} + +static int intel_pmu_save_and_restart(struct perf_event *event); + +static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct debug_store *ds = cpuc->ds; + struct perf_event *event = cpuc->events[0]; /* PMC0 only */ + struct pebs_record_core *at, *top; + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; + int n; + + if (!ds || !x86_pmu.pebs) + return; + + at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; + top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; + + /* + * Whatever else happens, drain the thing + */ + ds->pebs_index = ds->pebs_buffer_base; + + if (!test_bit(0, cpuc->active_mask)) + return; + + WARN_ON_ONCE(!event); + + if (!event->attr.precise) + return; + + n = top - at; + if (n <= 0) + return; + + if (!intel_pmu_save_and_restart(event)) + return; + + /* + * Should not happen, we program the threshold at 1 and do not + * set a reset value. + */ + WARN_ON_ONCE(n > 1); + at += n - 1; + + perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; + + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + raw.size = x86_pmu.pebs_record_size; + raw.data = at; + data.raw = &raw; + } + + /* + * We use the interrupt regs as a base because the PEBS record + * does not contain a full regs set, specifically it seems to + * lack segment descriptors, which get used by things like + * user_mode(). + * + * In the simple case fix up only the IP and BP,SP regs, for + * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. + * A possible PERF_SAMPLE_REGS will have to transfer all regs. + */ + regs = *iregs; + regs.ip = at->ip; + regs.bp = at->bp; + regs.sp = at->sp; + + if (intel_pmu_pebs_fixup_ip(®s)) + regs.flags |= PERF_EFLAGS_EXACT; + else + regs.flags &= ~PERF_EFLAGS_EXACT; + + if (perf_event_overflow(event, 1, &data, ®s)) + x86_pmu_stop(event); +} + +static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct debug_store *ds = cpuc->ds; + struct pebs_record_nhm *at, *top; + struct perf_sample_data data; + struct perf_event *event = NULL; + struct perf_raw_record raw; + struct pt_regs regs; + u64 status = 0; + int bit, n; + + if (!ds || !x86_pmu.pebs) + return; + + at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; + top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; + + ds->pebs_index = ds->pebs_buffer_base; + + n = top - at; + if (n <= 0) + return; + + /* + * Should not happen, we program the threshold at 1 and do not + * set a reset value. 
+ */ + WARN_ON_ONCE(n > MAX_PEBS_EVENTS); + + for ( ; at < top; at++) { + for_each_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) { + event = cpuc->events[bit]; + if (!test_bit(bit, cpuc->active_mask)) + continue; + + WARN_ON_ONCE(!event); + + if (!event->attr.precise) + continue; + + if (__test_and_set_bit(bit, (unsigned long *)&status)) + continue; + + break; + } + + if (!event || bit >= MAX_PEBS_EVENTS) + continue; + + if (!intel_pmu_save_and_restart(event)) + continue; + + perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; + + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + raw.size = x86_pmu.pebs_record_size; + raw.data = at; + data.raw = &raw; + } + + /* + * See the comment in intel_pmu_drain_pebs_core() + */ + regs = *iregs; + regs.ip = at->ip; + regs.bp = at->bp; + regs.sp = at->sp; + + if (intel_pmu_pebs_fixup_ip(®s)) + regs.flags |= PERF_EFLAGS_EXACT; + else + regs.flags &= ~PERF_EFLAGS_EXACT; + + if (perf_event_overflow(event, 1, &data, ®s)) + x86_pmu_stop(event); + } +} + +/* + * BTS, PEBS probe and setup + */ + +static void intel_ds_init(void) +{ + /* + * No support for 32bit formats + */ + if (!boot_cpu_has(X86_FEATURE_DTES64)) + return; + + x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); + x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); + if (x86_pmu.pebs) { + char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; + int format = x86_pmu.intel_cap.pebs_format; + + switch (format) { + case 0: + printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); + x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); + x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; + x86_pmu.pebs_constraints = intel_core_pebs_events; + break; + + case 1: + printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); + x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); + x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; + x86_pmu.pebs_constraints = intel_nehalem_pebs_events; + break; + + default: + printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); + x86_pmu.pebs = 0; + break; + } + } +} + +#else /* CONFIG_CPU_SUP_INTEL */ + +static int reserve_ds_buffers(void) +{ + return 0; +} + +static void release_ds_buffers(void) +{ +} + +#endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c new file mode 100644 index 0000000..df4c98e --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -0,0 +1,221 @@ +#ifdef CONFIG_CPU_SUP_INTEL + +enum { + LBR_FORMAT_32 = 0x00, + LBR_FORMAT_LIP = 0x01, + LBR_FORMAT_EIP = 0x02, + LBR_FORMAT_EIP_FLAGS = 0x03, +}; + +/* + * We only support LBR implementations that have FREEZE_LBRS_ON_PMI + * otherwise it becomes near impossible to get a reliable stack. 
+ */ + +#define X86_DEBUGCTL_LBR (1 << 0) +#define X86_DEBUGCTL_FREEZE_LBRS_ON_PMI (1 << 11) + +static void __intel_pmu_lbr_enable(void) +{ + u64 debugctl; + + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + debugctl |= (X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI); + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); +} + +static void __intel_pmu_lbr_disable(void) +{ + u64 debugctl; + + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + debugctl &= ~(X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI); + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); +} + +static void intel_pmu_lbr_reset_32(void) +{ + int i; + + for (i = 0; i < x86_pmu.lbr_nr; i++) + wrmsrl(x86_pmu.lbr_from + i, 0); +} + +static void intel_pmu_lbr_reset_64(void) +{ + int i; + + for (i = 0; i < x86_pmu.lbr_nr; i++) { + wrmsrl(x86_pmu.lbr_from + i, 0); + wrmsrl(x86_pmu.lbr_to + i, 0); + } +} + +static void intel_pmu_lbr_reset(void) +{ + if (!x86_pmu.lbr_nr) + return; + + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) + intel_pmu_lbr_reset_32(); + else + intel_pmu_lbr_reset_64(); +} + +static void intel_pmu_lbr_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!x86_pmu.lbr_nr) + return; + + WARN_ON_ONCE(cpuc->enabled); + + /* + * Reset the LBR stack if we changed task context to + * avoid data leaks. + */ + + if (event->ctx->task && cpuc->lbr_context != event->ctx) { + intel_pmu_lbr_reset(); + cpuc->lbr_context = event->ctx; + } + + cpuc->lbr_users++; +} + +static void intel_pmu_lbr_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!x86_pmu.lbr_nr) + return; + + cpuc->lbr_users--; + WARN_ON_ONCE(cpuc->lbr_users < 0); + + if (cpuc->enabled && !cpuc->lbr_users) + __intel_pmu_lbr_disable(); +} + +static void intel_pmu_lbr_enable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->lbr_users) + __intel_pmu_lbr_enable(); +} + +static void intel_pmu_lbr_disable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->lbr_users) + __intel_pmu_lbr_disable(); +} + +static inline u64 intel_pmu_lbr_tos(void) +{ + u64 tos; + + rdmsrl(x86_pmu.lbr_tos, tos); + + return tos; +} + +static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) +{ + unsigned long mask = x86_pmu.lbr_nr - 1; + u64 tos = intel_pmu_lbr_tos(); + int i; + + for (i = 0; i < x86_pmu.lbr_nr; i++) { + unsigned long lbr_idx = (tos - i) & mask; + union { + struct { + u32 from; + u32 to; + }; + u64 lbr; + } msr_lastbranch; + + rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); + + cpuc->lbr_entries[i].from = msr_lastbranch.from; + cpuc->lbr_entries[i].to = msr_lastbranch.to; + cpuc->lbr_entries[i].flags = 0; + } + cpuc->lbr_stack.nr = i; +} + +#define LBR_FROM_FLAG_MISPRED (1ULL << 63) + +/* + * Due to lack of segmentation in Linux the effective address (offset) + * is the same as the linear address, allowing us to merge the LIP and EIP + * LBR formats. 
+ */ +static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) +{ + unsigned long mask = x86_pmu.lbr_nr - 1; + int lbr_format = x86_pmu.intel_cap.lbr_format; + u64 tos = intel_pmu_lbr_tos(); + int i; + + for (i = 0; i < x86_pmu.lbr_nr; i++) { + unsigned long lbr_idx = (tos - i) & mask; + u64 from, to, flags = 0; + + rdmsrl(x86_pmu.lbr_from + lbr_idx, from); + rdmsrl(x86_pmu.lbr_to + lbr_idx, to); + + if (lbr_format == LBR_FORMAT_EIP_FLAGS) { + flags = !!(from & LBR_FROM_FLAG_MISPRED); + from = (u64)((((s64)from) << 1) >> 1); + } + + cpuc->lbr_entries[i].from = from; + cpuc->lbr_entries[i].to = to; + cpuc->lbr_entries[i].flags = flags; + } + cpuc->lbr_stack.nr = i; +} + +static void intel_pmu_lbr_read(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!cpuc->lbr_users) + return; + + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) + intel_pmu_lbr_read_32(cpuc); + else + intel_pmu_lbr_read_64(cpuc); +} + +static void intel_pmu_lbr_init_core(void) +{ + x86_pmu.lbr_nr = 4; + x86_pmu.lbr_tos = 0x01c9; + x86_pmu.lbr_from = 0x40; + x86_pmu.lbr_to = 0x60; +} + +static void intel_pmu_lbr_init_nhm(void) +{ + x86_pmu.lbr_nr = 16; + x86_pmu.lbr_tos = 0x01c9; + x86_pmu.lbr_from = 0x680; + x86_pmu.lbr_to = 0x6c0; +} + +static void intel_pmu_lbr_init_atom(void) +{ + x86_pmu.lbr_nr = 8; + x86_pmu.lbr_tos = 0x01c9; + x86_pmu.lbr_from = 0x40; + x86_pmu.lbr_to = 0x60; +} + +#endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c new file mode 100644 index 0000000..b8a811a --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -0,0 +1,728 @@ +/* + * Netburst Perfomance Events (P4, old Xeon) + * + * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org> + * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com> + * + * For licencing details see kernel-base/COPYING + */ + +#ifdef CONFIG_CPU_SUP_INTEL + +#include <asm/perf_event_p4.h> + +/* + * array indices: 0,1 - HT threads, used with HT enabled cpu + */ +struct p4_event_template { + u32 opcode; /* ESCR event + CCCR selector */ + u64 config; /* packed predefined bits */ + int dep; /* upstream dependency event index */ + int key; /* index into p4_templates */ + u64 msr; /* + * the high 32 bits set into MSR_IA32_PEBS_ENABLE and + * the low 32 bits set into MSR_P4_PEBS_MATRIX_VERT + * for cache events + */ + unsigned int emask; /* ESCR EventMask */ + unsigned int escr_msr[2]; /* ESCR MSR for this event */ + unsigned int cntr[2]; /* counter index (offset) */ +}; + +struct p4_pmu_res { + /* maps hw_conf::idx into template for ESCR sake */ + struct p4_event_template *tpl[ARCH_P4_MAX_CCCR]; +}; + +static DEFINE_PER_CPU(struct p4_pmu_res, p4_pmu_config); + +#define P4_CACHE_EVENT_CONFIG(event, bit) \ + p4_config_pack_escr(P4_EVENT_UNPACK_EVENT(event) << P4_EVNTSEL_EVENT_SHIFT) | \ + p4_config_pack_escr((event##_##bit) << P4_EVNTSEL_EVENTMASK_SHIFT) | \ + p4_config_pack_cccr(P4_EVENT_UNPACK_SELECTOR(event) << P4_CCCR_ESCR_SELECT_SHIFT) + +static __initconst u64 p4_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + /* 1stL_cache_load_miss_retired */ + [ C(RESULT_MISS) ] = P4_CACHE_EVENT_CONFIG(P4_REPLAY_EVENT, NBOGUS) + | KEY_P4_L1D_OP_READ_RESULT_MISS, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + /* 2ndL_cache_load_miss_retired */ + [ C(RESULT_MISS) ] = 
P4_CACHE_EVENT_CONFIG(P4_REPLAY_EVENT, NBOGUS) + | KEY_P4_LL_OP_READ_RESULT_MISS, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + /* DTLB_load_miss_retired */ + [ C(RESULT_MISS) ] = P4_CACHE_EVENT_CONFIG(P4_REPLAY_EVENT, NBOGUS) + | KEY_P4_DTLB_OP_READ_RESULT_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + /* DTLB_store_miss_retired */ + [ C(RESULT_MISS) ] = P4_CACHE_EVENT_CONFIG(P4_REPLAY_EVENT, NBOGUS) + | KEY_P4_DTLB_OP_WRITE_RESULT_MISS, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + /* ITLB_reference.HIT */ + [ C(RESULT_ACCESS) ] = P4_CACHE_EVENT_CONFIG(P4_ITLB_REFERENCE, HIT) + | KEY_P4_ITLB_OP_READ_RESULT_ACCESS, + + /* ITLB_reference.MISS */ + [ C(RESULT_MISS) ] = P4_CACHE_EVENT_CONFIG(P4_ITLB_REFERENCE, MISS) + | KEY_P4_ITLB_OP_READ_RESULT_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +/* + * WARN: CCCR1 doesn't have a working enable bit so try to not + * use it if possible + * + * Also as only we start to support raw events we will need to + * append _all_ P4_EVENT_PACK'ed events here + */ +struct p4_event_template p4_templates[] = { + [0] = { + .opcode = P4_GLOBAL_POWER_EVENTS, + .config = 0, + .dep = -1, + .key = 0, + .emask = + P4_EVENT_ATTR(P4_GLOBAL_POWER_EVENTS, RUNNING), + .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .cntr = { 0, 2 }, + }, + [1] = { + .opcode = P4_INSTR_RETIRED, + .config = 0, + .dep = -1, /* needs front-end tagging */ + .key = 1, + .emask = + P4_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSNTAG) | + P4_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSNTAG), + .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .cntr = { 12, 14 }, + }, + [2] = { + .opcode = P4_BSQ_CACHE_REFERENCE, + .config = 0, + .dep = -1, + .key = 2, + .emask = + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITM), + .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, + .cntr = { 0, 2 }, + }, + [3] = { + .opcode = P4_BSQ_CACHE_REFERENCE, + .config = 0, + .dep = -1, + .key = 3, + .emask = + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) | + P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, WR_2ndL_MISS), + .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, + .cntr = { 0, 3 }, + }, + [4] = { + .opcode = P4_RETIRED_BRANCH_TYPE, + .config = 0, + .dep = -1, + .key = 4, + .emask = + P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CONDITIONAL) | + P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CALL) | + P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, RETURN) | + P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, INDIRECT), + .escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 }, + .cntr = { 4, 6 }, + }, + [5] = { + .opcode = P4_MISPRED_BRANCH_RETIRED, + .config = 0, + .dep = -1, + .key = 5, + .emask = + P4_EVENT_ATTR(P4_MISPRED_BRANCH_RETIRED, NBOGUS), + .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .cntr = { 12, 14 }, + }, + [6] = { + .opcode = P4_FSB_DATA_ACTIVITY, + .config = p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE), + .dep = -1, + .key = 6, + .emask = + P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_DRV) | + P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_OWN), + .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + 
.cntr = { 0, 2 }, + }, + [KEY_P4_L1D_OP_READ_RESULT_MISS] = { + .opcode = P4_REPLAY_EVENT, + .config = 0, + .dep = -1, + .msr = (u64)(1 << 0 | 1 << 24) << 32 | (1 << 0), + .key = KEY_P4_L1D_OP_READ_RESULT_MISS, + .emask = + P4_EVENT_ATTR(P4_REPLAY_EVENT, NBOGUS), + .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR2 }, + .cntr = { 16, 17 }, + }, + [KEY_P4_LL_OP_READ_RESULT_MISS] = { + .opcode = P4_REPLAY_EVENT, + .config = 0, + .dep = -1, + .msr = (u64)(1 << 1 | 1 << 24) << 32 | (1 << 0), + .key = KEY_P4_LL_OP_READ_RESULT_MISS, + .emask = + P4_EVENT_ATTR(P4_REPLAY_EVENT, NBOGUS), + .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR2 }, + .cntr = { 16, 17 }, + }, + [KEY_P4_DTLB_OP_READ_RESULT_MISS] = { + .opcode = P4_REPLAY_EVENT, + .config = 0, + .dep = -1, + .msr = (u64)(1 << 2 | 1 << 24) << 32 | (1 << 0), + .key = KEY_P4_DTLB_OP_READ_RESULT_MISS, + .emask = + P4_EVENT_ATTR(P4_REPLAY_EVENT, NBOGUS), + .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR2 }, + .cntr = { 16, 17 }, + }, + [KEY_P4_DTLB_OP_WRITE_RESULT_MISS] = { + .opcode = P4_REPLAY_EVENT, + .config = 0, + .dep = -1, + .msr = (u64)(1 << 2 | 1 << 24) << 32 | (1 << 1), + .key = KEY_P4_DTLB_OP_WRITE_RESULT_MISS, + .emask = + P4_EVENT_ATTR(P4_REPLAY_EVENT, NBOGUS), + .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR2 }, + .cntr = { 16, 17 }, + }, + [KEY_P4_ITLB_OP_READ_RESULT_ACCESS] = { + .opcode = P4_ITLB_REFERENCE, + .config = 0, + .dep = -1, + .msr = 0, + .key = KEY_P4_ITLB_OP_READ_RESULT_ACCESS, + .emask = + P4_EVENT_ATTR(P4_ITLB_REFERENCE, HIT), + .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, + .cntr = { 0, 2 }, + }, + [KEY_P4_ITLB_OP_READ_RESULT_MISS] = { + .opcode = P4_ITLB_REFERENCE, + .config = 0, + .dep = -1, + .msr = 0, + .key = KEY_P4_ITLB_OP_READ_RESULT_MISS, + .emask = + P4_EVENT_ATTR(P4_ITLB_REFERENCE, MISS), + .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, + .cntr = { 0, 2 }, + }, + [KEY_P4_UOP_TYPE] = { + .opcode = P4_UOP_TYPE, + .config = 0, + .dep = -1, + .key = KEY_P4_UOP_TYPE, + .emask = + P4_EVENT_ATTR(P4_UOP_TYPE, TAGLOADS) | + P4_EVENT_ATTR(P4_UOP_TYPE, TAGSTORES), + .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, + .cntr = { 16, 17 }, + }, +}; + +static u64 p4_pmu_event_map(int hw_event) +{ + struct p4_event_template *tpl; + u64 config; + + if (hw_event > ARRAY_SIZE(p4_templates)) { + printk_once(KERN_ERR "PMU: Incorrect event index\n"); + return 0; + } + tpl = &p4_templates[hw_event]; + + /* + * fill config up according to + * a predefined event template + */ + config = tpl->config; + config |= p4_config_pack_escr(P4_EVENT_UNPACK_EVENT(tpl->opcode) << P4_EVNTSEL_EVENT_SHIFT); + config |= p4_config_pack_escr(tpl->emask << P4_EVNTSEL_EVENTMASK_SHIFT); + config |= p4_config_pack_cccr(P4_EVENT_UNPACK_SELECTOR(tpl->opcode) << P4_CCCR_ESCR_SELECT_SHIFT); + config |= p4_config_pack_cccr(hw_event & P4_CCCR_RESERVED); + + return config; +} + +/* + * Note that we still have 5 events (from global events SDM list) + * intersected in opcode+emask bits so we will need another + * scheme there do distinguish templates. 
+ */
+static inline int p4_pmu_emask_match(unsigned int dst, unsigned int src)
+{
+ return dst & src;
+}
+
+static struct p4_event_template *p4_pmu_template_lookup(u64 config)
+{
+ int key = p4_config_unpack_key(config);
+
+ if (key < ARRAY_SIZE(p4_templates))
+ return &p4_templates[key];
+ else
+ return NULL;
+}
+
+/*
+ * We don't control raw events, so it's up to the caller
+ * to pass sane values (and we don't count the thread number
+ * on HT machines but allow HT-compatible specifics to be
+ * passed on)
+ */
+static u64 p4_pmu_raw_event(u64 hw_event)
+{
+ return hw_event &
+ (p4_config_pack_escr(P4_EVNTSEL_MASK_HT) |
+ p4_config_pack_cccr(P4_CCCR_MASK_HT));
+}
+
+static int p4_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+{
+ int cpu = raw_smp_processor_id();
+
+ /*
+ * The reason we query the cpu this early is that, if we get scheduled
+ * for the first time on the same cpu, we will not need to swap the
+ * thread-specific flags in the config (and will save some cpu cycles)
+ */
+
+ /* CCCR by default */
+ hwc->config = p4_config_pack_cccr(p4_default_cccr_conf(cpu));
+
+ /* Count user and OS events unless not requested to */
+ hwc->config |= p4_config_pack_escr(p4_default_escr_conf(cpu, attr->exclude_kernel,
+ attr->exclude_user));
+ /* on an HT machine we need a special bit */
+ if (p4_ht_active() && p4_ht_thread(cpu))
+ hwc->config = p4_set_ht_bit(hwc->config);
+
+ return 0;
+}
+
+static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+{
+ unsigned long dummy;
+
+ rdmsrl(hwc->config_base + hwc->idx, dummy);
+ if (dummy & P4_CCCR_OVF) {
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ ((u64)dummy) & ~P4_CCCR_OVF);
+ }
+}
+
+static inline void p4_pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * If the event gets disabled while the counter is in an overflowed
+ * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
+ * asserted again and again
+ */
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (u64)(p4_config_unpack_cccr(hwc->config)) &
+ ~P4_CCCR_ENABLE & ~P4_CCCR_OVF);
+}
+
+static void p4_pmu_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_disable_event(event);
+ }
+}
+
+static void p4_pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int thread = p4_ht_config_thread(hwc->config);
+ u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
+ u64 escr_base;
+ struct p4_event_template *tpl;
+ struct p4_pmu_res *c;
+
+ /*
+ * Some preparation work from the per-cpu private fields,
+ * since we need to find out which ESCR to use
+ */
+ c = &__get_cpu_var(p4_pmu_config);
+ tpl = c->tpl[hwc->idx];
+ if (!tpl) {
+ pr_crit("%s: Wrong index: %d\n", __func__, hwc->idx);
+ return;
+ }
+
+ if (tpl->msr) {
+ (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, tpl->msr >> 32);
+ (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, tpl->msr & 0xffffffff);
+ }
+
+ escr_base = (u64)tpl->escr_msr[thread];
+
+ /*
+ * - we don't support cascaded counters yet
+ * - and counter 1 is broken (erratum)
+ */
+ WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
+ WARN_ON_ONCE(hwc->idx == 1);
+
+ (void)checking_wrmsrl(escr_base, escr_conf);
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (u64)(p4_config_unpack_cccr(hwc->config)) | P4_CCCR_ENABLE);
+}
+
+static void p4_pmu_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_enable_event(event);
+ }
+}
+
+static int p4_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int idx, handled = 0;
+ u64 val;
+
+ data.addr = 0;
+ data.raw = NULL;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ event = cpuc->events[idx];
+ hwc = &event->hw;
+
+ WARN_ON_ONCE(hwc->idx != idx);
+
+ /*
+ * FIXME: Redundant call, actually not needed
+ * but just to check if we're screwed
+ */
+ p4_pmu_clear_cccr_ovf(hwc);
+
+ val = x86_perf_event_update(event);
+ if (val & (1ULL << (x86_pmu.event_bits - 1)))
+ continue;
+
+ /*
+ * event overflow
+ */
+ handled = 1;
+ data.period = event->hw.last_period;
+
+ if (!x86_perf_event_set_period(event))
+ continue;
+ if (perf_event_overflow(event, 1, &data, regs))
+ p4_pmu_disable_event(event);
+ }
+
+ if (handled) {
+ /* p4 quirk: unmask it again */
+ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+ inc_irq_stat(apic_perf_irqs);
+ }
+
+ return handled;
+}
+
+/*
+ * Swap the thread-specific fields according to the thread
+ * we are going to run on
+ */
+static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
+{
+ u32 escr, cccr;
+
+ /*
+ * Either we are lucky and continue on the same cpu, or there
+ * is no HT support
+ */
+ if (!p4_should_swap_ts(hwc->config, cpu))
+ return;
+
+ /*
+ * The event has migrated from another logical
+ * cpu, so we need to swap the thread-specific flags
+ */
+
+ escr = p4_config_unpack_escr(hwc->config);
+ cccr = p4_config_unpack_cccr(hwc->config);
+
+ if (p4_ht_thread(cpu)) {
+ cccr &= ~P4_CCCR_OVF_PMI_T0;
+ cccr |= P4_CCCR_OVF_PMI_T1;
+ if (escr & P4_EVNTSEL_T0_OS) {
+ escr &= ~P4_EVNTSEL_T0_OS;
+ escr |= P4_EVNTSEL_T1_OS;
+ }
+ if (escr & P4_EVNTSEL_T0_USR) {
+ escr &= ~P4_EVNTSEL_T0_USR;
+ escr |= P4_EVNTSEL_T1_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config |= P4_CONFIG_HT;
+ } else {
+ cccr &= ~P4_CCCR_OVF_PMI_T1;
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ if (escr & P4_EVNTSEL_T1_OS) {
+ escr &= ~P4_EVNTSEL_T1_OS;
+ escr |= P4_EVNTSEL_T0_OS;
+ }
+ if (escr & P4_EVNTSEL_T1_USR) {
+ escr &= ~P4_EVNTSEL_T1_USR;
+ escr |= P4_EVNTSEL_T0_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config &= ~P4_CONFIG_HT;
+ }
+}
+
+/* ESCRs are not sequential in memory so we need a map */
+static const unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
+ MSR_P4_ALF_ESCR0, /* 0 */
+ MSR_P4_ALF_ESCR1, /* 1 */
+ MSR_P4_BPU_ESCR0, /* 2 */
+ MSR_P4_BPU_ESCR1, /* 3 */
+ MSR_P4_BSU_ESCR0, /* 4 */
+ MSR_P4_BSU_ESCR1, /* 5 */
+ MSR_P4_CRU_ESCR0, /* 6 */
+ MSR_P4_CRU_ESCR1, /* 7 */
+ MSR_P4_CRU_ESCR2, /* 8 */
+ MSR_P4_CRU_ESCR3, /* 9 */
+ MSR_P4_CRU_ESCR4, /* 10 */
+ MSR_P4_CRU_ESCR5, /* 11 */
+ MSR_P4_DAC_ESCR0, /* 12 */
+ MSR_P4_DAC_ESCR1, /* 13 */
+ MSR_P4_FIRM_ESCR0, /* 14 */
+ MSR_P4_FIRM_ESCR1, /* 15 */
+ MSR_P4_FLAME_ESCR0, /* 16 */
+ MSR_P4_FLAME_ESCR1, /* 17 */
+ MSR_P4_FSB_ESCR0, /* 18 */
+ MSR_P4_FSB_ESCR1, /* 19 */
+ MSR_P4_IQ_ESCR0, /* 20 */
+ MSR_P4_IQ_ESCR1, /* 21 */
+ MSR_P4_IS_ESCR0, /* 22 */
+ MSR_P4_IS_ESCR1, /* 23 */
+ MSR_P4_ITLB_ESCR0, /* 24 */
+ MSR_P4_ITLB_ESCR1, /* 25 */
+ MSR_P4_IX_ESCR0, /* 26 */
+ MSR_P4_IX_ESCR1, /* 27 */
+ MSR_P4_MOB_ESCR0, /* 28 */
+ MSR_P4_MOB_ESCR1, /* 29 */
+ MSR_P4_MS_ESCR0, /* 30 */
+ MSR_P4_MS_ESCR1, /* 31 */
+ MSR_P4_PMH_ESCR0, /* 32 */
+ MSR_P4_PMH_ESCR1, /* 33 */
+ MSR_P4_RAT_ESCR0, /* 34 */
+ MSR_P4_RAT_ESCR1, /* 35 */
+ MSR_P4_SAAT_ESCR0, /* 36 */
+ MSR_P4_SAAT_ESCR1, /* 37 */
+ MSR_P4_SSU_ESCR0, /* 38 */
+ MSR_P4_SSU_ESCR1, /* 39 */
+ MSR_P4_TBPU_ESCR0, /* 40 */
+ MSR_P4_TBPU_ESCR1, /* 41 */
+ MSR_P4_TC_ESCR0, /* 42 */
+ MSR_P4_TC_ESCR1, /* 43 */
+ MSR_P4_U2L_ESCR0, /* 44 */
+ MSR_P4_U2L_ESCR1, /* 45 */
+};
+
+static int p4_get_escr_idx(unsigned int addr)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(p4_escr_map); i++) {
+ if (addr == p4_escr_map[i])
+ return i;
+ }
+
+ return -1;
+}
+
+static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
+{
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long escr_mask[BITS_TO_LONGS(ARCH_P4_TOTAL_ESCR)];
+
+ struct hw_perf_event *hwc;
+ struct p4_event_template *tpl;
+ struct p4_pmu_res *c;
+ int cpu = raw_smp_processor_id();
+ int escr_idx, thread, i, num;
+
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ bitmap_zero(escr_mask, ARCH_P4_TOTAL_ESCR);
+
+ c = &__get_cpu_var(p4_pmu_config);
+ /*
+ * First, find out which resources the events are going
+ * to use; if an ESCR+CCCR tuple is already borrowed
+ * then get out of here
+ */
+ for (i = 0, num = n; i < n; i++, num--) {
+ hwc = &cpuc->event_list[i]->hw;
+ tpl = p4_pmu_template_lookup(hwc->config);
+ if (!tpl)
+ goto done;
+ thread = p4_ht_thread(cpu);
+ escr_idx = p4_get_escr_idx(tpl->escr_msr[thread]);
+ if (escr_idx == -1)
+ goto done;
+
+ /* already allocated and remains on the same cpu */
+ if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
+ if (assign)
+ assign[i] = hwc->idx;
+ /* upstream dependent event */
+ if (unlikely(tpl->dep != -1))
+ printk_once(KERN_WARNING "PMU: Dep events are "
+ "not implemented yet\n");
+ goto reserve;
+ }
+
+ /* it may be already borrowed */
+ if (test_bit(tpl->cntr[thread], used_mask) ||
+ test_bit(escr_idx, escr_mask))
+ goto done;
+
+ /*
+ * The ESCR+CCCR+COUNTER tuple is available to use, so let's
+ * swap the thread-specific bits, push the assigned bits
+ * back and save the template into the per-cpu
+ * area (which will allow us to find out the ESCR
+ * to be used at the moment of "enable event via real MSR")
+ */
+ p4_pmu_swap_config_ts(hwc, cpu);
+ if (assign) {
+ assign[i] = tpl->cntr[thread];
+ c->tpl[assign[i]] = tpl;
+ }
+reserve:
+ set_bit(tpl->cntr[thread], used_mask);
+ set_bit(escr_idx, escr_mask);
+ }
+
+done:
+ return num ? -ENOSPC : 0;
+}
+
+static __initconst struct x86_pmu p4_pmu = {
+ .name = "Netburst P4/Xeon",
+ .handle_irq = p4_pmu_handle_irq,
+ .disable_all = p4_pmu_disable_all,
+ .enable_all = p4_pmu_enable_all,
+ .enable = p4_pmu_enable_event,
+ .disable = p4_pmu_disable_event,
+ .eventsel = MSR_P4_BPU_CCCR0,
+ .perfctr = MSR_P4_BPU_PERFCTR0,
+ .event_map = p4_pmu_event_map,
+ .raw_event = p4_pmu_raw_event,
+ .max_events = ARRAY_SIZE(p4_templates),
+ .get_event_constraints = x86_get_event_constraints,
+ /*
+ * If HT is disabled we may need to use all
+ * ARCH_P4_MAX_CCCR counters simultaneously,
+ * though for the moment leave it restricted,
+ * assuming HT is on
+ */
+ .num_events = ARCH_P4_MAX_CCCR,
+ .apic = 1,
+ .event_bits = 40,
+ .event_mask = (1ULL << 40) - 1,
+ .max_period = (1ULL << 39) - 1,
+ .hw_config = p4_hw_config,
+ .schedule_events = p4_pmu_schedule_events,
+};
+
+static __init int p4_pmu_init(void)
+{
+ unsigned int low, high;
+
+ /* If we get stripped -- indexing fails */
+ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+
+ rdmsr(MSR_IA32_MISC_ENABLE, low, high);
+ if (!(low & (1 << 7))) {
+ pr_cont("unsupported Netburst CPU model %d ",
+ boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ pr_cont("Netburst events, ");
+
+ x86_pmu = p4_pmu;
+
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a330485..6ff4d01 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -109,6 +109,8 @@ static __initconst struct x86_pmu p6_pmu = {
 .enable_all = p6_pmu_enable_all,
 .enable = p6_pmu_enable_event,
 .disable = p6_pmu_disable_event,
+ .hw_config = x86_hw_config,
+ .schedule_events = x86_schedule_events,
 .eventsel = MSR_P6_EVNTSEL0,
 .perfctr = MSR_P6_PERFCTR0,
 .event_map = p6_pmu_event_map,
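
Editor's note: for readers following the p4_pmu_event_map() / p4_config_* calls in the patch above, the 64-bit hw_perf_event config is treated as two 32-bit halves, one carrying the ESCR event-select bits and the other the CCCR bits, so the pack/unpack helpers reduce to shifts and masks. The stand-alone sketch below only illustrates that packing idea; the helper names and bit values are simplified, hypothetical stand-ins, not the kernel's exact definitions from perf_event_p4.h.

/* Illustrative only: a simplified P4-style config packing (assumed layout:
 * ESCR bits in the high 32 bits, CCCR bits in the low 32 bits). */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t pack_escr(uint32_t escr)  { return (uint64_t)escr << 32; }
static inline uint64_t pack_cccr(uint32_t cccr)  { return (uint64_t)cccr; }
static inline uint32_t unpack_escr(uint64_t cfg) { return (uint32_t)(cfg >> 32); }
static inline uint32_t unpack_cccr(uint64_t cfg) { return (uint32_t)(cfg & 0xffffffff); }

int main(void)
{
	/* hypothetical event-select/mask and CCCR values, not real P4 encodings */
	uint32_t escr = (0x13 << 25) | (0x1 << 9);	/* event + event mask    */
	uint32_t cccr = (0x6 << 13) | (1 << 12);	/* ESCR select, etc.     */
	uint64_t config = pack_escr(escr) | pack_cccr(cccr);

	/* unpacking recovers both halves unchanged */
	printf("config=%#llx escr=%#x cccr=%#x\n",
	       (unsigned long long)config,
	       (unsigned)unpack_escr(config), (unsigned)unpack_cccr(config));
	return 0;
}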
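Editor's note: the reservation step in p4_pmu_schedule_events() is easier to follow in isolation. Each template names one ESCR and one candidate counter per HT thread, and an event can be scheduled only while both are still free; otherwise the whole group fails with -ENOSPC. The user-space sketch below shows just that double reservation, using plain bitmasks instead of the kernel's bitmap helpers and invented resource indices, so treat it as an illustration rather than the kernel code.

/* Illustrative only: "claim ESCR + counter or fail" with made-up indices. */
#include <stdint.h>
#include <stdio.h>

struct tmpl { int cntr; int escr_idx; };

static int schedule(const struct tmpl *ev, int n, int *assign)
{
	uint64_t used_cntr = 0, used_escr = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t c = 1ULL << ev[i].cntr;
		uint64_t e = 1ULL << ev[i].escr_idx;

		/* both the counter and its ESCR must still be free */
		if ((used_cntr & c) || (used_escr & e))
			return -1;		/* -ENOSPC in the kernel */
		used_cntr |= c;
		used_escr |= e;
		assign[i] = ev[i].cntr;
	}
	return 0;
}

int main(void)
{
	/* third event reuses counter 0, so the group cannot be scheduled */
	struct tmpl ev[] = { { 0, 18 }, { 2, 19 }, { 0, 4 } };
	int assign[3];

	printf("%d\n", schedule(ev, 3, assign));	/* prints -1 */
	return 0;
}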