author     Linus Torvalds <torvalds@linux-foundation.org>    2010-02-28 10:20:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-02-28 10:20:25 -0800
commit     6556a6743549defc32e5f90ee2cb1ecd833a44c3 (patch)
tree       622306583d4a3c13235a8bfc012854c125c597f1
parent     e0d272429a34ff143bfa04ee8e29dd4eed2964c7 (diff)
parent     1dd2980d990068e20045b90c424518cc7f3657ff (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
  perf_event, amd: Fix spinlock initialization
  perf_event: Fix preempt warning in perf_clock()
  perf tools: Flush maps on COMM events
  perf_events, x86: Split PMU definitions into separate files
  perf annotate: Handle samples not at objdump output addr boundaries
  perf_events, x86: Remove superflous MSR writes
  perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
  perf_events, x86: AMD event scheduling
  perf_events: Add new start/stop PMU callbacks
  perf_events: Report the MMAP pgoff value in bytes
  perf annotate: Defer allocating sym_priv->hist array
  perf symbols: Improve debugging information about symtab origins
  perf top: Use a macro instead of a constant variable
  perf symbols: Check the right return variable
  perf/scripts: Tag syscall_name helper as not yet available
  perf/scripts: Add perf-trace-python Documentation
  perf/scripts: Remove unnecessary PyTuple resizes
  perf/scripts: Add syscall tracing scripts
  perf/scripts: Add Python scripting engine
  perf/scripts: Remove check-perf-trace from listed scripts
  ...

Fix trivial conflict in tools/perf/util/probe-event.c
-rw-r--r--  Documentation/trace/kprobetrace.txt  57
-rw-r--r--  arch/ia64/kernel/kprobes.c  2
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c  3
-rw-r--r--  arch/powerpc/kernel/perf_event.c  10
-rw-r--r--  arch/sh/kernel/perf_callchain.c  3
-rw-r--r--  arch/sparc/kernel/perf_event.c  10
-rw-r--r--  arch/x86/include/asm/alternative.h  5
-rw-r--r--  arch/x86/include/asm/debugreg.h  3
-rw-r--r--  arch/x86/include/asm/nmi.h  1
-rw-r--r--  arch/x86/include/asm/perf_event.h  16
-rw-r--r--  arch/x86/include/asm/ptrace.h  4
-rw-r--r--  arch/x86/include/asm/stacktrace.h  2
-rw-r--r--  arch/x86/kernel/alternative.c  18
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  1854
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c  416
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  971
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c  157
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  11
-rw-r--r--  arch/x86/kernel/dumpstack_32.c  5
-rw-r--r--  arch/x86/kernel/dumpstack_64.c  5
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c  10
-rw-r--r--  arch/x86/kernel/kprobes.c  5
-rw-r--r--  arch/x86/kernel/ptrace.c  24
-rw-r--r--  arch/x86/kernel/traps.c  3
-rw-r--r--  include/linux/bitops.h  29
-rw-r--r--  include/linux/ftrace.h  7
-rw-r--r--  include/linux/ftrace_event.h  20
-rw-r--r--  include/linux/list.h  14
-rw-r--r--  include/linux/perf_event.h  55
-rw-r--r--  include/linux/syscalls.h  4
-rw-r--r--  include/trace/events/lock.h  29
-rw-r--r--  include/trace/ftrace.h  60
-rw-r--r--  include/trace/syscall.h  4
-rw-r--r--  init/Kconfig  13
-rw-r--r--  kernel/kprobes.c  33
-rw-r--r--  kernel/perf_event.c  627
-rw-r--r--  kernel/sched.c  12
-rw-r--r--  kernel/trace/Makefile  4
-rw-r--r--  kernel/trace/ftrace.c  54
-rw-r--r--  kernel/trace/trace_event_profile.c  52
-rw-r--r--  kernel/trace/trace_events_filter.c  4
-rw-r--r--  kernel/trace/trace_kprobe.c  196
-rw-r--r--  kernel/trace/trace_syscalls.c  76
-rw-r--r--  tools/perf/.gitignore  1
-rw-r--r--  tools/perf/Documentation/perf-archive.txt  22
-rw-r--r--  tools/perf/Documentation/perf-buildid-cache.txt  33
-rw-r--r--  tools/perf/Documentation/perf-probe.txt  20
-rw-r--r--  tools/perf/Documentation/perf-top.txt  2
-rw-r--r--  tools/perf/Documentation/perf-trace-perl.txt  2
-rw-r--r--  tools/perf/Documentation/perf-trace-python.txt  625
-rw-r--r--  tools/perf/Documentation/perf-trace.txt  15
-rw-r--r--  tools/perf/Documentation/perf.txt  2
-rw-r--r--  tools/perf/Makefile  51
-rw-r--r--  tools/perf/builtin-annotate.c  240
-rw-r--r--  tools/perf/builtin-buildid-cache.c  133
-rw-r--r--  tools/perf/builtin-buildid-list.c  31
-rw-r--r--  tools/perf/builtin-diff.c  74
-rw-r--r--  tools/perf/builtin-help.c  5
-rw-r--r--  tools/perf/builtin-kmem.c  48
-rw-r--r--  tools/perf/builtin-lock.c  678
-rw-r--r--  tools/perf/builtin-probe.c  94
-rw-r--r--  tools/perf/builtin-record.c  50
-rw-r--r--  tools/perf/builtin-report.c  58
-rw-r--r--  tools/perf/builtin-sched.c  32
-rw-r--r--  tools/perf/builtin-stat.c  106
-rw-r--r--  tools/perf/builtin-timechart.c  25
-rw-r--r--  tools/perf/builtin-top.c  109
-rw-r--r--  tools/perf/builtin-trace.c  34
-rw-r--r--  tools/perf/builtin.h  2
-rw-r--r--  tools/perf/command-list.txt  2
-rw-r--r--  tools/perf/design.txt  8
-rw-r--r--  tools/perf/perf-archive.sh  32
-rw-r--r--  tools/perf/perf.c  25
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.c  5
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.xs  3
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm  2
-rw-r--r--  tools/perf/scripts/perl/bin/check-perf-trace-record  7
-rw-r--r--  tools/perf/scripts/perl/bin/check-perf-trace-report  6
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-record  2
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-report  4
-rw-r--r--  tools/perf/scripts/perl/failed-syscalls.pl  38
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/Context.c  88
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py  91
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py  25
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-record  2
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-report  4
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-record  2
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-report  4
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-record  2
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-report  4
-rw-r--r--  tools/perf/scripts/python/check-perf-trace.py  83
-rw-r--r--  tools/perf/scripts/python/failed-syscalls-by-pid.py  68
-rw-r--r--  tools/perf/scripts/python/syscall-counts-by-pid.py  64
-rw-r--r--  tools/perf/scripts/python/syscall-counts.py  58
-rw-r--r--  tools/perf/util/build-id.c  39
-rw-r--r--  tools/perf/util/build-id.h  8
-rw-r--r--  tools/perf/util/data_map.c  252
-rw-r--r--  tools/perf/util/debug.c  1
-rw-r--r--  tools/perf/util/debugfs.c  17
-rw-r--r--  tools/perf/util/debugfs.h  2
-rw-r--r--  tools/perf/util/event.c  220
-rw-r--r--  tools/perf/util/event.h  79
-rw-r--r--  tools/perf/util/header.c  284
-rw-r--r--  tools/perf/util/header.h  9
-rw-r--r--  tools/perf/util/include/linux/hash.h  5
-rw-r--r--  tools/perf/util/include/linux/kernel.h  1
-rw-r--r--  tools/perf/util/map.c  52
-rw-r--r--  tools/perf/util/map.h  94
-rw-r--r--  tools/perf/util/parse-events.c  48
-rw-r--r--  tools/perf/util/probe-event.c  105
-rw-r--r--  tools/perf/util/probe-event.h  2
-rw-r--r--  tools/perf/util/probe-finder.c  203
-rw-r--r--  tools/perf/util/probe-finder.h  33
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c (renamed from tools/perf/util/trace-event-perl.c)  115
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  573
-rw-r--r--  tools/perf/util/session.c  431
-rw-r--r--  tools/perf/util/session.h  55
-rw-r--r--  tools/perf/util/string.c  65
-rw-r--r--  tools/perf/util/symbol.c  529
-rw-r--r--  tools/perf/util/symbol.h  52
-rw-r--r--  tools/perf/util/thread.c  52
-rw-r--r--  tools/perf/util/thread.h  24
-rw-r--r--  tools/perf/util/trace-event-info.c  64
-rw-r--r--  tools/perf/util/trace-event-parse.c  24
-rw-r--r--  tools/perf/util/trace-event-perl.h  55
-rw-r--r--  tools/perf/util/trace-event-read.c  18
-rw-r--r--  tools/perf/util/trace-event-scripting.c  167
-rw-r--r--  tools/perf/util/trace-event.h  10
-rw-r--r--  tools/perf/util/util.c  94
-rw-r--r--  tools/perf/util/util.h  3
-rw-r--r--  tools/perf/util/values.c  1
131 files changed, 8460 insertions, 3335 deletions
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index 47aabee..a9100b2 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -24,6 +24,7 @@ Synopsis of kprobe_events
-------------------------
p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe
r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe
+ -:[GRP/]EVENT : Clear a probe
GRP : Group name. If omitted, use "kprobes" for it.
EVENT : Event name. If omitted, the event name is generated
@@ -37,15 +38,12 @@ Synopsis of kprobe_events
@SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
$stackN : Fetch Nth entry of stack (N >= 0)
$stack : Fetch stack address.
- $argN : Fetch function argument. (N >= 0)(*)
- $retval : Fetch return value.(**)
- +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***)
+ $retval : Fetch return value.(*)
+ +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
- (*) aN may not correct on asmlinkaged functions and at the middle of
- function body.
- (**) only for return probe.
- (***) this is useful for fetching a field of data structures.
+ (*) only for return probe.
+ (**) this is useful for fetching a field of data structures.
Per-Probe Event Filtering
@@ -82,13 +80,16 @@ Usage examples
To add a probe as a new event, write a new definition to kprobe_events
as below.
- echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events
+ echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > /sys/kernel/debug/tracing/kprobe_events
This sets a kprobe on the top of do_sys_open() function with recording
-1st to 4th arguments as "myprobe" event. As this example shows, users can
-choose more familiar names for each arguments.
+1st to 4th arguments as "myprobe" event. Note, which register/stack entry is
+assigned to each function argument depends on arch-specific ABI. If you unsure
+the ABI, please try to use probe subcommand of perf-tools (you can find it
+under tools/perf/).
+As this example shows, users can choose more familiar names for each arguments.
- echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events
+ echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events
This sets a kretprobe on the return point of do_sys_open() function with
recording return value as "myretprobe" event.
@@ -97,23 +98,24 @@ recording return value as "myretprobe" event.
cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format
name: myprobe
-ID: 75
+ID: 780
format:
- field:unsigned short common_type; offset:0; size:2;
- field:unsigned char common_flags; offset:2; size:1;
- field:unsigned char common_preempt_count; offset:3; size:1;
- field:int common_pid; offset:4; size:4;
- field:int common_tgid; offset:8; size:4;
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1;signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:int common_lock_depth; offset:8; size:4; signed:1;
- field: unsigned long ip; offset:16;tsize:8;
- field: int nargs; offset:24;tsize:4;
- field: unsigned long dfd; offset:32;tsize:8;
- field: unsigned long filename; offset:40;tsize:8;
- field: unsigned long flags; offset:48;tsize:8;
- field: unsigned long mode; offset:56;tsize:8;
+ field:unsigned long __probe_ip; offset:12; size:4; signed:0;
+ field:int __probe_nargs; offset:16; size:4; signed:1;
+ field:unsigned long dfd; offset:20; size:4; signed:0;
+ field:unsigned long filename; offset:24; size:4; signed:0;
+ field:unsigned long flags; offset:28; size:4; signed:0;
+ field:unsigned long mode; offset:32; size:4; signed:0;
-print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode
+print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->__probe_ip,
+REC->dfd, REC->filename, REC->flags, REC->mode
You can see that the event has 4 arguments as in the expressions you specified.
@@ -121,6 +123,12 @@ print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, R
This clears all probe points.
+ Or,
+
+ echo -:myprobe >> kprobe_events
+
+ This clears probe points selectively.
+
Right after definition, each event is disabled by default. For tracing these
events, you need to enable it.
@@ -146,4 +154,3 @@ events, you need to enable it.
returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel
returns from do_sys_open to sys_open+0x1b).
-
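As background for the hunk above (not part of the patch), a minimal user-space sketch of driving kprobe_events from C, ending with the new "-:" selective clear. The probe name and fetch arguments simply echo the documented example; everything else is illustrative.

/* Hypothetical sketch: define a kprobe event, then clear only that event
 * with the "-:" syntax this patch documents.  Not taken from the patch. */
#include <stdio.h>
#include <stdlib.h>

static void write_kprobe_events(const char *cmd)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "a");

	if (!f) {
		perror("kprobe_events");
		exit(1);
	}
	fprintf(f, "%s\n", cmd);
	fclose(f);
}

int main(void)
{
	/* Add a probe on do_sys_open; register names depend on the arch ABI. */
	write_kprobe_events("p:myprobe do_sys_open dfd=%ax filename=%dx");

	/* ... enable the event, collect a trace, disable it ... */

	/* New with this patch: remove just this probe instead of clearing all. */
	write_kprobe_events("-:myprobe");
	return 0;
}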
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 9adac44..7026b29 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
return 1;
ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
+#if !defined(CONFIG_PREEMPT)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
ia64_psr(regs)->ri = p->ainsn.slot;
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index a3c11ca..95ad9da 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
entry->nr = 0;
- if (current->pid == 0) /* idle task? */
- return entry;
-
if (!user_mode(regs)) {
perf_callchain_kernel(regs, entry);
if (current->mm)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fb..b6cf8f1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
{
event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = cpu;
+ event->oncpu = smp_processor_id();
event->tstamp_running += event->ctx->time - event->tstamp_stopped;
if (is_software_event(event))
event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
*/
int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx, int cpu)
+ struct perf_event_context *ctx)
{
struct cpu_hw_events *cpuhw;
long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuctx->active_oncpu += n;
n = 1;
- event_sched_in(group_leader, cpu);
+ event_sched_in(group_leader);
list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
if (sub->state != PERF_EVENT_STATE_OFF) {
- event_sched_in(sub, cpu);
+ event_sched_in(sub);
++n;
}
}
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 24ea837..a9dd3ab 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
is_user = user_mode(regs);
- if (!current || current->pid == 0)
- return;
-
if (is_user && current->state != TASK_RUNNING)
return;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456..9f2b2ba 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
{
event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = cpu;
+ event->oncpu = smp_processor_id();
event->tstamp_running += event->ctx->time - event->tstamp_stopped;
if (is_software_event(event))
event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx, int cpu)
+ struct perf_event_context *ctx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
cpuctx->active_oncpu += n;
n = 1;
- event_sched_in(group_leader, cpu);
+ event_sched_in(group_leader);
list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
if (sub->state != PERF_EVENT_STATE_OFF) {
- event_sched_in(sub, cpu);
+ event_sched_in(sub);
n++;
}
}
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 69b74a7..ac80b7d 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
+extern int alternatives_text_reserved(void *start, void *end);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
+static inline int alternatives_text_reserved(void *start, void *end)
+{
+ return 0;
+}
#endif /* CONFIG_SMP */
/* alternative assembly primitive: */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 8240f76..b81002f 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -14,6 +14,9 @@
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
+/* Define reserved bits in DR6 which are always set to 1 */
+#define DR6_RESERVED (0xFFFF0FF0)
+
#define DR_TRAP0 (0x1) /* db0 */
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
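A small sketch (not from this hunk; the helper name is made up) of the kind of masking the new DR6_RESERVED constant enables: strip the always-set bits from a raw DR6 value before testing the trap bits.

/* Illustrative only: keep the meaningful DR6 bits, then check DR_TRAP0. */
#define DR6_RESERVED	(0xFFFF0FF0)
#define DR_TRAP0	(0x1)

static inline int dr6_trap0_hit(unsigned long dr6)
{
	dr6 &= ~DR6_RESERVED;		/* drop the reserved, always-set bits */
	return !!(dr6 & DR_TRAP0);	/* did debug register 0 fire? */
}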
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 139d4c1..93da9c3 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int check_nmi_watchdog(void);
extern int nmi_watchdog_enabled;
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1380367..befd172 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -27,7 +27,14 @@
/*
* Includes eventsel and unit mask as well:
*/
-#define ARCH_PERFMON_EVENT_MASK 0xffff
+
+
+#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
+#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL
+#define INTEL_ARCH_EDGE_MASK 0x00040000ULL
+#define INTEL_ARCH_INV_MASK 0x00800000ULL
+#define INTEL_ARCH_CNT_MASK 0xFF000000ULL
+#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
/*
* filter mask to validate fixed counter events.
@@ -38,7 +45,12 @@
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
-#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000
+#define INTEL_ARCH_FIXED_MASK \
+ (INTEL_ARCH_CNT_MASK| \
+ INTEL_ARCH_INV_MASK| \
+ INTEL_ARCH_EDGE_MASK|\
+ INTEL_ARCH_UNIT_MASK|\
+ INTEL_ARCH_EVTSEL_MASK)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
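For illustration only (the sample config value is made up), how the combined INTEL_ARCH_EVENT_MASK splits a raw event config into its event-select and unit-mask fields:

/* Standalone sketch of using the new mask macros as plain bitmasks. */
#include <stdio.h>

#define INTEL_ARCH_EVTSEL_MASK	0x000000FFULL
#define INTEL_ARCH_UNIT_MASK	0x0000FF00ULL
#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK | INTEL_ARCH_EVTSEL_MASK)

int main(void)
{
	unsigned long long config = 0x004301b1ULL;	/* hypothetical raw config */

	printf("event select: 0x%02llx\n", config & INTEL_ARCH_EVTSEL_MASK);
	printf("unit mask:    0x%02llx\n", (config & INTEL_ARCH_UNIT_MASK) >> 8);
	printf("event code:   0x%04llx\n", config & INTEL_ARCH_EVENT_MASK);
	return 0;
}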
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 9d369f6..2010280 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -274,10 +274,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
return 0;
}
-/* Get Nth argument at function call */
-extern unsigned long regs_get_argument_nth(struct pt_regs *regs,
- unsigned int n);
-
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 35e8912..4dab78e 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -3,8 +3,6 @@
extern int kstack_depth_to_print;
-int x86_is_stack_id(int id, char *name);
-
struct thread_info;
struct stacktrace_ops;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index de7353c..e63b80e 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -390,6 +390,24 @@ void alternatives_smp_switch(int smp)
mutex_unlock(&smp_alt);
}
+/* Return 1 if the address range is reserved for smp-alternatives */
+int alternatives_text_reserved(void *start, void *end)
+{
+ struct smp_alt_module *mod;
+ u8 **ptr;
+ u8 *text_start = start;
+ u8 *text_end = end;
+
+ list_for_each_entry(mod, &smp_alt_modules, next) {
+ if (mod->text > text_end || mod->text_end < text_start)
+ continue;
+ for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
+ if (text_start <= *ptr && text_end >= *ptr)
+ return 1;
+ }
+
+ return 0;
+}
#endif
#ifdef CONFIG_PARAVIRT
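The new helper lets other code ask whether a text range overlaps an SMP-alternatives lock site before patching it. A hypothetical caller (not part of this hunk) might use it like this:

/* Sketch: refuse to patch an address that SMP alternatives may rewrite. */
extern int alternatives_text_reserved(void *start, void *end);

static int can_patch_text(void *addr)
{
	/* A single instruction is treated as a one-byte range here. */
	if (alternatives_text_reserved(addr, addr))
		return 0;	/* reserved for smp-alternatives, leave it alone */
	return 1;
}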
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8c1c070..641ccb9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -7,6 +7,7 @@
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ * Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
@@ -22,6 +23,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
+#include <linux/bitops.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -68,26 +70,59 @@ struct debug_store {
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
+struct event_constraint {
+ union {
+ unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ u64 idxmsk64[1];
+ };
+ int code;
+ int cmask;
+ int weight;
+};
+
+struct amd_nb {
+ int nb_id; /* NorthBridge id */
+ int refcnt; /* reference count */
+ struct perf_event *owners[X86_PMC_IDX_MAX];
+ struct event_constraint event_constraints[X86_PMC_IDX_MAX];
+};
+
struct cpu_hw_events {
- struct perf_event *events[X86_PMC_IDX_MAX];
- unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long interrupts;
int enabled;
struct debug_store *ds;
-};
-struct event_constraint {
- unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int code;
+ int n_events;
+ int n_added;
+ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+ u64 tags[X86_PMC_IDX_MAX];
+ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+ struct amd_nb *amd_nb;
};
-#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
-#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
+#define __EVENT_CONSTRAINT(c, n, m, w) {\
+ { .idxmsk64[0] = (n) }, \
+ .code = (c), \
+ .cmask = (m), \
+ .weight = (w), \
+}
+
+#define EVENT_CONSTRAINT(c, n, m) \
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
-#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->idxmsk[0]; (e)++)
+#define INTEL_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+
+#define EVENT_CONSTRAINT_END \
+ EVENT_CONSTRAINT(0, 0, 0)
+
+#define for_each_event_constraint(e, c) \
+ for ((e) = (c); (e)->cmask; (e)++)
/*
* struct x86_pmu - generic x86 pmu
@@ -114,8 +149,14 @@ struct x86_pmu {
u64 intel_ctrl;
void (*enable_bts)(u64 config);
void (*disable_bts)(void);
- int (*get_event_idx)(struct cpu_hw_events *cpuc,
- struct hw_perf_event *hwc);
+
+ struct event_constraint *
+ (*get_event_constraints)(struct cpu_hw_events *cpuc,
+ struct perf_event *event);
+
+ void (*put_event_constraints)(struct cpu_hw_events *cpuc,
+ struct perf_event *event);
+ struct event_constraint *event_constraints;
};
static struct x86_pmu x86_pmu __read_mostly;
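The constraint entries introduced above carry a counter bitmask, an event code, a code mask and a precomputed weight, and each PMU flavour supplies them through the new get_event_constraints()/put_event_constraints() callbacks. The sketch below is illustrative only; the real tables and callbacks live in the perf_event_intel.c, perf_event_amd.c and perf_event_p6.c files added by this series, and the fallback to a catch-all constraint is omitted.

/*
 * Sketch, not the merged code: a constraint table built with the new
 * macros plus a minimal lookup wired into struct x86_pmu.
 *
 * INTEL_EVENT_CONSTRAINT(0x10, 0x1) expands to
 *   { .idxmsk64[0] = 0x1, .code = 0x10,
 *     .cmask = INTEL_ARCH_EVTSEL_MASK, .weight = 1 },
 * i.e. event code 0x10 may only be scheduled on counter 0.
 */
static struct event_constraint demo_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* counter 0 only */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* counter 1 only */
	EVENT_CONSTRAINT_END
};

static struct event_constraint *
demo_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	for_each_event_constraint(c, demo_event_constraints) {
		if ((event->hw.config & c->cmask) == c->code)
			return c;
	}
	return NULL;	/* the real code returns a catch-all "unconstrained" entry */
}

static __initconst struct x86_pmu demo_pmu = {
	/* ... counter programming hooks elided ... */
	.get_event_constraints	= demo_get_event_constraints,
	.event_constraints	= demo_event_constraints,
};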
@@ -124,111 +165,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
-static const struct event_constraint *event_constraints;
-
-/*
- * Not sure about some of these
- */
-static const u64 p6_perfmon_event_map[] =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
-};
-
-static u64 p6_pmu_event_map(int hw_event)
-{
- return p6_perfmon_event_map[hw_event];
-}
-
-/*
- * Event setting that is specified not to count anything.
- * We use this to effectively disable a counter.
- *
- * L2_RQSTS with 0 MESI unit mask.
- */
-#define P6_NOP_EVENT 0x0000002EULL
-
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define P6_EVNTSEL_INV_MASK 0x00800000ULL
-#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define P6_EVNTSEL_MASK \
- (P6_EVNTSEL_EVENT_MASK | \
- P6_EVNTSEL_UNIT_MASK | \
- P6_EVNTSEL_EDGE_MASK | \
- P6_EVNTSEL_INV_MASK | \
- P6_EVNTSEL_REG_MASK)
-
- return hw_event & P6_EVNTSEL_MASK;
-}
-
-static const struct event_constraint intel_p6_event_constraints[] =
-{
- EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
- EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
- EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
- EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
- EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
- EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
- EVENT_CONSTRAINT_END
-};
-
-/*
- * Intel PerfMon v3. Used on Core2 and later.
- */
-static const u64 intel_perfmon_event_map[] =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
-};
-
-static const struct event_constraint intel_core_event_constraints[] =
-{
- EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
- EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
- EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
- EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
- EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
- EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
- EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
- EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
- EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
- EVENT_CONSTRAINT_END
-};
-
-static const struct event_constraint intel_nehalem_event_constraints[] =
-{
- EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
- EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
- EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
- EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
- EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
- EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
- EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
- EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
- EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
- EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
- EVENT_CONSTRAINT_END
-};
-
-static u64 intel_pmu_event_map(int hw_event)
-{
- return intel_perfmon_event_map[hw_event];
-}
+static int x86_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx);
/*
* Generalized hw caching related hw_event table, filled
@@ -245,424 +183,6 @@ static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
-static __initconst u64 nehalem_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
- [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
- [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
- [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
- [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
- [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-static __initconst u64 core2_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-static __initconst u64 atom_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
- [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK \
- (CORE_EVNTSEL_EVENT_MASK | \
- CORE_EVNTSEL_UNIT_MASK | \
- CORE_EVNTSEL_EDGE_MASK | \
- CORE_EVNTSEL_INV_MASK | \
- CORE_EVNTSEL_REG_MASK)
-
- return hw_event & CORE_EVNTSEL_MASK;
-}
-
-static __initconst u64 amd_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
- [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
- [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
- [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
- [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
- [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
-/*
- * AMD Performance Monitor K7 and later.
- */
-static const u64 amd_perfmon_event_map[] =
-{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
-};
-
-static u64 amd_pmu_event_map(int hw_event)
-{
- return amd_perfmon_event_map[hw_event];
-}
-
-static u64 amd_pmu_raw_event(u64 hw_event)
-{
-#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
-#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
-#define K7_EVNTSEL_INV_MASK 0x000800000ULL
-#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK \
- (K7_EVNTSEL_EVENT_MASK | \
- K7_EVNTSEL_UNIT_MASK | \
- K7_EVNTSEL_EDGE_MASK | \
- K7_EVNTSEL_INV_MASK | \
- K7_EVNTSEL_REG_MASK)
-
- return hw_event & K7_EVNTSEL_MASK;
-}
-
/*
* Propagate event elapsed time into the generic event.
* Can only be executed on the CPU where the event is active.
@@ -914,42 +434,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
return 0;
}
-static void intel_pmu_enable_bts(u64 config)
-{
- unsigned long debugctlmsr;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr |= X86_DEBUGCTL_TR;
- debugctlmsr |= X86_DEBUGCTL_BTS;
- debugctlmsr |= X86_DEBUGCTL_BTINT;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_OS))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_USR))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
- update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long debugctlmsr;
-
- if (!cpuc->ds)
- return;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr &=
- ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
- X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
- update_debugctlmsr(debugctlmsr);
-}
-
/*
* Setup the hardware configuration for a given attr_type
*/
@@ -988,6 +472,8 @@ static int __hw_perf_event_init(struct perf_event *event)
hwc->config = ARCH_PERFMON_EVENTSEL_INT;
hwc->idx = -1;
+ hwc->last_cpu = -1;
+ hwc->last_tag = ~0ULL;
/*
* Count user and OS events unless requested not to.
@@ -1056,216 +542,323 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
-static void p6_pmu_disable_all(void)
+static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- u64 val;
-
- if (!cpuc->enabled)
- return;
+ int idx;
- cpuc->enabled = 0;
- barrier();
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ u64 val;
- /* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ rdmsrl(x86_pmu.eventsel + idx, val);
+ if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+ continue;
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(x86_pmu.eventsel + idx, val);
+ }
}
-static void intel_pmu_disable_all(void)
+void hw_perf_disable(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ if (!x86_pmu_initialized())
+ return;
+
if (!cpuc->enabled)
return;
+ cpuc->n_added = 0;
cpuc->enabled = 0;
barrier();
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
- intel_pmu_disable_bts();
+ x86_pmu.disable_all();
}
-static void amd_pmu_disable_all(void)
+static void x86_pmu_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- if (!cpuc->enabled)
- return;
-
- cpuc->enabled = 0;
- /*
- * ensure we write the disable before we start disabling the
- * events proper, so that amd_pmu_enable_event() does the
- * right thing.
- */
- barrier();
-
for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
- continue;
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+
+ val = event->hw.config;
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(x86_pmu.eventsel + idx, val);
}
}
-void hw_perf_disable(void)
+static const struct pmu pmu;
+
+static inline int is_x86_event(struct perf_event *event)
{
- if (!x86_pmu_initialized())
- return;
- return x86_pmu.disable_all();
+ return event->pmu == &pmu;
}
-static void p6_pmu_enable_all(void)
+static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long val;
+ struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ int i, j, w, wmax, num = 0;
+ struct hw_perf_event *hwc;
- if (cpuc->enabled)
- return;
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
- cpuc->enabled = 1;
- barrier();
+ for (i = 0; i < n; i++) {
+ constraints[i] =
+ x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+ }
- /* p6 only has one enable register */
- rdmsrl(MSR_P6_EVNTSEL0, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_P6_EVNTSEL0, val);
-}
+ /*
+ * fastpath, try to reuse previous register
+ */
+ for (i = 0; i < n; i++) {
+ hwc = &cpuc->event_list[i]->hw;
+ c = constraints[i];
-static void intel_pmu_enable_all(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ /* never assigned */
+ if (hwc->idx == -1)
+ break;
- if (cpuc->enabled)
- return;
+ /* constraint still honored */
+ if (!test_bit(hwc->idx, c->idxmsk))
+ break;
- cpuc->enabled = 1;
- barrier();
+ /* not already used */
+ if (test_bit(hwc->idx, used_mask))
+ break;
+
+ set_bit(hwc->idx, used_mask);
+ if (assign)
+ assign[i] = hwc->idx;
+ }
+ if (i == n)
+ goto done;
+
+ /*
+ * begin slow path
+ */
+
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+
+ /*
+ * weight = number of possible counters
+ *
+ * 1 = most constrained, only works on one counter
+ * wmax = least constrained, works on any counter
+ *
+ * assign events to counters starting with most
+ * constrained events.
+ */
+ wmax = x86_pmu.num_events;
+
+ /*
+ * when fixed event counters are present,
+ * wmax is incremented by 1 to account
+ * for one more choice
+ */
+ if (x86_pmu.num_events_fixed)
+ wmax++;
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+ for (w = 1, num = n; num && w <= wmax; w++) {
+ /* for each event */
+ for (i = 0; num && i < n; i++) {
+ c = constraints[i];
+ hwc = &cpuc->event_list[i]->hw;
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
- struct perf_event *event =
- cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ if (c->weight != w)
+ continue;
- if (WARN_ON_ONCE(!event))
- return;
+ for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
+ if (!test_bit(j, used_mask))
+ break;
+ }
+
+ if (j == X86_PMC_IDX_MAX)
+ break;
+
+ set_bit(j, used_mask);
- intel_pmu_enable_bts(event->hw.config);
+ if (assign)
+ assign[i] = j;
+ num--;
+ }
+ }
+done:
+ /*
+ * scheduling failed or is just a simulation,
+ * free resources if necessary
+ */
+ if (!assign || num) {
+ for (i = 0; i < n; i++) {
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
+ }
}
+ return num ? -ENOSPC : 0;
}
-static void amd_pmu_enable_all(void)
+/*
+ * dogrp: true if must collect siblings events (group)
+ * returns total number of events and error code
+ */
+static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx;
+ struct perf_event *event;
+ int n, max_count;
- if (cpuc->enabled)
- return;
+ max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
- cpuc->enabled = 1;
- barrier();
+ /* current number of events already accepted */
+ n = cpuc->n_events;
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
- struct perf_event *event = cpuc->events[idx];
- u64 val;
+ if (is_x86_event(leader)) {
+ if (n >= max_count)
+ return -ENOSPC;
+ cpuc->event_list[n] = leader;
+ n++;
+ }
+ if (!dogrp)
+ return n;
- if (!test_bit(idx, cpuc->active_mask))
+ list_for_each_entry(event, &leader->sibling_list, group_entry) {
+ if (!is_x86_event(event) ||
+ event->state <= PERF_EVENT_STATE_OFF)
continue;
- val = event->hw.config;
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
-}
+ if (n >= max_count)
+ return -ENOSPC;
-void hw_perf_enable(void)
-{
- if (!x86_pmu_initialized())
- return;
- x86_pmu.enable_all();
+ cpuc->event_list[n] = event;
+ n++;
+ }
+ return n;
}
-static inline u64 intel_pmu_get_status(void)
+static inline void x86_assign_hw_event(struct perf_event *event,
+ struct cpu_hw_events *cpuc, int i)
{
- u64 status;
+ struct hw_perf_event *hwc = &event->hw;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ hwc->idx = cpuc->assign[i];
+ hwc->last_cpu = smp_processor_id();
+ hwc->last_tag = ++cpuc->tags[i];
- return status;
+ if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+ hwc->config_base = 0;
+ hwc->event_base = 0;
+ } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+ hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+ /*
+ * We set it so that event_base + idx in wrmsr/rdmsr maps to
+ * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
+ */
+ hwc->event_base =
+ MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+ } else {
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->event_base = x86_pmu.perfctr;
+ }
}
-static inline void intel_pmu_ack_status(u64 ack)
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
+ struct cpu_hw_events *cpuc,
+ int i)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+ return hwc->idx == cpuc->assign[i] &&
+ hwc->last_cpu == smp_processor_id() &&
+ hwc->last_tag == cpuc->tags[i];
}
-static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
- (void)checking_wrmsrl(hwc->config_base + idx,
- hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
-}
+static void x86_pmu_stop(struct perf_event *event);
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+void hw_perf_enable(void)
{
- (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
-}
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int i;
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
-{
- int idx = __idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, mask;
+ if (!x86_pmu_initialized())
+ return;
- mask = 0xfULL << (idx * 4);
+ if (cpuc->enabled)
+ return;
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- (void)checking_wrmsrl(hwc->config_base, ctrl_val);
-}
+ if (cpuc->n_added) {
+ /*
+ * apply assignment obtained either from
+ * hw_perf_group_sched_in() or x86_pmu_enable()
+ *
+ * step1: save events moving to new counters
+ * step2: reprogram moved events into new counters
+ */
+ for (i = 0; i < cpuc->n_events; i++) {
-static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- u64 val = P6_NOP_EVENT;
+ event = cpuc->event_list[i];
+ hwc = &event->hw;
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ /*
+ * we can avoid reprogramming counter if:
+ * - assigned same counter as last time
+ * - running on same CPU as last time
+ * - no other event has used the counter since
+ */
+ if (hwc->idx == -1 ||
+ match_prev_assignment(hwc, cpuc, i))
+ continue;
- (void)checking_wrmsrl(hwc->config_base + idx, val);
-}
+ x86_pmu_stop(event);
-static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
- intel_pmu_disable_bts();
- return;
- }
+ hwc->idx = -1;
+ }
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_disable_fixed(hwc, idx);
- return;
+ for (i = 0; i < cpuc->n_events; i++) {
+
+ event = cpuc->event_list[i];
+ hwc = &event->hw;
+
+ if (hwc->idx == -1) {
+ x86_assign_hw_event(event, cpuc, i);
+ x86_perf_event_set_period(event, hwc, hwc->idx);
+ }
+ /*
+ * need to mark as active because x86_pmu_disable()
+ * clear active_mask and events[] yet it preserves
+ * idx
+ */
+ set_bit(hwc->idx, cpuc->active_mask);
+ cpuc->events[hwc->idx] = event;
+
+ x86_pmu.enable(hwc, hwc->idx);
+ perf_event_update_userpage(event);
+ }
+ cpuc->n_added = 0;
+ perf_events_lapic_init();
}
- x86_pmu_disable_event(hwc, idx);
+ cpuc->enabled = 1;
+ barrier();
+
+ x86_pmu.enable_all();
+}
+
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ (void)checking_wrmsrl(hwc->config_base + idx,
+ hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
-static inline void
-amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- x86_pmu_disable_event(hwc, idx);
+ (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
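The scheduling logic introduced above (a fastpath that reuses previous assignments, then a weight-ordered pass) boils down to a greedy assignment: events with the fewest legal counters are placed first, each taking the first free counter its constraint allows. The following self-contained model is a simplification for illustration, not kernel code, and its counter count and event values are made up.

/* Toy model of the weight-ordered assignment in x86_schedule_events(). */
#include <stdio.h>

#define NUM_COUNTERS 4

struct fake_event {
	unsigned int idxmsk;	/* bitmask of counters this event may use */
	int weight;		/* number of bits set in idxmsk */
	int assigned;		/* chosen counter, or -1 */
};

static int schedule(struct fake_event *ev, int n)
{
	unsigned int used = 0;
	int w, i, j, num = n;

	for (w = 1; num && w <= NUM_COUNTERS; w++) {
		for (i = 0; num && i < n; i++) {
			if (ev[i].weight != w)
				continue;
			for (j = 0; j < NUM_COUNTERS; j++) {
				if (!(ev[i].idxmsk & (1u << j)) || (used & (1u << j)))
					continue;
				used |= 1u << j;
				ev[i].assigned = j;
				num--;
				break;
			}
			if (j == NUM_COUNTERS)
				return -1;	/* over-committed, like -ENOSPC */
		}
	}
	return num ? -1 : 0;
}

int main(void)
{
	/* Two constrained events (counter 0 only / counters 0-1) and one
	 * unconstrained event; the values are purely illustrative. */
	struct fake_event ev[] = {
		{ 0x1, 1, -1 },
		{ 0x3, 2, -1 },
		{ 0xf, 4, -1 },
	};
	int i;

	if (schedule(ev, 3))
		return 1;
	for (i = 0; i < 3; i++)
		printf("event %d -> counter %d\n", i, ev[i].assigned);
	return 0;
}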
@@ -1326,220 +919,60 @@ x86_perf_event_set_period(struct perf_event *event,
return ret;
}
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
-{
- int idx = __idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
- int err;
-
- /*
- * Enable IRQ generation (0x8),
- * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
- * if requested:
- */
- bits = 0x8ULL;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
- bits |= 0x2;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
- bits |= 0x1;
-
- /*
- * ANY bit is supported in v3 and up
- */
- if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
- bits |= 0x4;
-
- bits <<= (idx * 4);
- mask = 0xfULL << (idx * 4);
-
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- ctrl_val |= bits;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- u64 val;
-
- val = hwc->config;
if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
- (void)checking_wrmsrl(hwc->config_base + idx, val);
+ __x86_pmu_enable_event(hwc, idx);
}
-
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
- if (!__get_cpu_var(cpu_hw_events).enabled)
- return;
-
- intel_pmu_enable_bts(hwc->config);
- return;
- }
-
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_enable_fixed(hwc, idx);
- return;
- }
-
- x86_pmu_enable_event(hwc, idx);
-}
-
-static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+/*
+ * activate a single event
+ *
+ * The event is added to the group of enabled events
+ * but only if it can be scehduled with existing events.
+ *
+ * Called with PMU disabled. If successful and return value 1,
+ * then guaranteed to call perf_enable() and hw_perf_enable()
+ */
+static int x86_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc;
+ int assign[X86_PMC_IDX_MAX];
+ int n, n0, ret;
- if (cpuc->enabled)
- x86_pmu_enable_event(hwc, idx);
-}
-
-static int fixed_mode_idx(struct hw_perf_event *hwc)
-{
- unsigned int hw_event;
-
- hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
-
- if (unlikely((hw_event ==
- x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
- (hwc->sample_period == 1)))
- return X86_PMC_IDX_FIXED_BTS;
+ hwc = &event->hw;
- if (!x86_pmu.num_events_fixed)
- return -1;
+ n0 = cpuc->n_events;
+ n = collect_events(cpuc, event, false);
+ if (n < 0)
+ return n;
+ ret = x86_schedule_events(cpuc, n, assign);
+ if (ret)
+ return ret;
/*
- * fixed counters do not take all possible filters
+ * copy new assignment, now we know it is possible
+ * will be used by hw_perf_enable()
*/
- if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
- return -1;
+ memcpy(cpuc->assign, assign, n*sizeof(int));
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
- return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
- return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
- return X86_PMC_IDX_FIXED_BUS_CYCLES;
+ cpuc->n_events = n;
+ cpuc->n_added = n - n0;
- return -1;
-}
-
-/*
- * generic counter allocator: get next free counter
- */
-static int
-gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
-{
- int idx;
-
- idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
- return idx == x86_pmu.num_events ? -1 : idx;
-}
-
-/*
- * intel-specific counter allocator: check event constraints
- */
-static int
-intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
-{
- const struct event_constraint *event_constraint;
- int i, code;
-
- if (!event_constraints)
- goto skip;
-
- code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
-
- for_each_event_constraint(event_constraint, event_constraints) {
- if (code == event_constraint->code) {
- for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
- if (!test_and_set_bit(i, cpuc->used_mask))
- return i;
- }
- return -1;
- }
- }
-skip:
- return gen_get_event_idx(cpuc, hwc);
-}
-
-static int
-x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
-{
- int idx;
-
- idx = fixed_mode_idx(hwc);
- if (idx == X86_PMC_IDX_FIXED_BTS) {
- /* BTS is already occupied. */
- if (test_and_set_bit(idx, cpuc->used_mask))
- return -EAGAIN;
-
- hwc->config_base = 0;
- hwc->event_base = 0;
- hwc->idx = idx;
- } else if (idx >= 0) {
- /*
- * Try to get the fixed event, if that is already taken
- * then try to get a generic event:
- */
- if (test_and_set_bit(idx, cpuc->used_mask))
- goto try_generic;
-
- hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- /*
- * We set it so that event_base + idx in wrmsr/rdmsr maps to
- * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
- */
- hwc->event_base =
- MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
- hwc->idx = idx;
- } else {
- idx = hwc->idx;
- /* Try to get the previous generic event again */
- if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
-try_generic:
- idx = x86_pmu.get_event_idx(cpuc, hwc);
- if (idx == -1)
- return -EAGAIN;
-
- set_bit(idx, cpuc->used_mask);
- hwc->idx = idx;
- }
- hwc->config_base = x86_pmu.eventsel;
- hwc->event_base = x86_pmu.perfctr;
- }
-
- return idx;
+ return 0;
}
-/*
- * Find a PMC slot for the freshly enabled / scheduled in event:
- */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_start(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- int idx;
-
- idx = x86_schedule_event(cpuc, hwc);
- if (idx < 0)
- return idx;
- perf_events_lapic_init();
-
- x86_pmu.disable(hwc, idx);
+ if (hwc->idx == -1)
+ return -EAGAIN;
- cpuc->events[idx] = event;
- set_bit(idx, cpuc->active_mask);
-
- x86_perf_event_set_period(event, hwc, idx);
- x86_pmu.enable(hwc, idx);
-
- perf_event_update_userpage(event);
+ x86_perf_event_set_period(event, hwc, hwc->idx);
+ x86_pmu.enable(hwc, hwc->idx);
return 0;
}
@@ -1583,7 +1016,7 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
}
- pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
+ pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_events; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -1607,67 +1040,7 @@ void perf_event_print_debug(void)
local_irq_restore(flags);
}
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
-{
- struct debug_store *ds = cpuc->ds;
- struct bts_record {
- u64 from;
- u64 to;
- u64 flags;
- };
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
- struct bts_record *at, *top;
- struct perf_output_handle handle;
- struct perf_event_header header;
- struct perf_sample_data data;
- struct pt_regs regs;
-
- if (!event)
- return;
-
- if (!ds)
- return;
-
- at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
- top = (struct bts_record *)(unsigned long)ds->bts_index;
-
- if (top <= at)
- return;
-
- ds->bts_index = ds->bts_buffer_base;
-
-
- data.period = event->hw.last_period;
- data.addr = 0;
- data.raw = NULL;
- regs.ip = 0;
-
- /*
- * Prepare a generic sample, i.e. fill in the invariant fields.
- * We will overwrite the from and to address before we output
- * the sample.
- */
- perf_prepare_sample(&header, &data, event, &regs);
-
- if (perf_output_begin(&handle, event,
- header.size * (top - at), 1, 1))
- return;
-
- for (; at < top; at++) {
- data.ip = at->from;
- data.addr = at->to;
-
- perf_output_sample(&handle, &header, &data, event);
- }
-
- perf_output_end(&handle);
-
- /* There's new data available. */
- event->hw.interrupts++;
- event->pending_kill = POLL_IN;
-}
-
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
@@ -1681,183 +1054,38 @@ static void x86_pmu_disable(struct perf_event *event)
x86_pmu.disable(hwc, idx);
/*
- * Make sure the cleared pointer becomes visible before we
- * (potentially) free the event:
- */
- barrier();
-
- /*
	 * Drain the remaining delta count out of an event
* that we are disabling:
*/
x86_perf_event_update(event, hwc, idx);
- /* Drain the remaining BTS records. */
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
- intel_pmu_drain_bts_buffer(cpuc);
-
cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
-
- perf_event_update_userpage(event);
-}
-
-/*
- * Save and restart an expired event. Called by NMI contexts,
- * so it has to be careful about preempting normal event ops:
- */
-static int intel_pmu_save_and_restart(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- int ret;
-
- x86_perf_event_update(event, hwc, idx);
- ret = x86_perf_event_set_period(event, hwc, idx);
-
- if (event->state == PERF_EVENT_STATE_ACTIVE)
- intel_pmu_enable_event(hwc, idx);
-
- return ret;
}
-static void intel_pmu_reset(void)
-{
- struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
- unsigned long flags;
- int idx;
-
- if (!x86_pmu.num_events)
- return;
-
- local_irq_save(flags);
-
- printk("clearing PMU state on CPU#%d\n", smp_processor_id());
-
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
- checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
- checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
- }
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
- checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
- }
- if (ds)
- ds->bts_index = ds->bts_buffer_base;
-
- local_irq_restore(flags);
-}
-
-static int p6_pmu_handle_irq(struct pt_regs *regs)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- struct perf_event *event;
- struct hw_perf_event *hwc;
- int idx, handled = 0;
- u64 val;
-
- data.addr = 0;
- data.raw = NULL;
-
- cpuc = &__get_cpu_var(cpu_hw_events);
-
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
- event = cpuc->events[idx];
- hwc = &event->hw;
-
- val = x86_perf_event_update(event, hwc, idx);
- if (val & (1ULL << (x86_pmu.event_bits - 1)))
- continue;
-
- /*
- * event overflow
- */
- handled = 1;
- data.period = event->hw.last_period;
-
- if (!x86_perf_event_set_period(event, hwc, idx))
- continue;
-
- if (perf_event_overflow(event, 1, &data, regs))
- p6_pmu_disable_event(hwc, idx);
- }
-
- if (handled)
- inc_irq_stat(apic_perf_irqs);
-
- return handled;
-}
-
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
+static void x86_pmu_disable(struct perf_event *event)
{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- int bit, loops;
- u64 ack, status;
-
- data.addr = 0;
- data.raw = NULL;
-
- cpuc = &__get_cpu_var(cpu_hw_events);
-
- perf_disable();
- intel_pmu_drain_bts_buffer(cpuc);
- status = intel_pmu_get_status();
- if (!status) {
- perf_enable();
- return 0;
- }
-
- loops = 0;
-again:
- if (++loops > 100) {
- WARN_ONCE(1, "perfevents: irq loop stuck!\n");
- perf_event_print_debug();
- intel_pmu_reset();
- perf_enable();
- return 1;
- }
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int i;
- inc_irq_stat(apic_perf_irqs);
- ack = status;
- for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
- struct perf_event *event = cpuc->events[bit];
+ x86_pmu_stop(event);
- clear_bit(bit, (unsigned long *) &status);
- if (!test_bit(bit, cpuc->active_mask))
- continue;
+ for (i = 0; i < cpuc->n_events; i++) {
+ if (event == cpuc->event_list[i]) {
- if (!intel_pmu_save_and_restart(event))
- continue;
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(cpuc, event);
- data.period = event->hw.last_period;
+ while (++i < cpuc->n_events)
+ cpuc->event_list[i-1] = cpuc->event_list[i];
- if (perf_event_overflow(event, 1, &data, regs))
- intel_pmu_disable_event(&event->hw, bit);
+ --cpuc->n_events;
+ break;
+ }
}
-
- intel_pmu_ack_status(ack);
-
- /*
- * Repeat if there is more work to be done:
- */
- status = intel_pmu_get_status();
- if (status)
- goto again;
-
- perf_enable();
-
- return 1;
+ perf_event_update_userpage(event);
}
-static int amd_pmu_handle_irq(struct pt_regs *regs)
+static int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
@@ -1892,7 +1120,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
continue;
if (perf_event_overflow(event, 1, &data, regs))
- amd_pmu_disable_event(hwc, idx);
+ x86_pmu.disable(hwc, idx);
}
if (handled)
@@ -1975,194 +1203,137 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.priority = 1
};
-static __initconst struct x86_pmu p6_pmu = {
- .name = "p6",
- .handle_irq = p6_pmu_handle_irq,
- .disable_all = p6_pmu_disable_all,
- .enable_all = p6_pmu_enable_all,
- .enable = p6_pmu_enable_event,
- .disable = p6_pmu_disable_event,
- .eventsel = MSR_P6_EVNTSEL0,
- .perfctr = MSR_P6_PERFCTR0,
- .event_map = p6_pmu_event_map,
- .raw_event = p6_pmu_raw_event,
- .max_events = ARRAY_SIZE(p6_perfmon_event_map),
- .apic = 1,
- .max_period = (1ULL << 31) - 1,
- .version = 0,
- .num_events = 2,
- /*
- * Events have 40 bits implemented. However they are designed such
- * that bits [32-39] are sign extensions of bit 31. As such the
- * effective width of a event for P6-like PMU is 32 bits only.
- *
- * See IA-32 Intel Architecture Software developer manual Vol 3B
- */
- .event_bits = 32,
- .event_mask = (1ULL << 32) - 1,
- .get_event_idx = intel_get_event_idx,
-};
+static struct event_constraint unconstrained;
+static struct event_constraint emptyconstraint;
-static __initconst struct x86_pmu intel_pmu = {
- .name = "Intel",
- .handle_irq = intel_pmu_handle_irq,
- .disable_all = intel_pmu_disable_all,
- .enable_all = intel_pmu_enable_all,
- .enable = intel_pmu_enable_event,
- .disable = intel_pmu_disable_event,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = intel_pmu_event_map,
- .raw_event = intel_pmu_raw_event,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- .apic = 1,
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic event period:
- */
- .max_period = (1ULL << 31) - 1,
- .enable_bts = intel_pmu_enable_bts,
- .disable_bts = intel_pmu_disable_bts,
- .get_event_idx = intel_get_event_idx,
-};
+static struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ struct event_constraint *c;
-static __initconst struct x86_pmu amd_pmu = {
- .name = "AMD",
- .handle_irq = amd_pmu_handle_irq,
- .disable_all = amd_pmu_disable_all,
- .enable_all = amd_pmu_enable_all,
- .enable = amd_pmu_enable_event,
- .disable = amd_pmu_disable_event,
- .eventsel = MSR_K7_EVNTSEL0,
- .perfctr = MSR_K7_PERFCTR0,
- .event_map = amd_pmu_event_map,
- .raw_event = amd_pmu_raw_event,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_events = 4,
- .event_bits = 48,
- .event_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_idx = gen_get_event_idx,
-};
+ if (x86_pmu.event_constraints) {
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
+ if ((event->hw.config & c->cmask) == c->code)
+ return c;
+ }
+ }
+
+ return &unconstrained;
+}
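The lookup above is the whole generic constraint mechanism: each table entry carries a code/cmask pair, and an event matches when (config & cmask) == code; anything that matches nothing falls back to the unconstrained mask covering every generic counter (for num_events = 4 that is (1ULL << 4) - 1 = 0xf, as set up in init_hw_perf_events() below). A minimal user-space sketch of the same matching, with made-up table values, not part of the patch:

#include <stdio.h>
#include <stdint.h>

struct constraint {
	uint64_t code;   /* event bits that must match        */
	uint64_t cmask;  /* which config bits participate     */
	uint64_t idxmsk; /* counters this event may be put on */
};

/* hypothetical table: event 0x12 is only allowed on counter 1 */
static const struct constraint table[] = {
	{ 0x12, 0xff, 0x2 },
	{ 0, 0, 0 },     /* end marker */
};

static uint64_t constraint_mask(uint64_t config)
{
	const struct constraint *c;

	for (c = table; c->cmask; c++) {
		if ((config & c->cmask) == c->code)
			return c->idxmsk;
	}
	return (1ULL << 4) - 1; /* unconstrained: any of 4 generic counters */
}

int main(void)
{
	/* raw config 0x530012: unit/flag bits plus event select 0x12 */
	printf("idxmsk = %#llx\n",
	       (unsigned long long)constraint_mask(0x530012));
	return 0;
}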
-static __init int p6_pmu_init(void)
+static int x86_event_sched_in(struct perf_event *event,
+ struct perf_cpu_context *cpuctx)
{
- switch (boot_cpu_data.x86_model) {
- case 1:
- case 3: /* Pentium Pro */
- case 5:
- case 6: /* Pentium II */
- case 7:
- case 8:
- case 11: /* Pentium III */
- event_constraints = intel_p6_event_constraints;
- break;
- case 9:
- case 13:
- /* Pentium M */
- event_constraints = intel_p6_event_constraints;
- break;
- default:
- pr_cont("unsupported p6 CPU model %d ",
- boot_cpu_data.x86_model);
- return -ENODEV;
- }
+ int ret = 0;
- x86_pmu = p6_pmu;
+ event->state = PERF_EVENT_STATE_ACTIVE;
+ event->oncpu = smp_processor_id();
+ event->tstamp_running += event->ctx->time - event->tstamp_stopped;
- return 0;
+ if (!is_x86_event(event))
+ ret = event->pmu->enable(event);
+
+ if (!ret && !is_software_event(event))
+ cpuctx->active_oncpu++;
+
+ if (!ret && event->attr.exclusive)
+ cpuctx->exclusive = 1;
+
+ return ret;
}
-static __init int intel_pmu_init(void)
+static void x86_event_sched_out(struct perf_event *event,
+ struct perf_cpu_context *cpuctx)
{
- union cpuid10_edx edx;
- union cpuid10_eax eax;
- unsigned int unused;
- unsigned int ebx;
- int version;
-
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
- /* check for P6 processor family */
- if (boot_cpu_data.x86 == 6) {
- return p6_pmu_init();
- } else {
- return -ENODEV;
- }
- }
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ event->oncpu = -1;
- /*
- * Check whether the Architectural PerfMon supports
- * Branch Misses Retired hw_event or not.
- */
- cpuid(10, &eax.full, &ebx, &unused, &edx.full);
- if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
- return -ENODEV;
+ if (!is_x86_event(event))
+ event->pmu->disable(event);
- version = eax.split.version_id;
- if (version < 2)
- return -ENODEV;
+ event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
- x86_pmu = intel_pmu;
- x86_pmu.version = version;
- x86_pmu.num_events = eax.split.num_events;
- x86_pmu.event_bits = eax.split.bit_width;
- x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+ if (!is_software_event(event))
+ cpuctx->active_oncpu--;
- /*
- * Quirk: v2 perfmon does not report fixed-purpose events, so
- * assume at least 3 events:
- */
- x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+ if (event->attr.exclusive || !cpuctx->active_oncpu)
+ cpuctx->exclusive = 0;
+}
+/*
+ * Called to enable a whole group of events.
+ * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
+ * Assumes the caller has disabled interrupts and has
+ * frozen the PMU with hw_perf_save_disable.
+ *
+ * Called with the PMU disabled. If it succeeds and returns 1, the caller
+ * is then guaranteed to call perf_enable() and hw_perf_enable().
+ */
+int hw_perf_group_sched_in(struct perf_event *leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *sub;
+ int assign[X86_PMC_IDX_MAX];
+ int n0, n1, ret;
+
+ /* n0 = total number of events */
+ n0 = collect_events(cpuc, leader, true);
+ if (n0 < 0)
+ return n0;
+
+ ret = x86_schedule_events(cpuc, n0, assign);
+ if (ret)
+ return ret;
+
+ ret = x86_event_sched_in(leader, cpuctx);
+ if (ret)
+ return ret;
+
+ n1 = 1;
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+ if (sub->state > PERF_EVENT_STATE_OFF) {
+ ret = x86_event_sched_in(sub, cpuctx);
+ if (ret)
+ goto undo;
+ ++n1;
+ }
+ }
/*
- * Install the hw-cache-events table:
+	 * Copy the new assignment now that we know it is possible;
+	 * it will be used by hw_perf_enable().
*/
- switch (boot_cpu_data.x86_model) {
- case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
- case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
- case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
- case 29: /* six-core 45 nm xeon "Dunnington" */
- memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- pr_cont("Core2 events, ");
- event_constraints = intel_core_event_constraints;
- break;
- default:
- case 26:
- memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ memcpy(cpuc->assign, assign, n0*sizeof(int));
- event_constraints = intel_nehalem_event_constraints;
- pr_cont("Nehalem/Corei7 events, ");
- break;
- case 28:
- memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ cpuc->n_events = n0;
+ cpuc->n_added = n1;
+ ctx->nr_active += n1;
- pr_cont("Atom events, ");
- break;
+ /*
+ * 1 means successful and events are active
+ * This is not quite true because we defer
+ * actual activation until hw_perf_enable() but
+	 * this way we ensure the caller won't try to enable
+	 * individual events
+ */
+ return 1;
+undo:
+ x86_event_sched_out(leader, cpuctx);
+ n0 = 1;
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+ if (sub->state == PERF_EVENT_STATE_ACTIVE) {
+ x86_event_sched_out(sub, cpuctx);
+ if (++n0 == n1)
+ break;
+ }
}
- return 0;
+ return ret;
}
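hw_perf_group_sched_in() above is transactional: the leader and every sibling are scheduled in, and if any of them fails the members that were already activated are walked back before the error is returned. A stand-alone sketch of that commit-or-undo shape (sched_in()/sched_out() are stand-ins, not the kernel helpers):

#include <stdio.h>

/* stand-ins for scheduling one event in/out; id 3 is made to fail */
static int sched_in(int id)
{
	if (id == 3)
		return -1;
	printf("in  %d\n", id);
	return 0;
}

static void sched_out(int id)
{
	printf("out %d\n", id);
}

int main(void)
{
	int ids[] = { 1, 2, 3, 4 };
	int n = 4, i;

	for (i = 0; i < n; i++) {
		if (sched_in(ids[i]))
			goto undo;
	}
	return 0;		/* whole group committed */
undo:
	while (i--)
		sched_out(ids[i]); /* walk back the members already active */
	return 1;
}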
-static __init int amd_pmu_init(void)
-{
- /* Performance-monitoring supported from K7 and later: */
- if (boot_cpu_data.x86 < 6)
- return -ENODEV;
-
- x86_pmu = amd_pmu;
-
- /* Events are common for all AMDs */
- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- return 0;
-}
+#include "perf_event_amd.c"
+#include "perf_event_p6.c"
+#include "perf_event_intel.c"
static void __init pmu_check_apic(void)
{
@@ -2220,6 +1391,10 @@ void __init init_hw_perf_events(void)
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
+ unconstrained = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
+ 0, x86_pmu.num_events);
+
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.event_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_events);
@@ -2237,50 +1412,79 @@ static inline void x86_pmu_read(struct perf_event *event)
static const struct pmu pmu = {
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
+ .start = x86_pmu_start,
+ .stop = x86_pmu_stop,
.read = x86_pmu_read,
.unthrottle = x86_pmu_unthrottle,
};
-static int
-validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct hw_perf_event fake_event = event->hw;
-
- if (event->pmu && event->pmu != &pmu)
- return 0;
-
- return x86_schedule_event(cpuc, &fake_event) >= 0;
-}
-
+/*
+ * validate a single event group
+ *
+ * validation includes:
+ * - check events are compatible with each other
+ * - events do not compete for the same counter
+ * - number of events <= number of counters
+ *
+ * validation ensures the group can be loaded onto the
+ * PMU if it was the only group available.
+ */
static int validate_group(struct perf_event *event)
{
- struct perf_event *sibling, *leader = event->group_leader;
- struct cpu_hw_events fake_pmu;
+ struct perf_event *leader = event->group_leader;
+ struct cpu_hw_events *fake_cpuc;
+ int ret, n;
- memset(&fake_pmu, 0, sizeof(fake_pmu));
+ ret = -ENOMEM;
+ fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+ if (!fake_cpuc)
+ goto out;
+
+ /*
+ * the event is not yet connected with its
+	 * siblings; therefore we must first collect
+ * existing siblings, then add the new event
+ * before we can simulate the scheduling
+ */
+ ret = -ENOSPC;
+ n = collect_events(fake_cpuc, leader, true);
+ if (n < 0)
+ goto out_free;
- if (!validate_event(&fake_pmu, leader))
- return -ENOSPC;
+ fake_cpuc->n_events = n;
+ n = collect_events(fake_cpuc, event, false);
+ if (n < 0)
+ goto out_free;
- list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
- return -ENOSPC;
- }
+ fake_cpuc->n_events = n;
- if (!validate_event(&fake_pmu, event))
- return -ENOSPC;
+ ret = x86_schedule_events(fake_cpuc, n, NULL);
- return 0;
+out_free:
+ kfree(fake_cpuc);
+out:
+ return ret;
}
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
+ const struct pmu *tmp;
int err;
err = __hw_perf_event_init(event);
if (!err) {
+ /*
+ * we temporarily connect event to its pmu
+ * such that validate_group() can classify
+ * it as an x86 event using is_x86_event()
+ */
+ tmp = event->pmu;
+ event->pmu = &pmu;
+
if (event->group_leader != event)
err = validate_group(event);
+
+ event->pmu = tmp;
}
if (err) {
if (event->destroy)
@@ -2304,7 +1508,6 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_ignored_frame);
static void
@@ -2320,10 +1523,6 @@ static void backtrace_warning(void *data, char *msg)
static int backtrace_stack(void *data, char *name)
{
- per_cpu(in_ignored_frame, smp_processor_id()) =
- x86_is_stack_id(NMI_STACK, name) ||
- x86_is_stack_id(DEBUG_STACK, name);
-
return 0;
}
@@ -2331,9 +1530,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
- if (per_cpu(in_ignored_frame, smp_processor_id()))
- return;
-
if (reliable)
callchain_store(entry, addr);
}
@@ -2440,9 +1636,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
is_user = user_mode(regs);
- if (!current || current->pid == 0)
- return;
-
if (is_user && current->state != TASK_RUNNING)
return;
@@ -2472,4 +1665,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
void hw_perf_event_setup_online(int cpu)
{
init_debug_store_on_cpu(cpu);
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ amd_pmu_cpu_online(cpu);
+ break;
+ default:
+ return;
+ }
+}
+
+void hw_perf_event_setup_offline(int cpu)
+{
+ init_debug_store_on_cpu(cpu);
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ amd_pmu_cpu_offline(cpu);
+ break;
+ default:
+ return;
+ }
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
new file mode 100644
index 0000000..8f3dbfd
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -0,0 +1,416 @@
+#ifdef CONFIG_CPU_SUP_AMD
+
+static DEFINE_RAW_SPINLOCK(amd_nb_lock);
+
+static __initconst u64 amd_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
+ [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
+ [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
+ [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
+ [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
+ [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
+ [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
+ [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
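The three-dimensional table above is indexed by cache unit, operation and result; a value of -1 marks a combination the hardware cannot count and 0 one that is simply not wired up. A small illustrative sketch of the lookup, reusing the L1D numbers from the table (everything else here is a stand-in, not the patch code):

#include <stdio.h>
#include <stdint.h>

enum cache  { L1D, NR_CACHE };
enum op     { OP_READ, OP_WRITE, NR_OP };
enum result { ACCESS, MISS, NR_RESULT };

/* L1D values taken from the table above */
static const uint64_t ids[NR_CACHE][NR_OP][NR_RESULT] = {
	[L1D] = {
		[OP_READ]  = { [ACCESS] = 0x0040, [MISS] = 0x0041 },
		[OP_WRITE] = { [ACCESS] = 0x0142, [MISS] = 0 },
	},
};

int main(void)
{
	uint64_t code = ids[L1D][OP_READ][MISS];

	if (code == (uint64_t)-1)
		return 1;	/* combination not countable by the hardware */

	printf("raw event code: %#llx\n", (unsigned long long)code);
	return 0;
}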
+
+/*
+ * AMD Performance Monitor K7 and later.
+ */
+static const u64 amd_perfmon_event_map[] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+};
+
+static u64 amd_pmu_event_map(int hw_event)
+{
+ return amd_perfmon_event_map[hw_event];
+}
+
+static u64 amd_pmu_raw_event(u64 hw_event)
+{
+#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
+#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
+#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
+#define K7_EVNTSEL_INV_MASK 0x000800000ULL
+#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
+
+#define K7_EVNTSEL_MASK \
+ (K7_EVNTSEL_EVENT_MASK | \
+ K7_EVNTSEL_UNIT_MASK | \
+ K7_EVNTSEL_EDGE_MASK | \
+ K7_EVNTSEL_INV_MASK | \
+ K7_EVNTSEL_REG_MASK)
+
+ return hw_event & K7_EVNTSEL_MASK;
+}
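amd_pmu_raw_event() is a pure bit filter: only the event, unit, edge, inv and reg fields named above survive in a user-supplied raw config. A worked example of the masking (the union of the field masks above works out to 0xFFF84FFFFULL); a sketch, not the patch code:

#include <stdio.h>
#include <stdint.h>

/* union of the K7_EVNTSEL_* field masks defined above */
#define K7_SKETCH_MASK 0xFFF84FFFFULL

int main(void)
{
	uint64_t raw = 0xFFFFFFFFFULL;	/* raw config with all 36 low bits set */

	/* everything outside the allowed fields is silently dropped */
	printf("%#llx\n", (unsigned long long)(raw & K7_SKETCH_MASK));
	return 0;
}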
+
+/*
+ * AMD64 events are detected based on their event codes.
+ */
+static inline int amd_is_nb_event(struct hw_perf_event *hwc)
+{
+ return (hwc->config & 0xe0) == 0xe0;
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amd_nb *nb = cpuc->amd_nb;
+ int i;
+
+ /*
+ * only care about NB events
+ */
+ if (!(nb && amd_is_nb_event(hwc)))
+ return;
+
+ /*
+ * need to scan whole list because event may not have
+ * been assigned during scheduling
+ *
+ * no race condition possible because event can only
+ * be removed on one CPU at a time AND PMU is disabled
+ * when we come here
+ */
+ for (i = 0; i < x86_pmu.num_events; i++) {
+ if (nb->owners[i] == event) {
+ cmpxchg(nb->owners+i, event, NULL);
+ break;
+ }
+ }
+}
+
+ /*
+ * AMD64 NorthBridge events need special treatment because
+ * counter access needs to be synchronized across all cores
+ * of a package. Refer to BKDG section 3.12
+ *
+ * NB events are events measuring L3 cache, Hypertransport
+ * traffic. They are identified by an event code >= 0xe00.
+ * They measure events on the NorthBridge, which is shared
+ * by all cores on a package. NB events are counted on a
+ * shared set of counters. When a NB event is programmed
+ * in a counter, the data actually comes from a shared
+ * counter. Thus, access to those counters needs to be
+ * synchronized.
+ *
+ * We implement the synchronization such that no two cores
+ * can be measuring NB events using the same counters. Thus,
+ * we maintain a per-NB allocation table. The available slot
+ * is propagated using the event_constraint structure.
+ *
+ * We provide only one choice for each NB event based on
+ * the fact that only NB events have restrictions. Consequently,
+ * if a counter is available, there is a guarantee the NB event
+ * will be assigned to it. If no slot is available, an empty
+ * constraint is returned and scheduling will eventually fail
+ * for this event.
+ *
+ * Note that all cores attached to the same NB compete for the same
+ * counters to host NB events, this is why we use atomic ops. Some
+ * multi-chip CPUs may have more than one NB.
+ *
+ * Given that resources are allocated (cmpxchg), they must be
+ * eventually freed for others to use. This is accomplished by
+ * calling amd_put_event_constraints().
+ *
+ * Non NB events are not impacted by this restriction.
+ */
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amd_nb *nb = cpuc->amd_nb;
+ struct perf_event *old = NULL;
+ int max = x86_pmu.num_events;
+ int i, j, k = -1;
+
+ /*
+ * if not NB event or no NB, then no constraints
+ */
+ if (!(nb && amd_is_nb_event(hwc)))
+ return &unconstrained;
+
+ /*
+ * detect if already present, if so reuse
+ *
+ * cannot merge with actual allocation
+ * because of possible holes
+ *
+ * event can already be present yet not assigned (in hwc->idx)
+ * because of successive calls to x86_schedule_events() from
+ * hw_perf_group_sched_in() without hw_perf_enable()
+ */
+ for (i = 0; i < max; i++) {
+ /*
+ * keep track of first free slot
+ */
+ if (k == -1 && !nb->owners[i])
+ k = i;
+
+ /* already present, reuse */
+ if (nb->owners[i] == event)
+ goto done;
+ }
+ /*
+ * not present, so grab a new slot
+ * starting either at:
+ */
+ if (hwc->idx != -1) {
+ /* previous assignment */
+ i = hwc->idx;
+ } else if (k != -1) {
+ /* start from free slot found */
+ i = k;
+ } else {
+ /*
+ * event not found, no slot found in
+ * first pass, try again from the
+ * beginning
+ */
+ i = 0;
+ }
+ j = i;
+ do {
+ old = cmpxchg(nb->owners+i, NULL, event);
+ if (!old)
+ break;
+ if (++i == max)
+ i = 0;
+ } while (i != j);
+done:
+ if (!old)
+ return &nb->event_constraints[i];
+
+ return &emptyconstraint;
+}
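The allocation loop above boils down to a round-robin compare-and-swap over the shared owners[] array: a core keeps a slot only if it atomically swings it from NULL to its own event. A stand-alone sketch of that claim loop, using GCC builtins in place of the kernel cmpxchg (not the patch code):

#include <stdio.h>

#define NUM_COUNTERS 4

/* stands in for nb->owners: one shared slot per NB counter */
static void *owners[NUM_COUNTERS];

/* try to claim a slot for 'event', starting the search at 'start' */
static int claim_slot(void *event, int start)
{
	int i = start, j = start;

	do {
		/* take the slot only if it is still free (atomic on SMP) */
		if (__sync_bool_compare_and_swap(&owners[i], NULL, event))
			return i;
		if (++i == NUM_COUNTERS)
			i = 0;
	} while (i != j);

	return -1;	/* every slot already owned by some other core */
}

int main(void)
{
	int a, b;

	printf("a -> %d\n", claim_slot(&a, 0));	/* claims slot 0      */
	printf("b -> %d\n", claim_slot(&b, 0));	/* 0 is busy, gets 1  */
	return 0;
}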
+
+static __initconst struct x86_pmu amd_pmu = {
+ .name = "AMD",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = x86_pmu_disable_all,
+ .enable_all = x86_pmu_enable_all,
+ .enable = x86_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+ .eventsel = MSR_K7_EVNTSEL0,
+ .perfctr = MSR_K7_PERFCTR0,
+ .event_map = amd_pmu_event_map,
+ .raw_event = amd_pmu_raw_event,
+ .max_events = ARRAY_SIZE(amd_perfmon_event_map),
+ .num_events = 4,
+ .event_bits = 48,
+ .event_mask = (1ULL << 48) - 1,
+ .apic = 1,
+ /* use highest bit to detect overflow */
+ .max_period = (1ULL << 47) - 1,
+ .get_event_constraints = amd_get_event_constraints,
+ .put_event_constraints = amd_put_event_constraints
+};
+
+static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
+{
+ struct amd_nb *nb;
+ int i;
+
+ nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+ if (!nb)
+ return NULL;
+
+ memset(nb, 0, sizeof(*nb));
+ nb->nb_id = nb_id;
+
+ /*
+ * initialize all possible NB constraints
+ */
+ for (i = 0; i < x86_pmu.num_events; i++) {
+ set_bit(i, nb->event_constraints[i].idxmsk);
+ nb->event_constraints[i].weight = 1;
+ }
+ return nb;
+}
+
+static void amd_pmu_cpu_online(int cpu)
+{
+ struct cpu_hw_events *cpu1, *cpu2;
+ struct amd_nb *nb = NULL;
+ int i, nb_id;
+
+ if (boot_cpu_data.x86_max_cores < 2)
+ return;
+
+ /*
+ * function may be called too early in the
+ * boot process, in which case nb_id is bogus
+ */
+ nb_id = amd_get_nb_id(cpu);
+ if (nb_id == BAD_APICID)
+ return;
+
+ cpu1 = &per_cpu(cpu_hw_events, cpu);
+ cpu1->amd_nb = NULL;
+
+ raw_spin_lock(&amd_nb_lock);
+
+ for_each_online_cpu(i) {
+ cpu2 = &per_cpu(cpu_hw_events, i);
+ nb = cpu2->amd_nb;
+ if (!nb)
+ continue;
+ if (nb->nb_id == nb_id)
+ goto found;
+ }
+
+ nb = amd_alloc_nb(cpu, nb_id);
+ if (!nb) {
+ pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
+ raw_spin_unlock(&amd_nb_lock);
+ return;
+ }
+found:
+ nb->refcnt++;
+ cpu1->amd_nb = nb;
+
+ raw_spin_unlock(&amd_nb_lock);
+}
+
+static void amd_pmu_cpu_offline(int cpu)
+{
+ struct cpu_hw_events *cpuhw;
+
+ if (boot_cpu_data.x86_max_cores < 2)
+ return;
+
+ cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+ raw_spin_lock(&amd_nb_lock);
+
+ if (--cpuhw->amd_nb->refcnt == 0)
+ kfree(cpuhw->amd_nb);
+
+ cpuhw->amd_nb = NULL;
+
+ raw_spin_unlock(&amd_nb_lock);
+}
+
+static __init int amd_pmu_init(void)
+{
+ /* Performance-monitoring supported from K7 and later: */
+ if (boot_cpu_data.x86 < 6)
+ return -ENODEV;
+
+ x86_pmu = amd_pmu;
+
+ /* Events are common for all AMDs */
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ /*
+ * explicitly initialize the boot cpu, other cpus will get
+ * the cpu hotplug callbacks from smp_init()
+ */
+ amd_pmu_cpu_online(smp_processor_id());
+ return 0;
+}
+
+#else /* CONFIG_CPU_SUP_AMD */
+
+static int amd_pmu_init(void)
+{
+ return 0;
+}
+
+static void amd_pmu_cpu_online(int cpu)
+{
+}
+
+static void amd_pmu_cpu_offline(int cpu)
+{
+}
+
+#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
new file mode 100644
index 0000000..cf6590c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -0,0 +1,971 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/*
+ * Intel PerfMon v3. Used on Core2 and later.
+ */
+static const u64 intel_perfmon_event_map[] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+};
+
+static struct event_constraint intel_core_event_constraints[] =
+{
+ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
+ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+ INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+ INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+ INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
+ INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_core2_event_constraints[] =
+{
+ FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
+ FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
+ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+ INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+ INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+ INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
+ INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
+ INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_nehalem_event_constraints[] =
+{
+ FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
+ FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
+ INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
+ INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
+ INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
+ INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
+ INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
+ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
+ INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_westmere_event_constraints[] =
+{
+ FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
+ FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
+ INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
+ INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_gen_event_constraints[] =
+{
+ FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
+ FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ EVENT_CONSTRAINT_END
+};
+
+static u64 intel_pmu_event_map(int hw_event)
+{
+ return intel_perfmon_event_map[hw_event];
+}
+
+static __initconst u64 westmere_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
+ },
+ [ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
+ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static __initconst u64 nehalem_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
+ [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
+ [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
+ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static __initconst u64 core2_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
+ [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
+ [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
+ [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
+ [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
+ [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static __initconst u64 atom_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
+ [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
+ [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
+ [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
+ [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static u64 intel_pmu_raw_event(u64 hw_event)
+{
+#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
+#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
+#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
+#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
+#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
+
+#define CORE_EVNTSEL_MASK \
+ (INTEL_ARCH_EVTSEL_MASK | \
+ INTEL_ARCH_UNIT_MASK | \
+ INTEL_ARCH_EDGE_MASK | \
+ INTEL_ARCH_INV_MASK | \
+ INTEL_ARCH_CNT_MASK)
+
+ return hw_event & CORE_EVNTSEL_MASK;
+}
+
+static void intel_pmu_enable_bts(u64 config)
+{
+ unsigned long debugctlmsr;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr |= X86_DEBUGCTL_TR;
+ debugctlmsr |= X86_DEBUGCTL_BTS;
+ debugctlmsr |= X86_DEBUGCTL_BTINT;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+ debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+ debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long debugctlmsr;
+
+ if (!cpuc->ds)
+ return;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr &=
+ ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
+ X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ intel_pmu_disable_bts();
+}
+
+static void intel_pmu_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
+ if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+ struct perf_event *event =
+ cpuc->events[X86_PMC_IDX_FIXED_BTS];
+
+ if (WARN_ON_ONCE(!event))
+ return;
+
+ intel_pmu_enable_bts(event->hw.config);
+ }
+}
+
+static inline u64 intel_pmu_get_status(void)
+{
+ u64 status;
+
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+
+ return status;
+}
+
+static inline void intel_pmu_ack_status(u64 ack)
+{
+ wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+}
+
+static inline void
+intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+{
+ int idx = __idx - X86_PMC_IDX_FIXED;
+ u64 ctrl_val, mask;
+
+ mask = 0xfULL << (idx * 4);
+
+ rdmsrl(hwc->config_base, ctrl_val);
+ ctrl_val &= ~mask;
+ (void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static void intel_pmu_drain_bts_buffer(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct bts_record {
+ u64 from;
+ u64 to;
+ u64 flags;
+ };
+ struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ struct bts_record *at, *top;
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ if (!event)
+ return;
+
+ if (!ds)
+ return;
+
+ at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+ top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+ if (top <= at)
+ return;
+
+ ds->bts_index = ds->bts_buffer_base;
+
+
+ data.period = event->hw.last_period;
+ data.addr = 0;
+ data.raw = NULL;
+ regs.ip = 0;
+
+ /*
+ * Prepare a generic sample, i.e. fill in the invariant fields.
+ * We will overwrite the from and to address before we output
+ * the sample.
+ */
+ perf_prepare_sample(&header, &data, event, &regs);
+
+ if (perf_output_begin(&handle, event,
+ header.size * (top - at), 1, 1))
+ return;
+
+ for (; at < top; at++) {
+ data.ip = at->from;
+ data.addr = at->to;
+
+ perf_output_sample(&handle, &header, &data, event);
+ }
+
+ perf_output_end(&handle);
+
+ /* There's new data available. */
+ event->hw.interrupts++;
+ event->pending_kill = POLL_IN;
+}
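intel_pmu_drain_bts_buffer() walks fixed-size from/to/flags records between bts_buffer_base and bts_index, emits one sample per record, and resets the index. The record walk in isolation, as a user-space sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

struct bts_record {
	uint64_t from, to, flags;
};

int main(void)
{
	/* two made-up branch records sitting in the BTS area */
	struct bts_record buf[4] = {
		{ 0x1000, 0x2000, 0 },
		{ 0x2004, 0x3000, 0 },
	};
	struct bts_record *at  = buf;		/* bts_buffer_base */
	struct bts_record *top = buf + 2;	/* bts_index       */

	if (top <= at)
		return 0;			/* nothing to drain */

	for (; at < top; at++)
		printf("branch %#llx -> %#llx\n",
		       (unsigned long long)at->from,
		       (unsigned long long)at->to);

	/* a real drain would now reset bts_index back to bts_buffer_base */
	return 0;
}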
+
+static inline void
+intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+ intel_pmu_disable_bts();
+ intel_pmu_drain_bts_buffer();
+ return;
+ }
+
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_disable_fixed(hwc, idx);
+ return;
+ }
+
+ x86_pmu_disable_event(hwc, idx);
+}
+
+static inline void
+intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+{
+ int idx = __idx - X86_PMC_IDX_FIXED;
+ u64 ctrl_val, bits, mask;
+ int err;
+
+ /*
+ * Enable IRQ generation (0x8),
+ * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
+ * if requested:
+ */
+ bits = 0x8ULL;
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
+ bits |= 0x2;
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ bits |= 0x1;
+
+ /*
+ * ANY bit is supported in v3 and up
+ */
+ if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
+ bits |= 0x4;
+
+ bits <<= (idx * 4);
+ mask = 0xfULL << (idx * 4);
+
+ rdmsrl(hwc->config_base, ctrl_val);
+ ctrl_val &= ~mask;
+ ctrl_val |= bits;
+ err = checking_wrmsrl(hwc->config_base, ctrl_val);
+}
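Each fixed counter owns a 4-bit nibble in MSR_ARCH_PERFMON_FIXED_CTR_CTRL, so both the enable bits and the clearing mask are shifted by idx * 4. A worked example (illustrative only) for fixed counter 1 with PMI, ring-3 and ring-0 counting requested:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int idx = 1;			 /* second fixed counter             */
	uint64_t bits = 0x8 | 0x2 | 0x1; /* PMI | ring-3 (USR) | ring-0 (OS) */
	uint64_t mask = 0xfULL << (idx * 4);

	bits <<= idx * 4;

	/* ctrl_val = (ctrl_val & ~mask) | bits would update only this nibble */
	printf("mask = %#llx, bits = %#llx\n",
	       (unsigned long long)mask, (unsigned long long)bits);
	return 0;
}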
+
+static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (!__get_cpu_var(cpu_hw_events).enabled)
+ return;
+
+ intel_pmu_enable_bts(hwc->config);
+ return;
+ }
+
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_enable_fixed(hwc, idx);
+ return;
+ }
+
+ __x86_pmu_enable_event(hwc, idx);
+}
+
+/*
+ * Save and restart an expired event. Called by NMI contexts,
+ * so it has to be careful about preempting normal event ops:
+ */
+static int intel_pmu_save_and_restart(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ int ret;
+
+ x86_perf_event_update(event, hwc, idx);
+ ret = x86_perf_event_set_period(event, hwc, idx);
+
+ return ret;
+}
+
+static void intel_pmu_reset(void)
+{
+ struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+ unsigned long flags;
+ int idx;
+
+ if (!x86_pmu.num_events)
+ return;
+
+ local_irq_save(flags);
+
+ printk("clearing PMU state on CPU#%d\n", smp_processor_id());
+
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
+ checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+ }
+ for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+ checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ }
+ if (ds)
+ ds->bts_index = ds->bts_buffer_base;
+
+ local_irq_restore(flags);
+}
+
+/*
+ * This handler is triggered by the local APIC, so the APIC IRQ handling
+ * rules apply:
+ */
+static int intel_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ int bit, loops;
+ u64 ack, status;
+
+ data.addr = 0;
+ data.raw = NULL;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ perf_disable();
+ intel_pmu_drain_bts_buffer();
+ status = intel_pmu_get_status();
+ if (!status) {
+ perf_enable();
+ return 0;
+ }
+
+ loops = 0;
+again:
+ if (++loops > 100) {
+ WARN_ONCE(1, "perfevents: irq loop stuck!\n");
+ perf_event_print_debug();
+ intel_pmu_reset();
+ perf_enable();
+ return 1;
+ }
+
+ inc_irq_stat(apic_perf_irqs);
+ ack = status;
+ for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+ struct perf_event *event = cpuc->events[bit];
+
+ clear_bit(bit, (unsigned long *) &status);
+ if (!test_bit(bit, cpuc->active_mask))
+ continue;
+
+ if (!intel_pmu_save_and_restart(event))
+ continue;
+
+ data.period = event->hw.last_period;
+
+ if (perf_event_overflow(event, 1, &data, regs))
+ intel_pmu_disable_event(&event->hw, bit);
+ }
+
+ intel_pmu_ack_status(ack);
+
+ /*
+ * Repeat if there is more work to be done:
+ */
+ status = intel_pmu_get_status();
+ if (status)
+ goto again;
+
+ perf_enable();
+
+ return 1;
+}
+
+static struct event_constraint bts_constraint =
+ EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+
+static struct event_constraint *
+intel_special_constraints(struct perf_event *event)
+{
+ unsigned int hw_event;
+
+ hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
+
+ if (unlikely((hw_event ==
+ x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
+ (event->hw.sample_period == 1))) {
+
+ return &bts_constraint;
+ }
+ return NULL;
+}
+
+static struct event_constraint *
+intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ c = intel_special_constraints(event);
+ if (c)
+ return c;
+
+ return x86_get_event_constraints(cpuc, event);
+}
+
+static __initconst struct x86_pmu core_pmu = {
+ .name = "core",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = x86_pmu_disable_all,
+ .enable_all = x86_pmu_enable_all,
+ .enable = x86_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .event_map = intel_pmu_event_map,
+ .raw_event = intel_pmu_raw_event,
+ .max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ .apic = 1,
+ /*
+ * Intel PMCs cannot be accessed sanely above 32 bit width,
+ * so we install an artificial 1<<31 period regardless of
+ * the generic event period:
+ */
+ .max_period = (1ULL << 31) - 1,
+ .get_event_constraints = intel_get_event_constraints,
+ .event_constraints = intel_core_event_constraints,
+};
+
+static __initconst struct x86_pmu intel_pmu = {
+ .name = "Intel",
+ .handle_irq = intel_pmu_handle_irq,
+ .disable_all = intel_pmu_disable_all,
+ .enable_all = intel_pmu_enable_all,
+ .enable = intel_pmu_enable_event,
+ .disable = intel_pmu_disable_event,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .event_map = intel_pmu_event_map,
+ .raw_event = intel_pmu_raw_event,
+ .max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ .apic = 1,
+ /*
+ * Intel PMCs cannot be accessed sanely above 32 bit width,
+ * so we install an artificial 1<<31 period regardless of
+ * the generic event period:
+ */
+ .max_period = (1ULL << 31) - 1,
+ .enable_bts = intel_pmu_enable_bts,
+ .disable_bts = intel_pmu_disable_bts,
+ .get_event_constraints = intel_get_event_constraints
+};
+
+static __init int intel_pmu_init(void)
+{
+ union cpuid10_edx edx;
+ union cpuid10_eax eax;
+ unsigned int unused;
+ unsigned int ebx;
+ int version;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ /* check for P6 processor family */
+ if (boot_cpu_data.x86 == 6) {
+ return p6_pmu_init();
+ } else {
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Branch Misses Retired hw_event or not.
+ */
+ cpuid(10, &eax.full, &ebx, &unused, &edx.full);
+ if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+ return -ENODEV;
+
+ version = eax.split.version_id;
+ if (version < 2)
+ x86_pmu = core_pmu;
+ else
+ x86_pmu = intel_pmu;
+
+ x86_pmu.version = version;
+ x86_pmu.num_events = eax.split.num_events;
+ x86_pmu.event_bits = eax.split.bit_width;
+ x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+
+ /*
+ * Quirk: v2 perfmon does not report fixed-purpose events, so
+ * assume at least 3 events:
+ */
+ if (version > 1)
+ x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+
+ /*
+ * Install the hw-cache-events table:
+ */
+ switch (boot_cpu_data.x86_model) {
+ case 14: /* 65 nm core solo/duo, "Yonah" */
+ pr_cont("Core events, ");
+ break;
+
+ case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+ case 29: /* six-core 45 nm xeon "Dunnington" */
+ memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ x86_pmu.event_constraints = intel_core2_event_constraints;
+ pr_cont("Core2 events, ");
+ break;
+
+ case 26: /* 45 nm nehalem, "Bloomfield" */
+ case 30: /* 45 nm nehalem, "Lynnfield" */
+ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ x86_pmu.event_constraints = intel_nehalem_event_constraints;
+ pr_cont("Nehalem/Corei7 events, ");
+ break;
+ case 28:
+ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ x86_pmu.event_constraints = intel_gen_event_constraints;
+ pr_cont("Atom events, ");
+ break;
+
+ case 37: /* 32 nm nehalem, "Clarkdale" */
+ case 44: /* 32 nm nehalem, "Gulftown" */
+ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ x86_pmu.event_constraints = intel_westmere_event_constraints;
+ pr_cont("Westmere events, ");
+ break;
+ default:
+ /*
+ * default constraints for v2 and up
+ */
+ x86_pmu.event_constraints = intel_gen_event_constraints;
+ pr_cont("generic architected perfmon, ");
+ }
+ return 0;
+}
+
+#else /* CONFIG_CPU_SUP_INTEL */
+
+static int intel_pmu_init(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
new file mode 100644
index 0000000..1ca5ba0
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -0,0 +1,157 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
+};
+
+static u64 p6_pmu_event_map(int hw_event)
+{
+ return p6_perfmon_event_map[hw_event];
+}
+
+/*
+ * Event setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_EVENT 0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 hw_event)
+{
+#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
+#define P6_EVNTSEL_INV_MASK 0x00800000ULL
+#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
+
+#define P6_EVNTSEL_MASK \
+ (P6_EVNTSEL_EVENT_MASK | \
+ P6_EVNTSEL_UNIT_MASK | \
+ P6_EVNTSEL_EDGE_MASK | \
+ P6_EVNTSEL_INV_MASK | \
+ P6_EVNTSEL_REG_MASK)
+
+ return hw_event & P6_EVNTSEL_MASK;
+}
+
+static struct event_constraint p6_event_constraints[] =
+{
+ INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
+ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+ INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
+ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+ INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+ INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+ EVENT_CONSTRAINT_END
+};
+
+static void p6_pmu_disable_all(void)
+{
+ u64 val;
+
+ /* p6 only has one enable register */
+ rdmsrl(MSR_P6_EVNTSEL0, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
+static void p6_pmu_enable_all(void)
+{
+ unsigned long val;
+
+ /* p6 only has one enable register */
+ rdmsrl(MSR_P6_EVNTSEL0, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
+static inline void
+p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ u64 val = P6_NOP_EVENT;
+
+ if (cpuc->enabled)
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+ (void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ u64 val;
+
+ val = hwc->config;
+ if (cpuc->enabled)
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+ (void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+static __initconst struct x86_pmu p6_pmu = {
+ .name = "p6",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = p6_pmu_disable_all,
+ .enable_all = p6_pmu_enable_all,
+ .enable = p6_pmu_enable_event,
+ .disable = p6_pmu_disable_event,
+ .eventsel = MSR_P6_EVNTSEL0,
+ .perfctr = MSR_P6_PERFCTR0,
+ .event_map = p6_pmu_event_map,
+ .raw_event = p6_pmu_raw_event,
+ .max_events = ARRAY_SIZE(p6_perfmon_event_map),
+ .apic = 1,
+ .max_period = (1ULL << 31) - 1,
+ .version = 0,
+ .num_events = 2,
+ /*
+ * Events have 40 bits implemented. However they are designed such
+ * that bits [32-39] are sign extensions of bit 31. As such the
+ * effective width of an event for a P6-like PMU is 32 bits only.
+ *
+ * See IA-32 Intel Architecture Software developer manual Vol 3B
+ */
+ .event_bits = 32,
+ .event_mask = (1ULL << 32) - 1,
+ .get_event_constraints = x86_get_event_constraints,
+ .event_constraints = p6_event_constraints,
+};
+
+static __init int p6_pmu_init(void)
+{
+ switch (boot_cpu_data.x86_model) {
+ case 1:
+ case 3: /* Pentium Pro */
+ case 5:
+ case 6: /* Pentium II */
+ case 7:
+ case 8:
+ case 11: /* Pentium III */
+ case 9:
+ case 13:
+ /* Pentium M */
+ break;
+ default:
+ pr_cont("unsupported p6 CPU model %d ",
+ boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ x86_pmu = p6_pmu;
+
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
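
A quick way to see what p6_pmu_raw_event() above keeps from a user-supplied raw config is to run the same mask in user space. The sketch below reuses the mask definitions from the new file; the raw value itself is made up (event 0x2e, MESI unit mask 0x0f, plus USR/OS/INT/ENABLE bits in 16-23 that this mask drops).

#include <stdio.h>
#include <stdint.h>

#define P6_EVNTSEL_EVENT_MASK	0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK	0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK	0x00040000ULL
#define P6_EVNTSEL_INV_MASK	0x00800000ULL
#define P6_EVNTSEL_REG_MASK	0xFF000000ULL

#define P6_EVNTSEL_MASK					\
	(P6_EVNTSEL_EVENT_MASK | P6_EVNTSEL_UNIT_MASK |	\
	 P6_EVNTSEL_EDGE_MASK  | P6_EVNTSEL_INV_MASK  |	\
	 P6_EVNTSEL_REG_MASK)

/* Same filtering as p6_pmu_raw_event() */
static uint64_t p6_raw_event(uint64_t hw_event)
{
	return hw_event & P6_EVNTSEL_MASK;
}

int main(void)
{
	/* Made-up raw config: event 0x2e, unit mask 0x0f, plus 0x53 in
	 * bits 16-23 (USR/OS/INT/ENABLE), which the mask strips. */
	uint64_t raw = 0x00530f2eULL;

	printf("raw = %#llx  ->  kept = %#llx\n",
	       (unsigned long long)raw,
	       (unsigned long long)p6_raw_event(raw));
	return 0;
}

Running it prints "raw = 0x530f2e  ->  kept = 0xf2e": only the event code and unit mask survive.
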
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 898df97..74f4e85 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
return !test_bit(counter, perfctr_nmi_owner);
}
-
-/* checks the an msr for availability */
-int avail_to_resrv_perfctr_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_perfctr_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return !test_bit(counter, perfctr_nmi_owner);
-}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
int reserve_perfctr_nmi(unsigned int msr)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index ae775ca..11540a1 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -18,11 +18,6 @@
#include "dumpstack.h"
-/* Just a stub for now */
-int x86_is_stack_id(int id, char *name)
-{
- return 0;
-}
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 907a90e..dce99ab 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -33,11 +33,6 @@ static char x86_stack_ids[][8] = {
#endif
};
-int x86_is_stack_id(int id, char *name)
-{
- return x86_stack_ids[id - 1] == name;
-}
-
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
unsigned *usedp, char **idp)
{
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index bb6006e..dca2802 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -486,8 +486,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
rcu_read_lock();
bp = per_cpu(bp_per_reg[i], cpu);
- if (bp)
- rc = NOTIFY_DONE;
/*
* Reset the 'i'th TRAP bit in dr6 to denote completion of
* exception handling
@@ -506,7 +504,13 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
rcu_read_unlock();
}
- if (dr6 & (~DR_TRAP_BITS))
+ /*
+ * Further processing in do_debug() is needed for a) user-space
+ * breakpoints (to generate signals) and b) when the system has
+	 * taken an exception due to multiple causes
+ */
+ if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
+ (dr6 & (~DR_TRAP_BITS)))
rc = NOTIFY_DONE;
set_debugreg(dr7, 7);
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5b8c750..5de9f4a 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -337,6 +337,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
+ if (alternatives_text_reserved(p->addr, p->addr))
+ return -EINVAL;
+
if (!can_probe((unsigned long)p->addr))
return -EILSEQ;
/* insn: must be on special executable page on x86. */
@@ -429,7 +432,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
+#if !defined(CONFIG_PREEMPT)
if (p->ainsn.boostable == 1 && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 0c1033d..d03146f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -140,30 +140,6 @@ static const int arg_offs_table[] = {
#endif
};
-/**
- * regs_get_argument_nth() - get Nth argument at function call
- * @regs: pt_regs which contains registers at function entry.
- * @n: argument number.
- *
- * regs_get_argument_nth() returns @n th argument of a function call.
- * Since usually the kernel stack will be changed right after function entry,
- * you must use this at function entry. If the @n th entry is NOT in the
- * kernel stack or pt_regs, this returns 0.
- */
-unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
-{
- if (n < ARRAY_SIZE(arg_offs_table))
- return *(unsigned long *)((char *)regs + arg_offs_table[n]);
- else {
- /*
- * The typical case: arg n is on the stack.
- * (Note: stack[0] = return address, so skip it)
- */
- n -= ARRAY_SIZE(arg_offs_table);
- return regs_get_kernel_stack_nth(regs, 1 + n);
- }
-}
-
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3339917..1168e44 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
get_debugreg(dr6, 6);
+ /* Filter out all the reserved bits which are preset to 1 */
+ dr6 &= ~DR6_RESERVED;
+
/* Catch kmemcheck conditions first of all! */
if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
return;
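
The new `dr6 &= ~DR6_RESERVED;` line simply masks off the DR6 bits that read as 1 regardless of the actual debug-exception cause, so the later tests only see real cause bits. A standalone illustration; the constants are as defined in asm/debugreg.h around this series, and the sample register value is made up.

#include <stdio.h>

#define DR6_RESERVED	0xFFFF0FF0ul	/* bits that always read as 1 */
#define DR_TRAP_BITS	0x0000000Ful	/* B0-B3: which breakpoint fired */
#define DR_STEP		0x00004000ul	/* BS: single-step */

int main(void)
{
	unsigned long dr6 = 0xFFFF4FF1ul;	/* as read: reserved bits + B0 + BS */

	dr6 &= ~DR6_RESERVED;
	printf("cause bits: %#lx  trap: %#lx  single-step: %d\n",
	       dr6, dr6 & DR_TRAP_BITS, (int)!!(dr6 & DR_STEP));
	return 0;
}
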
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c05a29c..25b8b2f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -25,7 +25,7 @@
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
-
+
order = fls(count);
return order; /* We could be slightly more clever with -1 here... */
}
@@ -33,7 +33,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
static __inline__ int get_count_order(unsigned int count)
{
int order;
-
+
order = fls(count) - 1;
if (count & (count - 1))
order++;
@@ -45,6 +45,31 @@ static inline unsigned long hweight_long(unsigned long w)
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
+/*
+ * Clearly slow versions of the hweightN() functions; their benefit is,
+ * of course, compile-time evaluation of constant arguments.
+ */
+#define HWEIGHT8(w) \
+ ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \
+ (!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7))) )
+
+#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
+#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
+#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
+
+/*
+ * Type invariant version that simply casts things to the
+ * largest type.
+ */
+#define HWEIGHT(w) HWEIGHT64((u64)(w))
+
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
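
The HWEIGHT*() additions above are the classic constant-folding popcount: every term is an integer constant expression, so the result can be used where the compiler demands a true constant (array sizes, initializers, and so on). A userspace sketch; the BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) guard from the kernel version is omitted here to keep it portable.

#include <stdio.h>

#define HWEIGHT8(w)						\
	((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) +	\
	 (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) +	\
	 (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) +	\
	 (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))

#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))

int main(void)
{
	/* Usable where a true constant is required, e.g. an array size: */
	char pad[HWEIGHT32(0x000f00ffu)];	/* 12 bits set */

	printf("HWEIGHT64(0xff00ff00ff00ff00) = %d\n",
	       HWEIGHT64(0xff00ff00ff00ff00ULL));
	printf("sizeof(pad) = %zu\n", sizeof(pad));
	return 0;
}
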
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1cbb36f..01e6ade 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -134,6 +134,8 @@ extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
+extern int ftrace_text_reserved(void *start, void *end);
+
enum {
FTRACE_FL_FREE = (1 << 0),
FTRACE_FL_FAILED = (1 << 1),
@@ -141,7 +143,6 @@ enum {
FTRACE_FL_ENABLED = (1 << 3),
FTRACE_FL_NOTRACE = (1 << 4),
FTRACE_FL_CONVERTED = (1 << 5),
- FTRACE_FL_FROZEN = (1 << 6),
};
struct dyn_ftrace {
@@ -250,6 +251,10 @@ static inline int unregister_ftrace_command(char *cmd_name)
{
return -EINVAL;
}
+static inline int ftrace_text_reserved(void *start, void *end)
+{
+ return 0;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 84a5629..6b7c444 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -5,6 +5,7 @@
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/perf_event.h>
struct trace_array;
struct tracer;
@@ -137,9 +138,6 @@ struct ftrace_event_call {
#define FTRACE_MAX_PROFILE_SIZE 2048
-extern char *perf_trace_buf;
-extern char *perf_trace_buf_nmi;
-
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
@@ -187,13 +185,27 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
+extern void *
+ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+ unsigned long *irq_flags);
+
+static inline void
+ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+ u64 count, unsigned long irq_flags)
+{
+ struct trace_entry *entry = raw_data;
+
+ perf_tp_event(entry->type, addr, count, raw_data, size);
+ perf_swevent_put_recursion_context(rctx);
+ local_irq_restore(irq_flags);
+}
#endif
#endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 969f6e9..5d9c655 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -206,6 +206,20 @@ static inline int list_empty_careful(const struct list_head *head)
}
/**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+ struct list_head *first;
+
+ if (!list_empty(head)) {
+ first = head->next;
+ list_move_tail(first, head);
+ }
+}
+
+/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
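
list_rotate_left() is used later in this series to rotate the flexible event groups each tick. Below is a minimal userspace re-creation of just enough of the circular-list primitives to watch it work; it is an illustration, not the <linux/list.h> implementation.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *head)
{
	e->prev->next = e->next;	/* unlink */
	e->next->prev = e->prev;
	list_add_tail(e, head);
}

/* Same body as the new helper above */
static void list_rotate_left(struct list_head *head)
{
	if (!list_empty(head))
		list_move_tail(head->next, head);
}

struct item { struct list_head link; int id; };	/* link first: cast is safe */

int main(void)
{
	struct list_head groups;
	struct item a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list_head *p;

	INIT_LIST_HEAD(&groups);
	list_add_tail(&a.link, &groups);
	list_add_tail(&b.link, &groups);
	list_add_tail(&c.link, &groups);

	list_rotate_left(&groups);	/* 1 2 3 -> 2 3 1 */

	for (p = groups.next; p != &groups; p = p->next)
		printf("%d ", ((struct item *)p)->id);
	printf("\n");
	return 0;
}
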
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a177698..7b18b4f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -288,7 +288,7 @@ struct perf_event_mmap_page {
};
#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
@@ -354,8 +354,8 @@ enum perf_event_type {
* u64 stream_id;
* };
*/
- PERF_RECORD_THROTTLE = 5,
- PERF_RECORD_UNTHROTTLE = 6,
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
/*
* struct {
@@ -369,10 +369,10 @@ enum perf_event_type {
/*
* struct {
- * struct perf_event_header header;
- * u32 pid, tid;
+ * struct perf_event_header header;
+ * u32 pid, tid;
*
- * struct read_format values;
+ * struct read_format values;
* };
*/
PERF_RECORD_READ = 8,
@@ -410,7 +410,7 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
* };
*/
- PERF_RECORD_SAMPLE = 9,
+ PERF_RECORD_SAMPLE = 9,
PERF_RECORD_MAX, /* non-ABI */
};
@@ -476,9 +476,11 @@ struct hw_perf_event {
union {
struct { /* hardware */
u64 config;
+ u64 last_tag;
unsigned long config_base;
unsigned long event_base;
int idx;
+ int last_cpu;
};
struct { /* software */
s64 remaining;
@@ -496,9 +498,8 @@ struct hw_perf_event {
atomic64_t period_left;
u64 interrupts;
- u64 freq_count;
- u64 freq_interrupts;
- u64 freq_stamp;
+ u64 freq_time_stamp;
+ u64 freq_count_stamp;
#endif
};
@@ -510,6 +511,8 @@ struct perf_event;
struct pmu {
int (*enable) (struct perf_event *event);
void (*disable) (struct perf_event *event);
+ int (*start) (struct perf_event *event);
+ void (*stop) (struct perf_event *event);
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);
};
@@ -563,6 +566,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
struct perf_sample_data *,
struct pt_regs *regs);
+enum perf_group_flag {
+ PERF_GROUP_SOFTWARE = 0x1,
+};
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -572,6 +579,7 @@ struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
int nr_siblings;
+ int group_flags;
struct perf_event *group_leader;
struct perf_event *output;
const struct pmu *pmu;
@@ -656,7 +664,7 @@ struct perf_event {
perf_overflow_handler_t overflow_handler;
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
struct event_filter *filter;
#endif
@@ -681,7 +689,8 @@ struct perf_event_context {
*/
struct mutex mutex;
- struct list_head group_list;
+ struct list_head pinned_groups;
+ struct list_head flexible_groups;
struct list_head event_list;
int nr_events;
int nr_active;
@@ -744,10 +753,9 @@ extern int perf_max_events;
extern const struct pmu *hw_perf_event_init(struct perf_event *event);
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
@@ -762,7 +770,7 @@ extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx, int cpu);
+ struct perf_event_context *ctx);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
@@ -851,8 +859,7 @@ extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
- void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
@@ -873,12 +880,12 @@ extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
#else
static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu) { }
+perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu) { }
+ struct task_struct *next) { }
static inline void
-perf_event_task_tick(struct task_struct *task, int cpu) { }
+perf_event_task_tick(struct task_struct *task) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
@@ -893,13 +900,13 @@ static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr) { }
static inline void
-perf_bp_event(struct perf_event *event, void *data) { }
+perf_bp_event(struct perf_event *event, void *data) { }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
-static inline int perf_swevent_get_recursion_context(void) { return -1; }
+static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
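
The struct pmu additions (->start() and ->stop()) are optional callbacks: later in kernel/perf_event.c the core falls back to ->enable()/->disable() when a PMU does not provide them. The pattern in isolation, with made-up names and a dummy backend:

#include <stdio.h>

struct event;

struct pmu_ops {
	int  (*enable)(struct event *e);
	void (*disable)(struct event *e);
	int  (*start)(struct event *e);		/* optional */
	void (*stop)(struct event *e);		/* optional */
};

struct event { const struct pmu_ops *pmu; };

/* Mirrors the fallback in perf_event_start()/perf_event_stop() below */
static int event_start(struct event *e)
{
	if (e->pmu->start)
		return e->pmu->start(e);
	return e->pmu->enable(e);
}

static void event_stop(struct event *e)
{
	if (e->pmu->stop)
		e->pmu->stop(e);
	else
		e->pmu->disable(e);
}

static int dummy_enable(struct event *e)   { printf("enable\n");  return 0; }
static void dummy_disable(struct event *e) { printf("disable\n"); }

/* A PMU that only implements the mandatory hooks */
static const struct pmu_ops dummy_pmu = {
	.enable  = dummy_enable,
	.disable = dummy_disable,
};

int main(void)
{
	struct event ev = { .pmu = &dummy_pmu };

	event_start(&ev);	/* falls back to ->enable() */
	event_stop(&ev);	/* falls back to ->disable() */
	return 0;
}
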
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 91bd7d7..8126f23 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,7 +99,7 @@ struct perf_event_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
.profile_enable = prof_sysenter_enable, \
@@ -113,7 +113,7 @@ struct perf_event_attr;
#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
#define TRACE_SYS_EXIT_PROFILE(sname)
#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba1..5c1dcfc 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+ TP_printk("%p %s%s%s", __entry->lockdep_addr,
+ (__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
__get_str(name))
);
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);
#ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);
TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,
TP_STRUCT__entry(
__string(name, lock->name)
- __field(unsigned long, wait_usec)
- __field(unsigned long, wait_nsec_rem)
+ __field(s64, wait_nsec)
+ __field(void *, lockdep_addr)
),
+
TP_fast_assign(
__assign_str(name, lock->name);
- __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
- __entry->wait_usec = (unsigned long) waittime;
+ __entry->wait_nsec = waittime;
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
- __entry->wait_nsec_rem)
+ TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+ __get_str(name),
+ __entry->wait_nsec)
);
#endif
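
The lock_acquired change above drops the do_div() split into whole microseconds plus a nanosecond remainder and records the raw nanosecond wait time instead, leaving formatting to the consumer. Both representations of the same (made-up) wait time:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t waittime = 1234567;		/* ns */

	uint64_t wait_usec = waittime / 1000;		/* old field: 1234 */
	uint64_t wait_nsec_rem = waittime % 1000;	/* old field: 567 */

	printf("old format: %llu.%03llu us\n",
	       (unsigned long long)wait_usec,
	       (unsigned long long)wait_nsec_rem);
	printf("new format: %llu ns\n", (unsigned long long)waittime);
	return 0;
}
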
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f23a0ca..0804cd5 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -376,7 +376,7 @@ static inline notrace int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
/*
* Generate the functions needed for tracepoint perf_event support.
@@ -421,7 +421,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
/*
* Stage 4 of the trace events.
@@ -505,7 +505,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
*
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#define _TRACE_PROFILE_INIT(call) \
.profile_enable = ftrace_profile_enable_##call, \
@@ -513,7 +513,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
#else
#define _TRACE_PROFILE_INIT(call)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry
@@ -736,7 +736,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* }
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#undef __entry
#define __entry entry
@@ -761,22 +761,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- extern int perf_swevent_get_recursion_context(void); \
- extern void perf_swevent_put_recursion_context(int rctx); \
- extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
- struct trace_entry *ent; \
int __entry_size; \
int __data_size; \
- char *trace_buf; \
- char *raw_data; \
- int __cpu; \
int rctx; \
- int pc; \
- \
- pc = preempt_count(); \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
@@ -786,42 +776,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
"profile buffer not large enough")) \
return; \
- \
- local_irq_save(irq_flags); \
- \
- rctx = perf_swevent_get_recursion_context(); \
- if (rctx < 0) \
- goto end_recursion; \
- \
- __cpu = smp_processor_id(); \
- \
- if (in_nmi()) \
- trace_buf = rcu_dereference(perf_trace_buf_nmi); \
- else \
- trace_buf = rcu_dereference(perf_trace_buf); \
- \
- if (!trace_buf) \
- goto end; \
- \
- raw_data = per_cpu_ptr(trace_buf, __cpu); \
- \
- *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
- entry = (struct ftrace_raw_##call *)raw_data; \
- ent = &entry->ent; \
- tracing_generic_entry_update(ent, irq_flags, pc); \
- ent->type = event_call->id; \
- \
+ entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \
+ __entry_size, event_call->id, &rctx, &irq_flags); \
+ if (!entry) \
+ return; \
tstruct \
\
{ assign; } \
\
- perf_tp_event(event_call->id, __addr, __count, entry, \
- __entry_size); \
- \
-end: \
- perf_swevent_put_recursion_context(rctx); \
-end_recursion: \
- local_irq_restore(irq_flags); \
+ ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, irq_flags); \
}
#undef DEFINE_EVENT
@@ -838,7 +802,7 @@ static notrace void ftrace_profile_##call(proto) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
#undef _TRACE_PROFILE_INIT
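
The generated profiling probes now delegate their buffer boilerplate to ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() instead of open-coding it per event (compare the removed lines in the hunk above). A very rough userspace analogue of the prepare/fill/submit shape; the static buffer and the cookie are stand-ins, not the kernel's per-CPU buffers or recursion context.

#include <stdio.h>
#include <string.h>

static char scratch[2048];

static void *buf_prepare(size_t size, int *cookie)
{
	if (size > sizeof(scratch))
		return NULL;
	*cookie = 42;			/* stands in for the recursion context */
	memset(scratch, 0, size);
	return scratch;
}

static void buf_submit(void *data, size_t size, int cookie)
{
	printf("emit %zu bytes (cookie %d): \"%s\"\n",
	       size, cookie, (char *)data);
}

int main(void)
{
	int cookie;
	char *entry = buf_prepare(64, &cookie);

	if (!entry)
		return 1;
	snprintf(entry, 64, "sys_enter id=%d", 3);	/* probe-specific payload */
	buf_submit(entry, 64, cookie);
	return 0;
}
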
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cd4102..0387100 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -45,12 +45,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
#endif
-#ifdef CONFIG_EVENT_PROFILE
+
+#ifdef CONFIG_PERF_EVENTS
int prof_sysenter_enable(struct ftrace_event_call *call);
void prof_sysenter_disable(struct ftrace_event_call *call);
int prof_sysexit_enable(struct ftrace_event_call *call);
void prof_sysexit_disable(struct ftrace_event_call *call);
-
#endif
#endif /* _TRACE_SYSCALL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 207ae29..c6d95f8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -992,19 +992,6 @@ config PERF_EVENTS
Say Y if unsure.
-config EVENT_PROFILE
- bool "Tracepoint profiling sources"
- depends on PERF_EVENTS && EVENT_TRACING
- default y
- help
- Allow the use of tracepoints as software performance events.
-
- When this is enabled, you can create perf events based on
- tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
- found in debugfs://tracing/events/*/*/id. (The -e/--events
- option to the perf tool can parse and interpret symbolic
- tracepoints, in the subsystem:tracepoint_name format.)
-
config PERF_COUNTERS
bool "Kernel performance counters (old config option)"
depends on HAVE_PERF_EVENTS
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c4b4343..ccec774 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
+#include <linux/ftrace.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -125,30 +126,6 @@ static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
-static int __kprobes check_safety(void)
-{
- int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
- ret = freeze_processes();
- if (ret == 0) {
- struct task_struct *p, *q;
- do_each_thread(p, q) {
- if (p != current && p->state == TASK_RUNNING &&
- p->pid != 0) {
- printk("Check failed: %s is running\n",p->comm);
- ret = -1;
- goto loop_end;
- }
- } while_each_thread(p, q);
- }
-loop_end:
- thaw_processes();
-#else
- synchronize_sched();
-#endif
- return ret;
-}
-
/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
@@ -236,9 +213,8 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip, *next;
- /* Ensure no-one is preepmted on the garbages */
- if (check_safety())
- return -EAGAIN;
+	/* Ensure no-one is interrupted on the garbage slots */
+ synchronize_sched();
list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
@@ -729,7 +705,8 @@ int __kprobes register_kprobe(struct kprobe *p)
preempt_disable();
if (!kernel_text_address((unsigned long) p->addr) ||
- in_kprobes_functions((unsigned long) p->addr)) {
+ in_kprobes_functions((unsigned long) p->addr) ||
+ ftrace_text_reserved(p->addr, p->addr)) {
preempt_enable();
return -EINVAL;
}
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2ae7409..a661e79 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -98,11 +98,12 @@ void __weak hw_perf_enable(void) { barrier(); }
void __weak hw_perf_event_setup(int cpu) { barrier(); }
void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
+void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx, int cpu)
+ struct perf_event_context *ctx)
{
return 0;
}
@@ -248,7 +249,7 @@ static void perf_unpin_context(struct perf_event_context *ctx)
static inline u64 perf_clock(void)
{
- return cpu_clock(smp_processor_id());
+ return cpu_clock(raw_smp_processor_id());
}
/*
@@ -289,6 +290,15 @@ static void update_event_times(struct perf_event *event)
event->total_time_running = run_end - event->tstamp_running;
}
+static struct list_head *
+ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
+{
+ if (event->attr.pinned)
+ return &ctx->pinned_groups;
+ else
+ return &ctx->flexible_groups;
+}
+
/*
* Add a event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
@@ -303,9 +313,19 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
* add it straight to the context's event list, or to the group
* leader's sibling list:
*/
- if (group_leader == event)
- list_add_tail(&event->group_entry, &ctx->group_list);
- else {
+ if (group_leader == event) {
+ struct list_head *list;
+
+ if (is_software_event(event))
+ event->group_flags |= PERF_GROUP_SOFTWARE;
+
+ list = ctx_group_list(event, ctx);
+ list_add_tail(&event->group_entry, list);
+ } else {
+ if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
+ !is_software_event(event))
+ group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
+
list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
}
@@ -355,9 +375,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
* to the context list directly:
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
+ struct list_head *list;
- list_move_tail(&sibling->group_entry, &ctx->group_list);
+ list = ctx_group_list(event, ctx);
+ list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
+
+ /* Inherit group flags from the previous leader */
+ sibling->group_flags = event->group_flags;
}
}
@@ -608,14 +633,13 @@ void perf_event_disable(struct perf_event *event)
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx,
- int cpu)
+ struct perf_event_context *ctx)
{
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
+ event->oncpu = smp_processor_id();
/*
* The new state must be visible before we turn it on in the hardware:
*/
@@ -642,8 +666,7 @@ event_sched_in(struct perf_event *event,
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx,
- int cpu)
+ struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group;
int ret;
@@ -651,18 +674,18 @@ group_sched_in(struct perf_event *group_event,
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
+ ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
if (ret)
return ret < 0 ? ret : 0;
- if (event_sched_in(group_event, cpuctx, ctx, cpu))
+ if (event_sched_in(group_event, cpuctx, ctx))
return -EAGAIN;
/*
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
- if (event_sched_in(event, cpuctx, ctx, cpu)) {
+ if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
@@ -686,24 +709,6 @@ group_error:
}
/*
- * Return 1 for a group consisting entirely of software events,
- * 0 if the group contains any hardware events.
- */
-static int is_software_only_group(struct perf_event *leader)
-{
- struct perf_event *event;
-
- if (!is_software_event(leader))
- return 0;
-
- list_for_each_entry(event, &leader->sibling_list, group_entry)
- if (!is_software_event(event))
- return 0;
-
- return 1;
-}
-
-/*
* Work out whether we can put this event group on the CPU now.
*/
static int group_can_go_on(struct perf_event *event,
@@ -713,7 +718,7 @@ static int group_can_go_on(struct perf_event *event,
/*
* Groups consisting entirely of software events can always go on.
*/
- if (is_software_only_group(event))
+ if (event->group_flags & PERF_GROUP_SOFTWARE)
return 1;
/*
* If an exclusive group is already on, no other hardware
@@ -754,7 +759,6 @@ static void __perf_install_in_context(void *info)
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
- int cpu = smp_processor_id();
int err;
/*
@@ -801,7 +805,7 @@ static void __perf_install_in_context(void *info)
if (!group_can_go_on(event, cpuctx, 1))
err = -EEXIST;
else
- err = event_sched_in(event, cpuctx, ctx, cpu);
+ err = event_sched_in(event, cpuctx, ctx);
if (err) {
/*
@@ -943,11 +947,9 @@ static void __perf_event_enable(void *info)
} else {
perf_disable();
if (event == leader)
- err = group_sched_in(event, cpuctx, ctx,
- smp_processor_id());
+ err = group_sched_in(event, cpuctx, ctx);
else
- err = event_sched_in(event, cpuctx, ctx,
- smp_processor_id());
+ err = event_sched_in(event, cpuctx, ctx);
perf_enable();
}
@@ -1043,8 +1045,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
return 0;
}
-void __perf_event_sched_out(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx)
+enum event_type_t {
+ EVENT_FLEXIBLE = 0x1,
+ EVENT_PINNED = 0x2,
+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+static void ctx_sched_out(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type)
{
struct perf_event *event;
@@ -1055,10 +1064,18 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
update_context_time(ctx);
perf_disable();
- if (ctx->nr_active) {
- list_for_each_entry(event, &ctx->group_list, group_entry)
+ if (!ctx->nr_active)
+ goto out_enable;
+
+ if (event_type & EVENT_PINNED)
+ list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
- }
+
+ if (event_type & EVENT_FLEXIBLE)
+ list_for_each_entry(event, &ctx->flexible_groups, group_entry)
+ group_sched_out(event, cpuctx, ctx);
+
+ out_enable:
perf_enable();
out:
raw_spin_unlock(&ctx->lock);
@@ -1170,9 +1187,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
* not restart the event.
*/
void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu)
+ struct task_struct *next)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = task->perf_event_ctxp;
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
@@ -1220,15 +1237,13 @@ void perf_event_task_sched_out(struct task_struct *task,
rcu_read_unlock();
if (do_switch) {
- __perf_event_sched_out(ctx, cpuctx);
+ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
}
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_event_context *ctx,
+ enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
@@ -1238,47 +1253,41 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- __perf_event_sched_out(ctx, cpuctx);
+ ctx_sched_out(ctx, cpuctx, event_type);
cpuctx->task_ctx = NULL;
}
/*
* Called with IRQs disabled
*/
-static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+{
+ task_ctx_sched_out(ctx, EVENT_ALL);
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type)
{
- __perf_event_sched_out(&cpuctx->ctx, cpuctx);
+ ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
-__perf_event_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx, int cpu)
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
- int can_add_hw = 1;
-
- raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
- if (likely(!ctx->nr_events))
- goto out;
- ctx->timestamp = perf_clock();
-
- perf_disable();
-
- /*
- * First go through the list and put on any pinned groups
- * in order to give them the best chance of going on.
- */
- list_for_each_entry(event, &ctx->group_list, group_entry) {
- if (event->state <= PERF_EVENT_STATE_OFF ||
- !event->attr.pinned)
+ list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
+ if (event->state <= PERF_EVENT_STATE_OFF)
continue;
- if (event->cpu != -1 && event->cpu != cpu)
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
continue;
if (group_can_go_on(event, cpuctx, 1))
- group_sched_in(event, cpuctx, ctx, cpu);
+ group_sched_in(event, cpuctx, ctx);
/*
* If this pinned group hasn't been scheduled,
@@ -1289,32 +1298,83 @@ __perf_event_sched_in(struct perf_event_context *ctx,
event->state = PERF_EVENT_STATE_ERROR;
}
}
+}
- list_for_each_entry(event, &ctx->group_list, group_entry) {
- /*
- * Ignore events in OFF or ERROR state, and
- * ignore pinned events since we did them already.
- */
- if (event->state <= PERF_EVENT_STATE_OFF ||
- event->attr.pinned)
- continue;
+static void
+ctx_flexible_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx)
+{
+ struct perf_event *event;
+ int can_add_hw = 1;
+ list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+ /* Ignore events in OFF or ERROR state */
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ continue;
/*
* Listen to the 'cpu' scheduling filter constraint
* of events:
*/
- if (event->cpu != -1 && event->cpu != cpu)
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
continue;
if (group_can_go_on(event, cpuctx, can_add_hw))
- if (group_sched_in(event, cpuctx, ctx, cpu))
+ if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
}
+}
+
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type)
+{
+ raw_spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ if (likely(!ctx->nr_events))
+ goto out;
+
+ ctx->timestamp = perf_clock();
+
+ perf_disable();
+
+ /*
+ * First go through the list and put on any pinned groups
+ * in order to give them the best chance of going on.
+ */
+ if (event_type & EVENT_PINNED)
+ ctx_pinned_sched_in(ctx, cpuctx);
+
+ /* Then walk through the lower prio flexible groups */
+ if (event_type & EVENT_FLEXIBLE)
+ ctx_flexible_sched_in(ctx, cpuctx);
+
perf_enable();
out:
raw_spin_unlock(&ctx->lock);
}
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type)
+{
+ struct perf_event_context *ctx = &cpuctx->ctx;
+
+ ctx_sched_in(ctx, cpuctx, event_type);
+}
+
+static void task_ctx_sched_in(struct task_struct *task,
+ enum event_type_t event_type)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_event_context *ctx = task->perf_event_ctxp;
+
+ if (likely(!ctx))
+ return;
+ if (cpuctx->task_ctx == ctx)
+ return;
+ ctx_sched_in(ctx, cpuctx, event_type);
+ cpuctx->task_ctx = ctx;
+}
/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
@@ -1326,38 +1386,128 @@ __perf_event_sched_in(struct perf_event_context *ctx,
* accessing the event control register. If a NMI hits, then it will
* keep the event running.
*/
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = task->perf_event_ctxp;
if (likely(!ctx))
return;
+
if (cpuctx->task_ctx == ctx)
return;
- __perf_event_sched_in(ctx, cpuctx, cpu);
+
+ /*
+ * We want to keep the following priority order:
+ * cpu pinned (that don't need to move), task pinned,
+ * cpu flexible, task flexible.
+ */
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
cpuctx->task_ctx = ctx;
}
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
+static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
- struct perf_event_context *ctx = &cpuctx->ctx;
+ u64 frequency = event->attr.sample_freq;
+ u64 sec = NSEC_PER_SEC;
+ u64 divisor, dividend;
+
+ int count_fls, nsec_fls, frequency_fls, sec_fls;
+
+ count_fls = fls64(count);
+ nsec_fls = fls64(nsec);
+ frequency_fls = fls64(frequency);
+ sec_fls = 30;
- __perf_event_sched_in(ctx, cpuctx, cpu);
+ /*
+ * We got @count in @nsec, with a target of sample_freq HZ
+ * the target period becomes:
+ *
+ * @count * 10^9
+ * period = -------------------
+ * @nsec * sample_freq
+ *
+ */
+
+ /*
+ * Reduce accuracy by one bit such that @a and @b converge
+ * to a similar magnitude.
+ */
+#define REDUCE_FLS(a, b) \
+do { \
+ if (a##_fls > b##_fls) { \
+ a >>= 1; \
+ a##_fls--; \
+ } else { \
+ b >>= 1; \
+ b##_fls--; \
+ } \
+} while (0)
+
+ /*
+ * Reduce accuracy until either term fits in a u64, then proceed with
+ * the other, so that finally we can do a u64/u64 division.
+ */
+ while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
+ REDUCE_FLS(nsec, frequency);
+ REDUCE_FLS(sec, count);
+ }
+
+ if (count_fls + sec_fls > 64) {
+ divisor = nsec * frequency;
+
+ while (count_fls + sec_fls > 64) {
+ REDUCE_FLS(count, sec);
+ divisor >>= 1;
+ }
+
+ dividend = count * sec;
+ } else {
+ dividend = count * sec;
+
+ while (nsec_fls + frequency_fls > 64) {
+ REDUCE_FLS(nsec, frequency);
+ dividend >>= 1;
+ }
+
+ divisor = nsec * frequency;
+ }
+
+ return div64_u64(dividend, divisor);
}
-#define MAX_INTERRUPTS (~0ULL)
+static void perf_event_stop(struct perf_event *event)
+{
+ if (!event->pmu->stop)
+ return event->pmu->disable(event);
-static void perf_log_throttle(struct perf_event *event, int enable);
+ return event->pmu->stop(event);
+}
+
+static int perf_event_start(struct perf_event *event)
+{
+ if (!event->pmu->start)
+ return event->pmu->enable(event);
-static void perf_adjust_period(struct perf_event *event, u64 events)
+ return event->pmu->start(event);
+}
+
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
struct hw_perf_event *hwc = &event->hw;
u64 period, sample_period;
s64 delta;
- events *= hwc->sample_period;
- period = div64_u64(events, event->attr.sample_freq);
+ period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
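
The target-period formula in perf_calculate_period() above can be sanity-checked with plain 128-bit arithmetic:

	period = (count * 10^9) / (nsec * sample_freq)

The kernel cannot rely on a 64x64 -> 128 bit divide, which is why it shifts the operands down (REDUCE_FLS) until a u64/u64 division is safe. A userspace check using GCC/Clang's unsigned __int128; the numbers are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t count = 1234567;	/* events counted over the interval */
	uint64_t nsec  = 4000000;	/* 4 ms interval */
	uint64_t freq  = 4000;		/* requested samples per second */

	unsigned __int128 dividend = (unsigned __int128)count * 1000000000ULL;
	unsigned __int128 divisor  = (unsigned __int128)nsec * freq;

	printf("target period = %llu events/sample\n",
	       (unsigned long long)(dividend / divisor));
	return 0;
}

With these inputs the target period is 77160 events per sample, i.e. roughly 4000 samples per second at the observed event rate, which is what the frequency-based sampling loop is converging toward.
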
@@ -1368,13 +1518,22 @@ static void perf_adjust_period(struct perf_event *event, u64 events)
sample_period = 1;
hwc->sample_period = sample_period;
+
+ if (atomic64_read(&hwc->period_left) > 8*sample_period) {
+ perf_disable();
+ perf_event_stop(event);
+ atomic64_set(&hwc->period_left, 0);
+ perf_event_start(event);
+ perf_enable();
+ }
}
static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
struct perf_event *event;
struct hw_perf_event *hwc;
- u64 interrupts, freq;
+ u64 interrupts, now;
+ s64 delta;
raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
@@ -1395,44 +1554,18 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
if (interrupts == MAX_INTERRUPTS) {
perf_log_throttle(event, 1);
event->pmu->unthrottle(event);
- interrupts = 2*sysctl_perf_event_sample_rate/HZ;
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
- /*
- * if the specified freq < HZ then we need to skip ticks
- */
- if (event->attr.sample_freq < HZ) {
- freq = event->attr.sample_freq;
-
- hwc->freq_count += freq;
- hwc->freq_interrupts += interrupts;
-
- if (hwc->freq_count < HZ)
- continue;
-
- interrupts = hwc->freq_interrupts;
- hwc->freq_interrupts = 0;
- hwc->freq_count -= HZ;
- } else
- freq = HZ;
-
- perf_adjust_period(event, freq * interrupts);
+ event->pmu->read(event);
+ now = atomic64_read(&event->count);
+ delta = now - hwc->freq_count_stamp;
+ hwc->freq_count_stamp = now;
- /*
- * In order to avoid being stalled by an (accidental) huge
- * sample period, force reset the sample period if we didn't
- * get any events in this freq period.
- */
- if (!interrupts) {
- perf_disable();
- event->pmu->disable(event);
- atomic64_set(&hwc->period_left, 0);
- event->pmu->enable(event);
- perf_enable();
- }
+ if (delta > 0)
+ perf_adjust_period(event, TICK_NSEC, delta);
}
raw_spin_unlock(&ctx->lock);
}
@@ -1442,26 +1575,18 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
- struct perf_event *event;
-
if (!ctx->nr_events)
return;
raw_spin_lock(&ctx->lock);
- /*
- * Rotate the first entry last (works just fine for group events too):
- */
- perf_disable();
- list_for_each_entry(event, &ctx->group_list, group_entry) {
- list_move_tail(&event->group_entry, &ctx->group_list);
- break;
- }
- perf_enable();
+
+	/* Rotate the first entry of the non-pinned groups to the end */
+ list_rotate_left(&ctx->flexible_groups);
raw_spin_unlock(&ctx->lock);
}
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
@@ -1469,24 +1594,43 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
if (!atomic_read(&nr_events))
return;
- cpuctx = &per_cpu(perf_cpu_context, cpu);
+ cpuctx = &__get_cpu_var(perf_cpu_context);
ctx = curr->perf_event_ctxp;
+ perf_disable();
+
perf_ctx_adjust_freq(&cpuctx->ctx);
if (ctx)
perf_ctx_adjust_freq(ctx);
- perf_event_cpu_sched_out(cpuctx);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- __perf_event_task_sched_out(ctx);
+ task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- perf_event_cpu_sched_in(cpuctx, cpu);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- perf_event_task_sched_in(curr, cpu);
+ task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+
+ perf_enable();
+}
+
+static int event_enable_on_exec(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ if (!event->attr.enable_on_exec)
+ return 0;
+
+ event->attr.enable_on_exec = 0;
+ if (event->state >= PERF_EVENT_STATE_INACTIVE)
+ return 0;
+
+ __perf_event_mark_enabled(event, ctx);
+
+ return 1;
}
/*
@@ -1499,6 +1643,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
struct perf_event *event;
unsigned long flags;
int enabled = 0;
+ int ret;
local_irq_save(flags);
ctx = task->perf_event_ctxp;
@@ -1509,14 +1654,16 @@ static void perf_event_enable_on_exec(struct task_struct *task)
raw_spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->group_list, group_entry) {
- if (!event->attr.enable_on_exec)
- continue;
- event->attr.enable_on_exec = 0;
- if (event->state >= PERF_EVENT_STATE_INACTIVE)
- continue;
- __perf_event_mark_enabled(event, ctx);
- enabled = 1;
+ list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
+ ret = event_enable_on_exec(event, ctx);
+ if (ret)
+ enabled = 1;
+ }
+
+ list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+ ret = event_enable_on_exec(event, ctx);
+ if (ret)
+ enabled = 1;
}
/*
@@ -1527,7 +1674,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
raw_spin_unlock(&ctx->lock);
- perf_event_task_sched_in(task, smp_processor_id());
+ perf_event_task_sched_in(task);
out:
local_irq_restore(flags);
}
@@ -1590,7 +1737,8 @@ __perf_event_init_context(struct perf_event_context *ctx,
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
- INIT_LIST_HEAD(&ctx->group_list);
+ INIT_LIST_HEAD(&ctx->pinned_groups);
+ INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
ctx->task = task;
@@ -3608,7 +3756,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
/* .tid */
.start = vma->vm_start,
.len = vma->vm_end - vma->vm_start,
- .pgoff = vma->vm_pgoff,
+ .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
};
@@ -3688,12 +3836,12 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
if (event->attr.freq) {
u64 now = perf_clock();
- s64 delta = now - hwc->freq_stamp;
+ s64 delta = now - hwc->freq_time_stamp;
- hwc->freq_stamp = now;
+ hwc->freq_time_stamp = now;
- if (delta > 0 && delta < TICK_NSEC)
- perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
+ if (delta > 0 && delta < 2*TICK_NSEC)
+ perf_adjust_period(event, delta, hwc->last_period);
}
/*
@@ -4184,7 +4332,7 @@ static const struct pmu perf_ops_task_clock = {
.read = task_clock_perf_event_read,
};
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
int entry_size)
@@ -4289,7 +4437,7 @@ static void perf_event_free_filter(struct perf_event *event)
{
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void bp_perf_event_destroy(struct perf_event *event)
@@ -4870,8 +5018,15 @@ inherit_event(struct perf_event *parent_event,
else
child_event->state = PERF_EVENT_STATE_OFF;
- if (parent_event->attr.freq)
- child_event->hw.sample_period = parent_event->hw.sample_period;
+ if (parent_event->attr.freq) {
+ u64 sample_period = parent_event->hw.sample_period;
+ struct hw_perf_event *hwc = &child_event->hw;
+
+ hwc->sample_period = sample_period;
+ hwc->last_period = sample_period;
+
+ atomic64_set(&hwc->period_left, sample_period);
+ }
child_event->overflow_handler = parent_event->overflow_handler;
@@ -5039,7 +5194,11 @@ void perf_event_exit_task(struct task_struct *child)
mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
again:
- list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
+ list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
+ group_entry)
+ __perf_event_exit_task(child_event, child_ctx, child);
+
+ list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
group_entry)
__perf_event_exit_task(child_event, child_ctx, child);
@@ -5048,7 +5207,8 @@ again:
* its siblings to the list, but we obtained 'tmp' before that which
* will still point to the list head terminating the iteration.
*/
- if (!list_empty(&child_ctx->group_list))
+ if (!list_empty(&child_ctx->pinned_groups) ||
+ !list_empty(&child_ctx->flexible_groups))
goto again;
mutex_unlock(&child_ctx->mutex);
@@ -5056,6 +5216,24 @@ again:
put_ctx(child_ctx);
}
+static void perf_free_event(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ struct perf_event *parent = event->parent;
+
+ if (WARN_ON_ONCE(!parent))
+ return;
+
+ mutex_lock(&parent->child_mutex);
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+ fput(parent->filp);
+
+ list_del_event(event, ctx);
+ free_event(event);
+}
+
/*
* free an unexposed, unused context as created by inheritance by
* init_task below, used by fork() in case of fail.
@@ -5070,36 +5248,70 @@ void perf_event_free_task(struct task_struct *task)
mutex_lock(&ctx->mutex);
again:
- list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
- struct perf_event *parent = event->parent;
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
+ perf_free_event(event, ctx);
- if (WARN_ON_ONCE(!parent))
- continue;
+ list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+ group_entry)
+ perf_free_event(event, ctx);
- mutex_lock(&parent->child_mutex);
- list_del_init(&event->child_list);
- mutex_unlock(&parent->child_mutex);
+ if (!list_empty(&ctx->pinned_groups) ||
+ !list_empty(&ctx->flexible_groups))
+ goto again;
- fput(parent->filp);
+ mutex_unlock(&ctx->mutex);
- list_del_event(event, ctx);
- free_event(event);
+ put_ctx(ctx);
+}
+
+static int
+inherit_task_group(struct perf_event *event, struct task_struct *parent,
+ struct perf_event_context *parent_ctx,
+ struct task_struct *child,
+ int *inherited_all)
+{
+ int ret;
+ struct perf_event_context *child_ctx = child->perf_event_ctxp;
+
+ if (!event->attr.inherit) {
+ *inherited_all = 0;
+ return 0;
}
- if (!list_empty(&ctx->group_list))
- goto again;
+ if (!child_ctx) {
+ /*
+ * This is executed from the parent task context, so
+ * inherit events that have been marked for cloning.
+ * First allocate and initialize a context for the
+ * child.
+ */
- mutex_unlock(&ctx->mutex);
+ child_ctx = kzalloc(sizeof(struct perf_event_context),
+ GFP_KERNEL);
+ if (!child_ctx)
+ return -ENOMEM;
- put_ctx(ctx);
+ __perf_event_init_context(child_ctx, child);
+ child->perf_event_ctxp = child_ctx;
+ get_task_struct(child);
+ }
+
+ ret = inherit_group(event, parent, parent_ctx,
+ child, child_ctx);
+
+ if (ret)
+ *inherited_all = 0;
+
+ return ret;
}
+
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_task(struct task_struct *child)
{
- struct perf_event_context *child_ctx = NULL, *parent_ctx;
+ struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
@@ -5137,41 +5349,22 @@ int perf_event_init_task(struct task_struct *child)
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
- list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
-
- if (!event->attr.inherit) {
- inherited_all = 0;
- continue;
- }
-
- if (!child->perf_event_ctxp) {
- /*
- * This is executed from the parent task context, so
- * inherit events that have been marked for cloning.
- * First allocate and initialize a context for the
- * child.
- */
-
- child_ctx = kzalloc(sizeof(struct perf_event_context),
- GFP_KERNEL);
- if (!child_ctx) {
- ret = -ENOMEM;
- break;
- }
-
- __perf_event_init_context(child_ctx, child);
- child->perf_event_ctxp = child_ctx;
- get_task_struct(child);
- }
+ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
+ ret = inherit_task_group(event, parent, parent_ctx, child,
+ &inherited_all);
+ if (ret)
+ break;
+ }
- ret = inherit_group(event, parent, parent_ctx,
- child, child_ctx);
- if (ret) {
- inherited_all = 0;
+ list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
+ ret = inherit_task_group(event, parent, parent_ctx, child,
+ &inherited_all);
+ if (ret)
break;
- }
}
+ child_ctx = child->perf_event_ctxp;
+
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
@@ -5220,7 +5413,9 @@ static void __perf_event_exit_cpu(void *info)
struct perf_event_context *ctx = &cpuctx->ctx;
struct perf_event *event, *tmp;
- list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
+ __perf_event_remove_from_context(event);
+ list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
__perf_event_remove_from_context(event);
}
static void perf_event_exit_cpu(int cpu)
@@ -5258,6 +5453,10 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
perf_event_exit_cpu(cpu);
break;
+ case CPU_DEAD:
+ hw_perf_event_setup_offline(cpu);
+ break;
+
default:
break;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 3218f52..9d163f8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2799,7 +2799,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
- perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+ perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
@@ -5314,7 +5320,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
raw_spin_unlock(&rq->lock);
- perf_event_task_tick(curr, cpu);
+ perf_event_task_tick(curr);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -5528,7 +5534,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
- perf_event_task_sched_out(prev, next, cpu);
+ perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cd9ecd8..d00c6fe 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -51,7 +51,9 @@ endif
obj-$(CONFIG_EVENT_TRACING) += trace_events.o
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
-obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+ifeq ($(CONFIG_PERF_EVENTS),y)
+obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d996353..8378357 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -22,7 +22,6 @@
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
-#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
@@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
} \
}
-#ifdef CONFIG_KPROBES
-
-static int frozen_record_count;
-
-static inline void freeze_record(struct dyn_ftrace *rec)
-{
- if (!(rec->flags & FTRACE_FL_FROZEN)) {
- rec->flags |= FTRACE_FL_FROZEN;
- frozen_record_count++;
- }
-}
-
-static inline void unfreeze_record(struct dyn_ftrace *rec)
-{
- if (rec->flags & FTRACE_FL_FROZEN) {
- rec->flags &= ~FTRACE_FL_FROZEN;
- frozen_record_count--;
- }
-}
-
-static inline int record_frozen(struct dyn_ftrace *rec)
-{
- return rec->flags & FTRACE_FL_FROZEN;
-}
-#else
-# define freeze_record(rec) ({ 0; })
-# define unfreeze_record(rec) ({ 0; })
-# define record_frozen(rec) ({ 0; })
-#endif /* CONFIG_KPROBES */
-
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
rec->freelist = ftrace_free_records;
@@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
}
+/* Return 1 if the address range is reserved for ftrace */
+int ftrace_text_reserved(void *start, void *end)
+{
+ struct dyn_ftrace *rec;
+ struct ftrace_page *pg;
+
+ do_for_each_ftrace_rec(pg, rec) {
+ if (rec->ip <= (unsigned long)end &&
+ rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
+ return 1;
+ } while_for_each_ftrace_rec();
+ return 0;
+}
+
+
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
@@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
!(rec->flags & FTRACE_FL_CONVERTED))
continue;
- /* ignore updates to this record's mcount site */
- if (get_kprobe((void *)rec->ip)) {
- freeze_record(rec);
- continue;
- } else {
- unfreeze_record(rec);
- }
-
failed = __ftrace_replace_code(rec, enable);
if (failed) {
rec->flags |= FTRACE_FL_FAILED;
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 9e25573..f0d6930 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -6,14 +6,12 @@
*/
#include <linux/module.h>
+#include <linux/kprobes.h>
#include "trace.h"
-char *perf_trace_buf;
-EXPORT_SYMBOL_GPL(perf_trace_buf);
-
-char *perf_trace_buf_nmi;
-EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
+static char *perf_trace_buf;
+static char *perf_trace_buf_nmi;
typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
@@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id)
}
mutex_unlock(&event_mutex);
}
+
+__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
+ int *rctxp, unsigned long *irq_flags)
+{
+ struct trace_entry *entry;
+ char *trace_buf, *raw_data;
+ int pc, cpu;
+
+ pc = preempt_count();
+
+ /* Protect the per cpu buffer, begin the rcu read side */
+ local_irq_save(*irq_flags);
+
+ *rctxp = perf_swevent_get_recursion_context();
+ if (*rctxp < 0)
+ goto err_recursion;
+
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ else
+ trace_buf = rcu_dereference(perf_trace_buf);
+
+ if (!trace_buf)
+ goto err;
+
+ raw_data = per_cpu_ptr(trace_buf, cpu);
+
+ /* zero the dead bytes from align to not leak stack to user */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ entry = (struct trace_entry *)raw_data;
+ tracing_generic_entry_update(entry, *irq_flags, pc);
+ entry->type = type;
+
+ return raw_data;
+err:
+ perf_swevent_put_recursion_context(*rctxp);
+err_recursion:
+ local_irq_restore(*irq_flags);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e42af9a..4615f62 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1371,7 +1371,7 @@ out_unlock:
return err;
}
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
void ftrace_profile_free_filter(struct perf_event *event)
{
@@ -1439,5 +1439,5 @@ out_unlock:
return err;
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 465b36b..505c922 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
return retval;
}
-static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
-{
- return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
-}
-
static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
void *dummy)
{
@@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
int ret = -EINVAL;
- if (ff->func == fetch_argument)
- ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
- else if (ff->func == fetch_register) {
+ if (ff->func == fetch_register) {
const char *name;
name = regs_query_register_name((unsigned int)((long)ff->data));
ret = snprintf(buf, n, "%%%s", name);
@@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
}
} else
ret = -EINVAL;
- } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
- ret = strict_strtoul(arg + 3, 10, &param);
- if (ret || param > PARAM_MAX_ARGS)
- ret = -EINVAL;
- else {
- ff->func = fetch_argument;
- ff->data = (void *)param;
- }
} else
ret = -EINVAL;
return ret;
@@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv)
* - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
* - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
* Fetch args:
- * $argN : fetch Nth of function argument. (N:0-)
* $retval : fetch return value
* $stack : fetch stack address
* $stackN : fetch Nth of stack (N:0-)
@@ -958,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
};
/* Kprobe handler */
-static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct kprobe_trace_entry *entry;
@@ -978,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
if (!event)
- return 0;
+ return;
entry = ring_buffer_event_data(event);
entry->nargs = tp->nr_args;
@@ -988,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
- return 0;
}
/* Kretprobe handler */
-static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1011,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
if (!event)
- return 0;
+ return;
entry = ring_buffer_event_data(event);
entry->nargs = tp->nr_args;
@@ -1022,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-
- return 0;
}
/* Event entry printers */
@@ -1230,137 +1211,67 @@ static int set_print_fmt(struct trace_probe *tp)
return 0;
}
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
-static __kprobes int kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_profile_func(struct kprobe *kp,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
struct kprobe_trace_entry *entry;
- struct trace_entry *ent;
- int size, __size, i, pc, __cpu;
+ int size, __size, i;
unsigned long irq_flags;
- char *trace_buf;
- char *raw_data;
int rctx;
- pc = preempt_count();
__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
"profile buffer not large enough"))
- return 0;
-
- /*
- * Protect the non nmi buffer
- * This also protects the rcu read side
- */
- local_irq_save(irq_flags);
+ return;
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
- __cpu = smp_processor_id();
-
- if (in_nmi())
- trace_buf = rcu_dereference(perf_trace_buf_nmi);
- else
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto end;
-
- raw_data = per_cpu_ptr(trace_buf, __cpu);
-
- /* Zero dead bytes from alignment to avoid buffer leak to userspace */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
- entry = (struct kprobe_trace_entry *)raw_data;
- ent = &entry->ent;
+ entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+ if (!entry)
+ return;
- tracing_generic_entry_update(ent, irq_flags, pc);
- ent->type = call->id;
entry->nargs = tp->nr_args;
entry->ip = (unsigned long)kp->addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
- perf_tp_event(call->id, entry->ip, 1, entry, size);
-end:
- perf_swevent_put_recursion_context(rctx);
-end_recursion:
- local_irq_restore(irq_flags);
-
- return 0;
+ ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
}
/* Kretprobe profile handler */
-static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
struct kretprobe_trace_entry *entry;
- struct trace_entry *ent;
- int size, __size, i, pc, __cpu;
+ int size, __size, i;
unsigned long irq_flags;
- char *trace_buf;
- char *raw_data;
int rctx;
- pc = preempt_count();
__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
"profile buffer not large enough"))
- return 0;
-
- /*
- * Protect the non nmi buffer
- * This also protects the rcu read side
- */
- local_irq_save(irq_flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
- __cpu = smp_processor_id();
-
- if (in_nmi())
- trace_buf = rcu_dereference(perf_trace_buf_nmi);
- else
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto end;
-
- raw_data = per_cpu_ptr(trace_buf, __cpu);
+ return;
- /* Zero dead bytes from alignment to avoid buffer leak to userspace */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
- entry = (struct kretprobe_trace_entry *)raw_data;
- ent = &entry->ent;
+ entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+ if (!entry)
+ return;
- tracing_generic_entry_update(ent, irq_flags, pc);
- ent->type = call->id;
entry->nargs = tp->nr_args;
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
- perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-
-end:
- perf_swevent_put_recursion_context(rctx);
-end_recursion:
- local_irq_restore(irq_flags);
- return 0;
+ ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
}
static int probe_profile_enable(struct ftrace_event_call *call)
@@ -1388,7 +1299,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
disable_kprobe(&tp->rp.kp);
}
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
static __kprobes
@@ -1398,10 +1309,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
if (tp->flags & TP_FLAG_TRACE)
kprobe_trace_func(kp, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
kprobe_profile_func(kp, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
return 0; /* We don't tweek kernel, so just return 0 */
}
@@ -1412,10 +1323,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
if (tp->flags & TP_FLAG_TRACE)
kretprobe_trace_func(ri, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
kretprobe_profile_func(ri, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
return 0; /* We don't tweek kernel, so just return 0 */
}
@@ -1446,7 +1357,7 @@ static int register_probe_event(struct trace_probe *tp)
call->regfunc = probe_event_enable;
call->unregfunc = probe_event_disable;
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
call->profile_enable = probe_profile_enable;
call->profile_disable = probe_profile_disable;
#endif
@@ -1507,28 +1418,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3,
static __init int kprobe_trace_self_tests_init(void)
{
- int ret;
+ int ret, warn = 0;
int (*target)(int, int, int, int, int, int);
+ struct trace_probe *tp;
target = kprobe_trace_selftest_target;
pr_info("Testing kprobe tracing: ");
ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
- "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
- if (WARN_ON_ONCE(ret))
- pr_warning("error enabling function entry\n");
+ "$stack $stack0 +0($stack)");
+ if (WARN_ON_ONCE(ret)) {
+ pr_warning("error on probing function entry.\n");
+ warn++;
+ } else {
+ /* Enable trace point */
+ tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+ if (WARN_ON_ONCE(tp == NULL)) {
+ pr_warning("error on getting new probe.\n");
+ warn++;
+ } else
+ probe_event_enable(&tp->call);
+ }
ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
"$retval");
- if (WARN_ON_ONCE(ret))
- pr_warning("error enabling function return\n");
+ if (WARN_ON_ONCE(ret)) {
+ pr_warning("error on probing function return.\n");
+ warn++;
+ } else {
+ /* Enable trace point */
+ tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+ if (WARN_ON_ONCE(tp == NULL)) {
+ pr_warning("error on getting new probe.\n");
+ warn++;
+ } else
+ probe_event_enable(&tp->call);
+ }
+
+ if (warn)
+ goto end;
ret = target(1, 2, 3, 4, 5, 6);
- cleanup_all_probes();
+ ret = command_trace_probe("-:testprobe");
+ if (WARN_ON_ONCE(ret)) {
+ pr_warning("error on deleting a probe.\n");
+ warn++;
+ }
+
+ ret = command_trace_probe("-:testprobe2");
+ if (WARN_ON_ONCE(ret)) {
+ pr_warning("error on deleting a probe.\n");
+ warn++;
+ }
- pr_cont("OK\n");
+end:
+ cleanup_all_probes();
+ if (warn)
+ pr_cont("NG: Some tests are failed. Please check them.\n");
+ else
+ pr_cont("OK\n");
return 0;
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index a1834dd..cba47d7 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -426,7 +426,7 @@ int __init init_ftrace_syscalls(void)
}
core_initcall(init_ftrace_syscalls);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -438,12 +438,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec;
unsigned long flags;
- char *trace_buf;
- char *raw_data;
int syscall_nr;
int rctx;
int size;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -462,37 +459,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
"profile buffer not large enough"))
return;
- /* Protect the per cpu buffer, begin the rcu read side */
- local_irq_save(flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
- cpu = smp_processor_id();
-
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto end;
-
- raw_data = per_cpu_ptr(trace_buf, cpu);
-
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+ rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+ sys_data->enter_event->id, &rctx, &flags);
+ if (!rec)
+ return;
- rec = (struct syscall_trace_enter *) raw_data;
- tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->enter_event->id;
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
- perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
-
-end:
- perf_swevent_put_recursion_context(rctx);
-end_recursion:
- local_irq_restore(flags);
+ ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
}
int prof_sysenter_enable(struct ftrace_event_call *call)
@@ -536,11 +511,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
struct syscall_trace_exit *rec;
unsigned long flags;
int syscall_nr;
- char *trace_buf;
- char *raw_data;
int rctx;
int size;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -562,38 +534,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
"exit event has grown above profile buffer size"))
return;
- /* Protect the per cpu buffer, begin the rcu read side */
- local_irq_save(flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
- cpu = smp_processor_id();
-
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto end;
-
- raw_data = per_cpu_ptr(trace_buf, cpu);
-
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
- rec = (struct syscall_trace_exit *)raw_data;
+ rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+ sys_data->exit_event->id, &rctx, &flags);
+ if (!rec)
+ return;
- tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->exit_event->id;
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
- perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
-
-end:
- perf_swevent_put_recursion_context(rctx);
-end_recursion:
- local_irq_restore(flags);
+ ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
}
int prof_sysexit_enable(struct ftrace_event_call *call)
@@ -631,6 +580,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 124760b..e1d60d7 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -14,6 +14,7 @@ perf*.html
common-cmds.h
perf.data
perf.data.old
+perf-archive
tags
TAGS
cscope*
diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt
new file mode 100644
index 0000000..fae174d
--- /dev/null
+++ b/tools/perf/Documentation/perf-archive.txt
@@ -0,0 +1,22 @@
+perf-archive(1)
+===============
+
+NAME
+----
+perf-archive - Create archive with object files with build-ids found in perf.data file
+
+SYNOPSIS
+--------
+[verse]
+'perf archive' [file]
+
+DESCRIPTION
+-----------
+This command runs perf-buildid-list --with-hits and collects the files
+with the build-ids found, so that analysis of the perf.data contents
+can be performed on another machine.
+
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-buildid-list[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
new file mode 100644
index 0000000..88bc3b5
--- /dev/null
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -0,0 +1,33 @@
+perf-buildid-cache(1)
+=====================
+
+NAME
+----
+perf-buildid-cache - Manage build-id cache.
+
+SYNOPSIS
+--------
+[verse]
+'perf buildid-list <options>'
+
+DESCRIPTION
+-----------
+This command manages the build-id cache. It can add files to and remove
+files from the cache. In the future it should also be able to purge older
+entries, set upper limits for the space used by the cache, etc.
+
+OPTIONS
+-------
+-a::
+--add=::
+	Add the specified file to the cache.
+-r::
+--remove=::
+	Remove the specified file from the cache.
+-v::
+--verbose::
+ Be more verbose.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 250e391..2de3407 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -15,6 +15,8 @@ or
'perf probe' [options] --del='[GROUP:]EVENT' [...]
or
'perf probe' --list
+or
+'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
DESCRIPTION
-----------
@@ -45,6 +47,11 @@ OPTIONS
--list::
List up current probe events.
+-L::
+--line=::
+ Show source code lines which can be probed. This needs an argument
+ which specifies a range of the source code.
+
PROBE SYNTAX
------------
Probe points are defined by following syntax.
@@ -56,6 +63,19 @@ Probe points are defined by following syntax.
It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number.
'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
+LINE SYNTAX
+-----------
+A line range is described by the following syntax.
+
+ "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]"
+
+'FUNC' specifies the function whose lines should be shown. 'RLN' is the
+start line number relative to the function entry line, and 'RLN2' is the
+end line number. As with the probe syntax, 'SRC' is the source file path,
+'ALN' is the start line number, and 'ALN2' is the end line number in the
+file. It is also possible to specify how many lines to show by using 'NUM'.
+For example, "source.c:100-120" shows lines 100 to 120 in source.c, and "func:10+20" shows 20 lines starting from the 10th line of the function 'func'.
+
SEE ALSO
--------
linkperf:perf-trace[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4a7d558..785b9fc 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -74,7 +74,7 @@ OPTIONS
-s <symbol>::
--sym-annotate=<symbol>::
- Annotate this symbol. Requires -k option.
+ Annotate this symbol.
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt
index c5f55f4..d729cee 100644
--- a/tools/perf/Documentation/perf-trace-perl.txt
+++ b/tools/perf/Documentation/perf-trace-perl.txt
@@ -8,7 +8,7 @@ perf-trace-perl - Process trace data with a Perl script
SYNOPSIS
--------
[verse]
-'perf trace' [-s [lang]:script[.ext] ]
+'perf trace' [-s [Perl]:script[.pl] ]
DESCRIPTION
-----------
diff --git a/tools/perf/Documentation/perf-trace-python.txt b/tools/perf/Documentation/perf-trace-python.txt
new file mode 100644
index 0000000..a241aca
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace-python.txt
@@ -0,0 +1,625 @@
+perf-trace-python(1)
+====================
+
+NAME
+----
+perf-trace-python - Process trace data with a Python script
+
+SYNOPSIS
+--------
+[verse]
+'perf trace' [-s [Python]:script[.py] ]
+
+DESCRIPTION
+-----------
+
+This perf trace option is used to process perf trace data using perf's
+built-in Python interpreter. It reads and processes the input file and
+displays the results of the trace analysis implemented in the given
+Python script, if any.
+
+A QUICK EXAMPLE
+---------------
+
+This section shows the process, start to finish, of creating a working
+Python script that aggregates and extracts useful information from a
+raw perf trace stream. You can avoid reading the rest of this
+document if an example is enough for you; the rest of the document
+provides more details on each step and lists the library functions
+available to script writers.
+
+This example actually details the steps that were used to create the
+'syscall-counts' script you see when you list the available perf trace
+scripts via 'perf trace -l'. As such, this script also shows how to
+integrate your script into the list of general-purpose 'perf trace'
+scripts listed by that command.
+
+The syscall-counts script is a simple script, but demonstrates all the
+basic ideas necessary to create a useful script. Here's an example
+of its output (syscall names are not yet supported, they will appear
+as numbers):
+
+----
+syscall events:
+
+event count
+---------------------------------------- -----------
+sys_write 455067
+sys_getdents 4072
+sys_close 3037
+sys_swapoff 1769
+sys_read 923
+sys_sched_setparam 826
+sys_open 331
+sys_newfstat 326
+sys_mmap 217
+sys_munmap 216
+sys_futex 141
+sys_select 102
+sys_poll 84
+sys_setitimer 12
+sys_writev 8
+15 8
+sys_lseek 7
+sys_rt_sigprocmask 6
+sys_wait4 3
+sys_ioctl 3
+sys_set_robust_list 1
+sys_exit 1
+56 1
+sys_access 1
+----
+
+Basically our task is to keep a per-syscall tally that gets updated
+every time a system call occurs in the system. Our script will do
+that, but first we need to record the data that will be processed by
+that script. Theoretically, there are a couple of ways we could do
+that:
+
+- we could enable every event under the tracing/events/syscalls
+ directory, but this is over 600 syscalls, well beyond the number
+ allowable by perf. These individual syscall events will however be
+ useful if we want to later use the guidance we get from the
+ general-purpose scripts to drill down and get more detail about
+ individual syscalls of interest.
+
+- we can enable the sys_enter and/or sys_exit syscalls found under
+ tracing/events/raw_syscalls. These are called for all syscalls; the
+ 'id' field can be used to distinguish between individual syscall
+ numbers.
+
+For this script, we only need to know that a syscall was entered; we
+don't care how it exited, so we'll use 'perf record' to record only
+the sys_enter events:
+
+----
+# perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+
+^C[ perf record: Woken up 1 times to write data ]
+[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ]
+----
+
+The options basically say to collect data for every syscall event
+system-wide and multiplex the per-cpu output into a single stream.
+That single stream will be recorded in a file in the current directory
+called perf.data.
+
+Once we have a perf.data file containing our data, we can use the -g
+'perf trace' option to generate a Python script that will contain a
+callback handler for each event type found in the perf.data trace
+stream (for more details, see the STARTER SCRIPTS section).
+
+----
+# perf trace -g python
+generated Python script: perf-trace.py
+----
+
+The output file, also created in the current directory, is named
+perf-trace.py. Here's the file in its entirety:
+
+----
+# perf trace event handlers, generated by perf trace -g python
+# Licensed under the terms of the GNU GPL License version 2
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the format files. Those fields not available as handler params can
+# be retrieved using Python functions of the form common_*(context).
+# See the perf-trace-python Documentation for the list of available functions.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_begin():
+ print "in trace_begin"
+
+def trace_end():
+ print "in trace_end"
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print "id=%d, args=%s\n" % \
+ (id, args),
+
+def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+ print "%-20s %5u %05u.%09u %8u %-20s " % \
+ (event_name, cpu, secs, nsecs, pid, comm),
+----
+
+At the top is a comment block followed by some import statements and a
+path append which every perf trace script should include.
+
+Following that are a couple generated functions, trace_begin() and
+trace_end(), which are called at the beginning and the end of the
+script respectively (for more details, see the SCRIPT_LAYOUT section
+below).
+
+Following those are the 'event handler' functions generated one for
+every event in the 'perf record' output. The handler functions take
+the form subsystem__event_name, and contain named parameters, one for
+each field in the event; in this case, there's only one event,
+raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
+more info on event handlers).
+
+The final couple of functions are, like the begin and end functions,
+generated for every script. The first, trace_unhandled(), is called
+every time the script finds an event in the perf.data file that
+doesn't correspond to any event handler in the script. This could
+mean either that the record step recorded event types that it wasn't
+really interested in, or the script was run against a trace file that
+doesn't correspond to the script.
+
+The script generated by the -g option simply prints a line for each
+event found in the trace stream i.e. it basically just dumps the event
+and its parameter values to stdout. The print_header() function is
+simply a utility function used for that purpose. Let's rename the
+script and run it to see the default output:
+
+----
+# mv perf-trace.py syscall-counts.py
+# perf trace -s syscall-counts.py
+
+raw_syscalls__sys_enter 1 00840.847582083 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847595764 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847620860 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847710478 6533 npviewer.bin id=78, args=
+raw_syscalls__sys_enter 1 00840.847719204 6533 npviewer.bin id=142, args=
+raw_syscalls__sys_enter 1 00840.847755445 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847775601 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847781820 6533 npviewer.bin id=3, args=
+.
+.
+.
+----
+
+Of course, for this script, we're not interested in printing every
+trace event, but rather aggregating it in a useful way. So we'll get
+rid of everything to do with printing as well as the trace_begin() and
+trace_unhandled() functions, which we won't be using. That leaves us
+with this minimalistic skeleton:
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_end():
+ print "in trace_end"
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+----
+
+In trace_end(), we'll simply print the results, but first we need to
+generate some results to print. To do that we need to have our
+sys_enter() handler do the necessary tallying until all events have
+been counted. A hash table indexed by syscall id is a good way to
+store that information; every time the sys_enter() handler is called,
+we simply increment a count associated with that hash entry indexed by
+that syscall id:
+
+----
+ syscalls = autodict()
+
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+----
+
+The syscalls 'autodict' object is a special kind of Python dictionary
+(implemented in Core.py) that implements Perl's 'autovivifying' hashes
+in Python, i.e. with autovivifying hashes you can assign nested hash
+values without having to go to the trouble of creating intermediate
+levels if they don't exist; e.g. syscalls[comm][pid][id] = 1 will create
+the intermediate hash levels and finally assign the value 1 to the
+hash entry for 'id'. (Because the value being assigned isn't a hash
+object itself, the initial value is assigned in the TypeError
+exception handler. There may be a better way to do this in Python, but
+that's what works for now.)
+
+Putting that code into the raw_syscalls__sys_enter() handler, we
+effectively end up with a single-level dictionary keyed on syscall id
+and having the counts we've tallied as values.
+
+The print_syscall_totals() function iterates over the entries in the
+dictionary and displays a line for each entry containing the syscall
+name (the dictionary keys contain the syscall ids, which are passed to
+the Util function syscall_name(), which translates the raw syscall
+numbers to the corresponding syscall name strings). The output is
+displayed after all the events in the trace have been processed, by
+calling the print_syscall_totals() function from the trace_end()
+handler called at the end of script processing.
+
+The final script producing the output shown above is shown in its
+entirety below (syscall_name() helper is not yet available, you can
+only deal with id's for now):
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+syscalls = autodict()
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ print "%-40s %10d\n" % (syscall_name(id), val),
+----
+
+The script can be run just as before:
+
+ # perf trace -s syscall-counts.py
+
+So those are the essential steps in writing and running a script. The
+process can be generalized to any tracepoint or set of tracepoints
+you're interested in: find the tracepoint(s) you're interested in by
+looking at the list of available events shown by 'perf list' and/or by
+looking in /sys/kernel/debug/tracing/events for detailed event and
+field info, record the corresponding trace data using 'perf record'
+(passing it the list of interesting events), generate a skeleton script
+using 'perf trace -g python', and modify the code to aggregate and
+display it for your particular needs.
+
+After you've done that you may end up with a general-purpose script
+that you want to keep around and have available for future use. By
+writing a couple of very simple shell scripts and putting them in the
+right place, you can have your script listed alongside the other
+scripts listed by the 'perf trace -l' command e.g.:
+
+----
+root@tropicana:~# perf trace -l
+List of available trace scripts:
+ workqueue-stats workqueue stats (ins/exe/create/destroy)
+ wakeup-latency system-wide min/max/avg wakeup latency
+ rw-by-file <comm> r/w activity for a program, by file
+ rw-by-pid system-wide r/w activity
+----
+
+A nice side effect of doing this is that you also then capture the
+probably lengthy 'perf record' command needed to record the events for
+the script.
+
+To have the script appear as a 'built-in' script, you write two simple
+scripts, one for recording and one for 'reporting'.
+
+The 'record' script is a shell script with the same base name as your
+script, but with -record appended. The shell script should be put
+into the perf/scripts/python/bin directory in the kernel source tree.
+In that script, you write the 'perf record' command-line needed for
+your script:
+
+----
+# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
+
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+----
+
+The 'report' script is also a shell script with the same base name as
+your script, but with -report appended. It should also be located in
+the perf/scripts/python/bin directory. In that script, you write the
+'perf trace -s' command-line needed for running your script:
+
+----
+# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report
+
+#!/bin/bash
+# description: system-wide syscall counts
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py
+----
+
+Note that the location of the Python script given in the shell script
+is in the libexec/perf-core/scripts/python directory - this is where
+the script will be copied by 'make install' when you install perf.
+For the installation to install your script there, your script needs
+to be located in the perf/scripts/python directory in the kernel
+source tree:
+
+----
+# ls -al kernel-source/tools/perf/scripts/python
+
+root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
+total 32
+drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
+drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
+drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
+-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-trace.py
+drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
+-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
+----
+
+Once you've done that (don't forget to do a new 'make install',
+otherwise your script won't show up at run-time), 'perf trace -l'
+should show a new entry for your script:
+
+----
+root@tropicana:~# perf trace -l
+List of available trace scripts:
+ workqueue-stats workqueue stats (ins/exe/create/destroy)
+ wakeup-latency system-wide min/max/avg wakeup latency
+ rw-by-file <comm> r/w activity for a program, by file
+ rw-by-pid system-wide r/w activity
+ syscall-counts system-wide syscall counts
+----
+
+You can now perform the record step via 'perf trace record':
+
+ # perf trace record syscall-counts
+
+and display the output using 'perf trace report':
+
+ # perf trace report syscall-counts
+
+STARTER SCRIPTS
+---------------
+
+You can quickly get started writing a script for a particular set of
+trace data by generating a skeleton script using 'perf trace -g
+python' in the same directory as an existing perf.data trace file.
+That will generate a starter script containing a handler for each of
+the event types in the trace file; it simply prints every available
+field for each event in the trace file.
+
+You can also look at the existing scripts in
+~/libexec/perf-core/scripts/python for typical examples showing how to
+do basic things like aggregate event data, print results, etc. Also,
+the check-perf-trace.py script, while not interesting for its results,
+attempts to exercise all of the main scripting features.
+
+EVENT HANDLERS
+--------------
+
+When perf trace is invoked using a trace script, a user-defined
+'handler function' is called for each event in the trace. If there's
+no handler function defined for a given event type, the event is
+ignored (or passed to a 'trace_unhandled' function, see below) and the
+next event is processed.
+
+Most of the event's field values are passed as arguments to the
+handler function; some of the less common ones aren't - those are
+available as calls back into the perf executable (see below).
+
+As an example, the following perf record command can be used to record
+all sched_wakeup events in the system:
+
+ # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
+
+Traces meant to be processed using a script should be recorded with
+the above options: -c 1 says to sample every event, -a to enable
+system-wide collection, -M to multiplex the output, and -R to collect
+raw samples.
+
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+
+----
+ format:
+ field:unsigned short common_type;
+ field:unsigned char common_flags;
+ field:unsigned char common_preempt_count;
+ field:int common_pid;
+ field:int common_lock_depth;
+
+ field:char comm[TASK_COMM_LEN];
+ field:pid_t pid;
+ field:int prio;
+ field:int success;
+ field:int target_cpu;
+----
+
+The handler function for this event would be defined as:
+
+----
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+ common_nsecs, common_pid, common_comm,
+ comm, pid, prio, success, target_cpu):
+ pass
+----
+
+The handler function takes the form subsystem__event_name.
+
+The common_* arguments in the handler's argument list are the set of
+arguments passed to all event handlers; some of the fields correspond
+to the common_* fields in the format file, but some are synthesized,
+and some of the common_* fields aren't common enough to be passed
+to every event as arguments but are available as library functions.
+
+Here's a brief description of each of the invariant event args:
+
+ event_name the name of the event as text
+ context an opaque 'cookie' used in calls back into perf
+ common_cpu the cpu the event occurred on
+ common_secs the secs portion of the event timestamp
+ common_nsecs the nsecs portion of the event timestamp
+ common_pid the pid of the current task
+ common_comm the name of the current process
+
+All of the remaining fields in the event's format file have
+counterparts as handler function arguments of the same name, as can be
+seen in the example above.
+
+The above provides the basics needed to directly access every field of
+every event in a trace, which covers 90% of what you need to know to
+write a useful trace script. The sections below cover the rest.
+
+SCRIPT LAYOUT
+-------------
+
+Every perf trace Python script should start by setting up a Python
+module search path and 'import'ing a few support modules (see module
+descriptions below):
+
+----
+ import os
+ import sys
+
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+ from perf_trace_context import *
+ from Core import *
+----
+
+The rest of the script can contain handler functions and support
+functions in any order.
+
+Aside from the event handler functions discussed above, every script
+can implement a set of optional functions:
+
+*trace_begin*, if defined, is called before any event is processed and
+gives scripts a chance to do setup tasks:
+
+----
+def trace_begin():
+ pass
+----
+
+*trace_end*, if defined, is called after all events have been
+ processed and gives scripts a chance to do end-of-script tasks, such
+ as display results:
+
+----
+def trace_end():
+ pass
+----
+
+*trace_unhandled*, if defined, is called for any event that
+ doesn't have a handler explicitly defined for it. The standard set
+ of common arguments are passed into it:
+
+----
+def trace_unhandled(event_name, context, common_cpu, common_secs,
+ common_nsecs, common_pid, common_comm):
+ pass
+----
+
+The remaining sections provide descriptions of each of the available
+built-in perf trace Python modules and their associated functions.
+
+AVAILABLE MODULES AND FUNCTIONS
+-------------------------------
+
+The following sections describe the functions and variables available
+via the various perf trace Python modules. To use the functions and
+variables from the given module, add the corresponding 'from XXXX
+import' line to your perf trace script.
+
+Core.py Module
+~~~~~~~~~~~~~~
+
+These functions provide some essential functions to user scripts.
+
+The *flag_str* and *symbol_str* functions provide human-readable
+strings for flag and symbolic fields. These correspond to the strings
+and values parsed from the 'print fmt' fields of the event format
+files:
+
+  flag_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the flag field field_name of event event_name
+  symbol_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the symbolic field field_name of event event_name
+
+The *autodict* function returns a special kind of Python
+dictionary that implements Perl's 'autovivifying' hashes in Python
+i.e. with autovivifying hashes, you can assign nested hash values
+without having to go to the trouble of creating intermediate levels if
+they don't exist.
+
+ autodict() - returns an autovivifying dictionary instance
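+
+As a rough sketch (not taken from an existing script), assuming the
+standard script header shown in SCRIPT LAYOUT above and a hypothetical
+'irq__softirq_entry' event with a symbolic 'vec' field, autodict() and
+symbol_str() could be combined like this:
+
+----
+softirqs = autodict()
+
+def irq__softirq_entry(event_name, context, common_cpu, common_secs,
+		       common_nsecs, common_pid, common_comm, vec):
+	# symbol_str() turns the numeric vec value into its symbolic name
+	name = symbol_str(event_name, "vec", vec)
+	# autodict() lets us tally per-cpu, per-vector without creating keys first
+	try:
+		softirqs[common_cpu][name] += 1
+	except TypeError:
+		softirqs[common_cpu][name] = 1
+----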
+
+
+perf_trace_context Module
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the 'common' fields in the event format file aren't all that
+common, but need to be made accessible to user scripts nonetheless.
+
+perf_trace_context defines a set of functions that can be used to
+access this data in the context of the current event. Each of these
+functions expects a context variable, which is the same as the
+context variable passed into every event handler as the second
+argument.
+
+ common_pc(context) - returns common_preempt count for the current event
+ common_flags(context) - returns common_flags for the current event
+ common_lock_depth(context) - returns common_lock_depth for the current event
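+
+As a sketch, the sched__sched_wakeup handler shown in the EVENT HANDLERS
+section could use these accessors on the context it receives as its
+second argument:
+
+----
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+			common_nsecs, common_pid, common_comm,
+			comm, pid, prio, success, target_cpu):
+	# Fields not passed in as arguments are fetched via the context
+	pc = common_pc(context)
+	flags = common_flags(context)
+	depth = common_lock_depth(context)
+	print "%s: preempt_count=%d flags=%d lock_depth=%d" % \
+		(comm, pc, flags, depth)
+----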
+
+Util.py Module
+~~~~~~~~~~~~~~
+
+Various utility functions for use with perf trace:
+
+ nsecs(secs, nsecs) - returns total nsecs given secs/nsecs pair
+ nsecs_secs(nsecs) - returns whole secs portion given nsecs
+ nsecs_nsecs(nsecs) - returns nsecs remainder given nsecs
+ nsecs_str(nsecs) - returns printable string in the form secs.nsecs
+ avg(total, n) - returns average given a sum and a total number of values
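+
+For example (a sketch, assuming the standard script header plus
+'from Util import *'), a latency accumulated as secs/nsecs pairs could
+be reduced and printed like this:
+
+----
+total_ns = nsecs(5, 123456789) + nsecs(2, 987654321)
+nr_events = 2
+
+# nsecs_str() formats a nanosecond count as "secs.nsecs"
+print "total: %s, avg: %d ns" % (nsecs_str(total_ns), avg(total_ns, nr_events))
+----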
+
+SEE ALSO
+--------
+linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 60e5900..8879299 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -19,6 +19,11 @@ There are several variants of perf trace:
'perf trace' to see a detailed trace of the workload that was
recorded.
+ You can also run a set of pre-canned scripts that aggregate and
+ summarize the raw trace data in various ways (the list of scripts is
+ available via 'perf trace -l'). The following variants allow you to
+ record and run those scripts:
+
'perf trace record <script>' to record the events required for 'perf
trace report'. <script> is the name displayed in the output of
'perf trace --list' i.e. the actual script name minus any language
@@ -31,6 +36,9 @@ There are several variants of perf trace:
record <script>' is used and should be present for this command to
succeed.
+ See the 'SEE ALSO' section for links to language-specific
+ information on how to write and run your own trace scripts.
+
OPTIONS
-------
-D::
@@ -45,9 +53,11 @@ OPTIONS
--list=::
Display a list of available trace scripts.
--s::
+-s ['lang']::
--script=::
Process trace data with the given script ([lang]:script[.ext]).
+ If the string 'lang' is specified in place of a script name, a
+ list of supported languages will be displayed instead.
-g::
--gen-script=::
@@ -56,4 +66,5 @@ OPTIONS
SEE ALSO
--------
-linkperf:perf-record[1], linkperf:perf-trace-perl[1]
+linkperf:perf-record[1], linkperf:perf-trace-perl[1],
+linkperf:perf-trace-python[1]
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 69c8325..0eeb247 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -12,7 +12,7 @@ SYNOPSIS
DESCRIPTION
-----------
-Performance counters for Linux are are a new kernel-based subsystem
+Performance counters for Linux are a new kernel-based subsystem
that provide a framework for all things performance analysis. It
covers hardware level (CPU/PMU, Performance Monitoring Unit) features
and software features (software counters, tracepoints) as well.
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 2e7fa3a..54a5b50 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -286,11 +286,7 @@ SCRIPT_PERL =
SCRIPT_SH =
TEST_PROGRAMS =
-#
-# No scripts right now:
-#
-
-# SCRIPT_SH += perf-am.sh
+SCRIPT_SH += perf-archive.sh
#
# No Perl scripts right now:
@@ -315,9 +311,6 @@ PROGRAMS += perf
# List built-in command $C whose implementation cmd_$C() is not in
# builtin-$C.o but is linked in as part of some other command.
#
-# None right now:
-#
-# BUILT_INS += perf-init $X
# what 'all' will build and 'install' will install, in perfexecdir
ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
@@ -340,6 +333,7 @@ LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
@@ -363,12 +357,14 @@ LIB_H += util/include/asm/uaccess.h
LIB_H += perf.h
LIB_H += util/cache.h
LIB_H += util/callchain.h
+LIB_H += util/build-id.h
LIB_H += util/debug.h
LIB_H += util/debugfs.h
LIB_H += util/event.h
LIB_H += util/exec_cmd.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
+LIB_H += util/map.h
LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
LIB_H += util/quote.h
@@ -389,12 +385,12 @@ LIB_H += util/sort.h
LIB_H += util/hist.h
LIB_H += util/thread.h
LIB_H += util/trace-event.h
-LIB_H += util/trace-event-perl.h
LIB_H += util/probe-finder.h
LIB_H += util/probe-event.h
LIB_OBJS += util/abspath.o
LIB_OBJS += util/alias.o
+LIB_OBJS += util/build-id.o
LIB_OBJS += util/config.o
LIB_OBJS += util/ctype.o
LIB_OBJS += util/debugfs.o
@@ -431,12 +427,12 @@ LIB_OBJS += util/thread.o
LIB_OBJS += util/trace-event-parse.o
LIB_OBJS += util/trace-event-read.o
LIB_OBJS += util/trace-event-info.o
-LIB_OBJS += util/trace-event-perl.o
+LIB_OBJS += util/trace-event-scripting.o
LIB_OBJS += util/svghelper.o
LIB_OBJS += util/sort.o
LIB_OBJS += util/hist.o
-LIB_OBJS += util/data_map.o
LIB_OBJS += util/probe-event.o
+LIB_OBJS += util/util.o
BUILTIN_OBJS += builtin-annotate.o
@@ -451,6 +447,7 @@ BUILTIN_OBJS += builtin-diff.o
BUILTIN_OBJS += builtin-help.o
BUILTIN_OBJS += builtin-sched.o
BUILTIN_OBJS += builtin-buildid-list.o
+BUILTIN_OBJS += builtin-buildid-cache.o
BUILTIN_OBJS += builtin-list.o
BUILTIN_OBJS += builtin-record.o
BUILTIN_OBJS += builtin-report.o
@@ -460,6 +457,7 @@ BUILTIN_OBJS += builtin-top.o
BUILTIN_OBJS += builtin-trace.o
BUILTIN_OBJS += builtin-probe.o
BUILTIN_OBJS += builtin-kmem.o
+BUILTIN_OBJS += builtin-lock.o
PERFLIBS = $(LIB_FILE)
@@ -520,9 +518,23 @@ ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; e
BASIC_CFLAGS += -DNO_LIBPERL
else
ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+ LIB_OBJS += util/scripting-engines/trace-event-perl.o
LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
endif
+ifndef NO_LIBPYTHON
+PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
+endif
+
+ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ BASIC_CFLAGS += -DNO_LIBPYTHON
+else
+ ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+ LIB_OBJS += util/scripting-engines/trace-event-python.o
+ LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o
+endif
+
ifdef NO_DEMANGLE
BASIC_CFLAGS += -DNO_DEMANGLE
else
@@ -894,12 +906,18 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
$(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
$(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+
+scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+
perf-%$X: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
@@ -1009,9 +1027,16 @@ install: all
$(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+ $(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
+ $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+
ifdef BUILT_INS
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 593ff25..5ec5de9 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -53,32 +53,20 @@ struct sym_priv {
static const char *sym_hist_filter;
-static int symbol_filter(struct map *map __used, struct symbol *sym)
+static int sym__alloc_hist(struct symbol *self)
{
- if (sym_hist_filter == NULL ||
- strcmp(sym->name, sym_hist_filter) == 0) {
- struct sym_priv *priv = symbol__priv(sym);
- const int size = (sizeof(*priv->hist) +
- (sym->end - sym->start) * sizeof(u64));
+ struct sym_priv *priv = symbol__priv(self);
+ const int size = (sizeof(*priv->hist) +
+ (self->end - self->start) * sizeof(u64));
- priv->hist = malloc(size);
- if (priv->hist)
- memset(priv->hist, 0, size);
- return 0;
- }
- /*
- * FIXME: We should really filter it out, as we don't want to go thru symbols
- * we're not interested, and if a DSO ends up with no symbols, delete it too,
- * but right now the kernel loading routines in symbol.c bail out if no symbols
- * are found, fix it later.
- */
- return 0;
+ priv->hist = zalloc(size);
+ return priv->hist == NULL ? -1 : 0;
}
/*
* collect histogram counts
*/
-static void hist_hit(struct hist_entry *he, u64 ip)
+static int annotate__hist_hit(struct hist_entry *he, u64 ip)
{
unsigned int sym_size, offset;
struct symbol *sym = he->sym;
@@ -88,83 +76,127 @@ static void hist_hit(struct hist_entry *he, u64 ip)
he->count++;
if (!sym || !he->map)
- return;
+ return 0;
priv = symbol__priv(sym);
- if (!priv->hist)
- return;
+ if (priv->hist == NULL && sym__alloc_hist(sym) < 0)
+ return -ENOMEM;
sym_size = sym->end - sym->start;
offset = ip - sym->start;
- if (verbose)
- fprintf(stderr, "%s: ip=%Lx\n", __func__,
- he->map->unmap_ip(he->map, ip));
+ pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip));
if (offset >= sym_size)
- return;
+ return 0;
h = priv->hist;
h->sum++;
h->ip[offset]++;
- if (verbose >= 3)
- printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
- (void *)(unsigned long)he->sym->start,
- he->sym->name,
- (void *)(unsigned long)ip, ip - he->sym->start,
- h->ip[offset]);
+ pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start,
+ he->sym->name, ip, ip - he->sym->start, h->ip[offset]);
+ return 0;
}
static int perf_session__add_hist_entry(struct perf_session *self,
struct addr_location *al, u64 count)
{
bool hit;
- struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
- count, &hit);
+ struct hist_entry *he;
+
+ if (sym_hist_filter != NULL &&
+ (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
+ /* We're only interested in a symbol named sym_hist_filter */
+ if (al->sym != NULL) {
+ rb_erase(&al->sym->rb_node,
+ &al->map->dso->symbols[al->map->type]);
+ symbol__delete(al->sym);
+ }
+ return 0;
+ }
+
+ he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
if (he == NULL)
return -ENOMEM;
- hist_hit(he, al->addr);
- return 0;
+
+ return annotate__hist_hit(he, al->addr);
}
static int process_sample_event(event_t *event, struct perf_session *session)
{
struct addr_location al;
- dump_printf("(IP, %d): %d: %p\n", event->header.misc,
- event->ip.pid, (void *)(long)event->ip.ip);
+ dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
+ event->ip.pid, event->ip.ip);
- if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
- fprintf(stderr, "problem processing %d event, skipping it.\n",
- event->header.type);
+ if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+ pr_warning("problem processing %d event, skipping it.\n",
+ event->header.type);
return -1;
}
if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) {
- fprintf(stderr, "problem incrementing symbol count, "
- "skipping event\n");
+ pr_warning("problem incrementing symbol count, "
+ "skipping event\n");
return -1;
}
return 0;
}
-static int parse_line(FILE *file, struct hist_entry *he, u64 len)
+struct objdump_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+};
+
+static struct objdump_line *objdump_line__new(s64 offset, char *line)
+{
+ struct objdump_line *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->offset = offset;
+ self->line = line;
+ }
+
+ return self;
+}
+
+static void objdump_line__free(struct objdump_line *self)
+{
+ free(self->line);
+ free(self);
+}
+
+static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+static struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+static int parse_line(FILE *file, struct hist_entry *he,
+ struct list_head *head)
{
struct symbol *sym = he->sym;
+ struct objdump_line *objdump_line;
char *line = NULL, *tmp, *tmp2;
- static const char *prev_line;
- static const char *prev_color;
- unsigned int offset;
size_t line_len;
- u64 start;
- s64 line_ip;
- int ret;
+ s64 line_ip, offset = -1;
char *c;
if (getline(&line, &line_len, file) < 0)
return -1;
+
if (!line)
return -1;
@@ -173,8 +205,6 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
*c = 0;
line_ip = -1;
- offset = 0;
- ret = -2;
/*
* Strip leading spaces:
@@ -195,9 +225,30 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
line_ip = -1;
}
- start = he->map->unmap_ip(he->map, sym->start);
-
if (line_ip != -1) {
+ u64 start = map__rip_2objdump(he->map, sym->start);
+ offset = line_ip - start;
+ }
+
+ objdump_line = objdump_line__new(offset, line);
+ if (objdump_line == NULL) {
+ free(line);
+ return -1;
+ }
+ objdump__add_line(head, objdump_line);
+
+ return 0;
+}
+
+static int objdump_line__print(struct objdump_line *self,
+ struct list_head *head,
+ struct hist_entry *he, u64 len)
+{
+ struct symbol *sym = he->sym;
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (self->offset != -1) {
const char *path = NULL;
unsigned int hits = 0;
double percent = 0.0;
@@ -205,15 +256,22 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
struct sym_priv *priv = symbol__priv(sym);
struct sym_ext *sym_ext = priv->ext;
struct sym_hist *h = priv->hist;
+ s64 offset = self->offset;
+ struct objdump_line *next = objdump__get_next_ip_line(head, self);
+
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (sym_ext) {
+ if (path == NULL)
+ path = sym_ext[offset].path;
+ percent += sym_ext[offset].percent;
+ } else
+ hits += h->ip[offset];
+
+ ++offset;
+ }
- offset = line_ip - start;
- if (offset < len)
- hits = h->ip[offset];
-
- if (offset < len && sym_ext) {
- path = sym_ext[offset].path;
- percent = sym_ext[offset].percent;
- } else if (h->sum)
+ if (sym_ext == NULL && h->sum)
percent = 100.0 * hits / h->sum;
color = get_percent_color(percent);
@@ -234,12 +292,12 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
color_fprintf(stdout, color, " %7.2f", percent);
printf(" : ");
- color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line);
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
} else {
- if (!*line)
+ if (!*self->line)
printf(" :\n");
else
- printf(" : %s\n", line);
+ printf(" : %s\n", self->line);
}
return 0;
@@ -365,6 +423,20 @@ static void print_summary(const char *filename)
}
}
+static void hist_entry__print_hits(struct hist_entry *self)
+{
+ struct symbol *sym = self->sym;
+ struct sym_priv *priv = symbol__priv(sym);
+ struct sym_hist *h = priv->hist;
+ u64 len = sym->end - sym->start, offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->ip[offset] != 0)
+ printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->ip[offset]);
+ printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
static void annotate_sym(struct hist_entry *he)
{
struct map *map = he->map;
@@ -374,15 +446,15 @@ static void annotate_sym(struct hist_entry *he)
u64 len;
char command[PATH_MAX*2];
FILE *file;
+ LIST_HEAD(head);
+ struct objdump_line *pos, *n;
if (!filename)
return;
- if (verbose)
- fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n",
- __func__, filename, sym->name,
- map->unmap_ip(map, sym->start),
- map->unmap_ip(map, sym->end));
+ pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
if (full_paths)
d_filename = filename;
@@ -405,7 +477,8 @@ static void annotate_sym(struct hist_entry *he)
dso, dso->long_name, sym, sym->name);
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
- map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end),
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end),
filename, filename);
if (verbose >= 3)
@@ -416,11 +489,21 @@ static void annotate_sym(struct hist_entry *he)
return;
while (!feof(file)) {
- if (parse_line(file, he, len) < 0)
+ if (parse_line(file, he, &head) < 0)
break;
}
pclose(file);
+
+ if (verbose)
+ hist_entry__print_hits(he);
+
+ list_for_each_entry_safe(pos, n, &head, node) {
+ objdump_line__print(pos, &head, he, len);
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+
if (print_line)
free_source_line(he, len);
}
@@ -451,10 +534,10 @@ static void perf_session__find_annotations(struct perf_session *self)
}
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_fork_event = event__process_task,
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .fork = event__process_task,
};
static int __cmd_annotate(void)
@@ -542,9 +625,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
setup_pager();
if (field_sep && *field_sep == '.') {
- fputs("'.' is the only non valid --field-separator argument\n",
- stderr);
- exit(129);
+ pr_err("'.' is the only non valid --field-separator argument\n");
+ return -1;
}
return __cmd_annotate();
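
The annotate rework above stops printing objdump output as it is read: parse_line() now stores every line in a list together with its offset into the symbol (-1 for source-only lines), and objdump_line__print() later sums the hits between that offset and the next line that carries an address. A minimal standalone sketch of that accumulation, with a hypothetical hits[] array standing in for sym_hist:

    #include <stdio.h>

    struct line { long offset; const char *text; };  /* offset < 0: no address */

    /* sum hits in [lines[i].offset, offset of the next addressed line) */
    static unsigned long line_hits(const struct line *lines, int nr, int i,
                                   const unsigned long *hits, long len)
    {
            unsigned long sum = 0;
            long off = lines[i].offset, next = len;
            int j;

            for (j = i + 1; j < nr; j++)
                    if (lines[j].offset >= 0) {
                            next = lines[j].offset;
                            break;
                    }
            while (off < next && off < len)
                    sum += hits[off++];
            return sum;
    }

    int main(void)
    {
            struct line lines[] = {
                    { -1, "int f(void)"      },
                    {  0, "   0:  push %rbp" },
                    {  1, "   1:  pop  %rbp" },
                    {  2, "   2:  ret"       },
            };
            unsigned long hits[] = { 3, 0, 7 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("%8lu : %s\n", lines[i].offset < 0 ? 0UL :
                           line_hits(lines, 4, i, hits, 3), lines[i].text);
            return 0;
    }

Buffering the parsed lines first is also what lets the verbose hist_entry__print_hits() dump run before any annotated line is printed.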
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
new file mode 100644
index 0000000..30a05f5
--- /dev/null
+++ b/tools/perf/builtin-buildid-cache.c
@@ -0,0 +1,133 @@
+/*
+ * builtin-buildid-cache.c
+ *
+ * Builtin buildid-cache command: Manages build-id cache
+ *
+ * Copyright (C) 2010, Red Hat Inc.
+ * Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "builtin.h"
+#include "perf.h"
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include "util/parse-options.h"
+#include "util/strlist.h"
+#include "util/symbol.h"
+
+static char const *add_name_list_str, *remove_name_list_str;
+
+static const char * const buildid_cache_usage[] = {
+ "perf buildid-cache [<options>]",
+ NULL
+};
+
+static const struct option buildid_cache_options[] = {
+ OPT_STRING('a', "add", &add_name_list_str,
+ "file list", "file(s) to add"),
+ OPT_STRING('r', "remove", &remove_name_list_str, "file list",
+ "file(s) to remove"),
+ OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
+ OPT_END()
+};
+
+static int build_id_cache__add_file(const char *filename, const char *debugdir)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ u8 build_id[BUILD_ID_SIZE];
+ int err;
+
+ if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
+ pr_debug("Couldn't read a build-id in %s\n", filename);
+ return -1;
+ }
+
+ build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
+ if (verbose)
+ pr_info("Adding %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
+ return err;
+}
+
+static int build_id_cache__remove_file(const char *filename,
+ const char *debugdir)
+{
+ u8 build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ int err;
+
+ if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
+ pr_debug("Couldn't read a build-id in %s\n", filename);
+ return -1;
+ }
+
+ build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ err = build_id_cache__remove_s(sbuild_id, debugdir);
+ if (verbose)
+ pr_info("Removing %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
+
+ return err;
+}
+
+static int __cmd_buildid_cache(void)
+{
+ struct strlist *list;
+ struct str_node *pos;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
+ DEBUG_CACHE_DIR);
+
+ if (add_name_list_str) {
+ list = strlist__new(true, add_name_list_str);
+ if (list) {
+ strlist__for_each(pos, list)
+ if (build_id_cache__add_file(pos->s, debugdir)) {
+ if (errno == EEXIST) {
+ pr_debug("%s already in the cache\n",
+ pos->s);
+ continue;
+ }
+ pr_warning("Couldn't add %s: %s\n",
+ pos->s, strerror(errno));
+ }
+
+ strlist__delete(list);
+ }
+ }
+
+ if (remove_name_list_str) {
+ list = strlist__new(true, remove_name_list_str);
+ if (list) {
+ strlist__for_each(pos, list)
+ if (build_id_cache__remove_file(pos->s, debugdir)) {
+ if (errno == ENOENT) {
+ pr_debug("%s wasn't in the cache\n",
+ pos->s);
+ continue;
+ }
+ pr_warning("Couldn't remove %s: %s\n",
+ pos->s, strerror(errno));
+ }
+
+ strlist__delete(list);
+ }
+ }
+
+ return 0;
+}
+
+int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, buildid_cache_options,
+ buildid_cache_usage, 0);
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_pager();
+ return __cmd_buildid_cache();
+}
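
The new buildid-cache command keys everything on the hex form of the raw build-id, which is why sbuild_id is sized BUILD_ID_SIZE * 2 + 1 (20 SHA-1 bytes, two hex digits each, plus the terminating NUL). A self-contained sketch of that formatting step, independent of the perf helpers (build_id__sprintf() is the real one):

    #include <stdio.h>

    #define BUILD_ID_SIZE 20        /* SHA-1, as in the ELF build-id note */

    /* render the raw build-id as a NUL-terminated hex string */
    static void build_id_hex(const unsigned char *bid, size_t len, char *out)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    sprintf(out + i * 2, "%02x", bid[i]);
            out[len * 2] = '\0';
    }

    int main(void)
    {
            unsigned char bid[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
            char sbuild_id[BUILD_ID_SIZE * 2 + 1];

            build_id_hex(bid, sizeof(bid), sbuild_id);
            printf("%s\n", sbuild_id);      /* deadbeef00... */
            return 0;
    }

The resulting string is the key that build_id_cache__add_s()/__remove_s() use inside the debugdir built above from $HOME and DEBUG_CACHE_DIR.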
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 1e99ac8..d0675c0 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -8,6 +8,7 @@
*/
#include "builtin.h"
#include "perf.h"
+#include "util/build-id.h"
#include "util/cache.h"
#include "util/debug.h"
#include "util/parse-options.h"
@@ -16,6 +17,7 @@
static char const *input_name = "perf.data";
static int force;
+static bool with_hits;
static const char * const buildid_list_usage[] = {
"perf buildid-list [<options>]",
@@ -23,6 +25,7 @@ static const char * const buildid_list_usage[] = {
};
static const struct option options[] = {
+ OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -31,26 +34,6 @@ static const struct option options[] = {
OPT_END()
};
-static int perf_file_section__process_buildids(struct perf_file_section *self,
- int feat, int fd)
-{
- if (feat != HEADER_BUILD_ID)
- return 0;
-
- if (lseek(fd, self->offset, SEEK_SET) < 0) {
- pr_warning("Failed to lseek to %Ld offset for buildids!\n",
- self->offset);
- return -1;
- }
-
- if (perf_header__read_build_ids(fd, self->offset, self->size)) {
- pr_warning("Failed to read buildids!\n");
- return -1;
- }
-
- return 0;
-}
-
static int __cmd_buildid_list(void)
{
int err = -1;
@@ -60,10 +43,10 @@ static int __cmd_buildid_list(void)
if (session == NULL)
return -1;
- err = perf_header__process_sections(&session->header, session->fd,
- perf_file_section__process_buildids);
- if (err >= 0)
- dsos__fprintf_buildid(stdout);
+ if (with_hits)
+ perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+
+ dsos__fprintf_buildid(stdout, with_hits);
perf_session__delete(session);
return err;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index bd71b8c..18b3f50 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -42,8 +42,8 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
struct addr_location al;
struct sample_data data = { .period = 1, };
- dump_printf("(IP, %d): %d: %p\n", event->header.misc,
- event->ip.pid, (void *)(long)event->ip.ip);
+ dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
+ event->ip.pid, event->ip.ip);
if (event__preprocess_sample(event, session, &al, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
@@ -51,12 +51,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
return -1;
}
- if (al.filtered)
+ if (al.filtered || al.sym == NULL)
return 0;
event__parse_sample(event, session->sample_type, &data);
- if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) {
+ if (perf_session__add_hist_entry(session, &al, data.period)) {
pr_warning("problem incrementing symbol count, skipping event\n");
return -1;
}
@@ -66,12 +66,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
}
static struct perf_event_ops event_ops = {
- .process_sample_event = diff__process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_exit_event = event__process_task,
- .process_fork_event = event__process_task,
- .process_lost_event = event__process_lost,
+ .sample = diff__process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -82,29 +82,19 @@ static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
struct hist_entry *iter;
while (*p != NULL) {
- int cmp;
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
-
- cmp = strcmp(he->map->dso->name, iter->map->dso->name);
- if (cmp > 0)
+ if (hist_entry__cmp(he, iter) < 0)
p = &(*p)->rb_left;
- else if (cmp < 0)
+ else
p = &(*p)->rb_right;
- else {
- cmp = strcmp(he->sym->name, iter->sym->name);
- if (cmp > 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
}
-static void perf_session__resort_by_name(struct perf_session *self)
+static void perf_session__resort_hist_entries(struct perf_session *self)
{
unsigned long position = 1;
struct rb_root tmp = RB_ROOT;
@@ -122,29 +112,28 @@ static void perf_session__resort_by_name(struct perf_session *self)
self->hists = tmp;
}
+static void perf_session__set_hist_entries_positions(struct perf_session *self)
+{
+ perf_session__output_resort(self, self->events_stats.total);
+ perf_session__resort_hist_entries(self);
+}
+
static struct hist_entry *
-perf_session__find_hist_entry_by_name(struct perf_session *self,
- struct hist_entry *he)
+perf_session__find_hist_entry(struct perf_session *self,
+ struct hist_entry *he)
{
struct rb_node *n = self->hists.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
- int cmp = strcmp(he->map->dso->name, iter->map->dso->name);
+ int64_t cmp = hist_entry__cmp(he, iter);
- if (cmp > 0)
+ if (cmp < 0)
n = n->rb_left;
- else if (cmp < 0)
+ else if (cmp > 0)
n = n->rb_right;
- else {
- cmp = strcmp(he->sym->name, iter->sym->name);
- if (cmp > 0)
- n = n->rb_left;
- else if (cmp < 0)
- n = n->rb_right;
- else
- return iter;
- }
+ else
+ return iter;
}
return NULL;
@@ -155,11 +144,9 @@ static void perf_session__match_hists(struct perf_session *old_session,
{
struct rb_node *nd;
- perf_session__resort_by_name(old_session);
-
for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) {
struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
- pos->pair = perf_session__find_hist_entry_by_name(old_session, pos);
+ pos->pair = perf_session__find_hist_entry(old_session, pos);
}
}
@@ -177,9 +164,12 @@ static int __cmd_diff(void)
ret = perf_session__process_events(session[i], &event_ops);
if (ret)
goto out_delete;
- perf_session__output_resort(session[i], session[i]->events_stats.total);
}
+ perf_session__output_resort(session[1], session[1]->events_stats.total);
+ if (show_displacement)
+ perf_session__set_hist_entries_positions(session[0]);
+
perf_session__match_hists(session[0], session[1]);
perf_session__fprintf_hists(session[1], session[0],
show_displacement, stdout);
@@ -204,7 +194,7 @@ static const struct option options[] = {
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
+ OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 9f810b1..215b584 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -286,8 +286,7 @@ void list_common_cmds_help(void)
puts(" The most commonly used perf commands are:");
for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
- printf(" %s ", common_cmds[i].name);
- mput_char(' ', longest - strlen(common_cmds[i].name));
+ printf(" %-*s ", longest, common_cmds[i].name);
puts(common_cmds[i].help);
}
}
@@ -314,8 +313,6 @@ static const char *cmd_to_page(const char *perf_cmd)
return "perf";
else if (!prefixcmp(perf_cmd, "perf"))
return perf_cmd;
- else if (is_perf_command(perf_cmd))
- return prepend("perf-", perf_cmd);
else
return prepend("perf-", perf_cmd);
}
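
The list_common_cmds_help() change replaces the manual space padding (mput_char()) with printf's dynamic field width: "%-*s" consumes an extra int argument giving the column width and left-justifies the string into it. For example (command names and help strings here are only illustrative):

    #include <stdio.h>

    int main(void)
    {
            int longest = 13;       /* width of the longest command name */

            /* "%-*s" left-justifies the name in a field of 'longest' columns */
            printf("   %-*s   %s\n", longest, "annotate", "annotate source code");
            printf("   %-*s   %s\n", longest, "buildid-cache", "manage build-id cache");
            return 0;
    }

One printf per command now produces the same aligned two-column output as the old printf-plus-padding pair.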
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 93c67bf..924a951 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -92,23 +92,18 @@ static void setup_cpunode_map(void)
if (!dir1)
return;
- while (true) {
- dent1 = readdir(dir1);
- if (!dent1)
- break;
-
- if (sscanf(dent1->d_name, "node%u", &mem) < 1)
+ while ((dent1 = readdir(dir1)) != NULL) {
+ if (dent1->d_type != DT_DIR ||
+ sscanf(dent1->d_name, "node%u", &mem) < 1)
continue;
snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
dir2 = opendir(buf);
if (!dir2)
continue;
- while (true) {
- dent2 = readdir(dir2);
- if (!dent2)
- break;
- if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
+ while ((dent2 = readdir(dir2)) != NULL) {
+ if (dent2->d_type != DT_LNK ||
+ sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
continue;
cpunode_map[cpu] = mem;
}
@@ -321,11 +316,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
- event->header.misc,
- data.pid, data.tid,
- (void *)(long)data.ip,
- (long long)data.period);
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL) {
@@ -342,22 +334,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return 0;
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -390,7 +369,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
if (is_caller) {
addr = data->call_site;
if (!raw_ip)
- sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
+ sym = map_groups__find_function(&session->kmaps, addr, NULL);
} else
addr = data->ptr;
@@ -504,11 +483,14 @@ static void sort_result(void)
static int __cmd_kmem(void)
{
- int err;
+ int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "kmem record"))
+ goto out_delete;
+
setup_pager();
err = perf_session__process_events(session, &event_ops);
if (err != 0)
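
setup_cpunode_map() now folds the readdir() call into the loop condition and rejects entries by d_type before bothering with sscanf(). The same idiom, reduced to a standalone walk over the sysfs node directory (note that d_type can legitimately be DT_UNKNOWN on some filesystems; neither this sketch nor the perf code handles that case):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <dirent.h>

    int main(void)
    {
            DIR *dir = opendir("/sys/devices/system/node");
            struct dirent *dent;
            unsigned int node;

            if (!dir)
                    return 1;

            while ((dent = readdir(dir)) != NULL) {
                    /* only directories named node<N> are interesting */
                    if (dent->d_type != DT_DIR ||
                        sscanf(dent->d_name, "node%u", &node) < 1)
                            continue;
                    printf("found node %u\n", node);
            }
            closedir(dir);
            return 0;
    }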
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
new file mode 100644
index 0000000..fb9ab2a
--- /dev/null
+++ b/tools/perf/builtin-lock.c
@@ -0,0 +1,678 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+#include "util/session.h"
+
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+#include <limits.h>
+
+#include <linux/list.h>
+#include <linux/hash.h>
+
+/* based on kernel/lockdep.c */
+#define LOCKHASH_BITS 12
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
+
+static struct list_head lockhash_table[LOCKHASH_SIZE];
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1
+
+struct lock_stat {
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ /*
+ * FIXME: raw_field_value() returns unsigned long long,
+ * so the address of lockdep_map has to be handled as 64 bit.
+ * Is there a better solution?
+ */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */
+
+ unsigned int nr_acquired;
+ unsigned int nr_acquire;
+ unsigned int nr_contended;
+ unsigned int nr_release;
+
+ /* these times are in nano sec. */
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
+};
+
+/* build a simple key function that returns whether one is bigger than two */
+#define SINGLE_KEY(member) \
+ static int lock_stat_key_ ## member(struct lock_stat *one, \
+ struct lock_stat *two) \
+ { \
+ return one->member > two->member; \
+ }
+
+SINGLE_KEY(nr_acquired)
+SINGLE_KEY(nr_contended)
+SINGLE_KEY(wait_time_total)
+SINGLE_KEY(wait_time_min)
+SINGLE_KEY(wait_time_max)
+
+struct lock_key {
+ /*
+ * name: the key name specified by the user;
+ * it should be simpler than the raw member name,
+ * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
+ */
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
+};
+
+static const char *sort_key = "acquired";
+
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+static struct rb_root result; /* place to store sorted data */
+
+#define DEF_KEY_LOCK(name, fn_suffix) \
+ { #name, lock_stat_key_ ## fn_suffix }
+struct lock_key keys[] = {
+ DEF_KEY_LOCK(acquired, nr_acquired),
+ DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(wait_total, wait_time_total),
+ DEF_KEY_LOCK(wait_min, wait_time_min),
+ DEF_KEY_LOCK(wait_max, wait_time_max),
+
+ /* more complicated extra comparisons should go here */
+
+ { NULL, NULL }
+};
+
+static void select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return;
+ }
+ }
+
+ die("Unknown compare key:%s\n", sort_key);
+}
+
+static void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *, struct lock_stat *))
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (bigger(st, p))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &result);
+}
+
+/* returns the leftmost element of result, and erases it */
+static struct lock_stat *pop_from_result(void)
+{
+ struct rb_node *node = result.rb_node;
+
+ if (!node)
+ return NULL;
+
+ while (node->rb_left)
+ node = node->rb_left;
+
+ rb_erase(node, &result);
+ return container_of(node, struct lock_stat, rb);
+}
+
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
+{
+ struct list_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ list_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ if (!new->name)
+ goto alloc_failed;
+ strcpy(new->name, name);
+
+ /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
+ new->state = LOCK_STATE_UNLOCKED;
+ new->wait_time_min = ULLONG_MAX;
+
+ list_add(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ die("memory allocation failed\n");
+}
+
+static char const *input_name = "perf.data";
+
+static int profile_cpu = -1;
+
+struct raw_event_sample {
+ u32 size;
+ char data[0];
+};
+
+struct trace_acquire_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_acquired_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_contended_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_release_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_lock_handler {
+ void (*acquire_event)(struct trace_acquire_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*acquired_event)(struct trace_acquired_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*contended_event)(struct trace_contended_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*release_event)(struct trace_release_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+};
+
+static void
+report_lock_acquire_event(struct trace_acquire_event *acquire_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void
+report_lock_acquired_event(struct trace_acquired_event *acquired_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ st->state = LOCK_STATE_LOCKED;
+ st->nr_acquired++;
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void
+report_lock_contended_event(struct trace_contended_event *contended_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(contended_event->addr, contended_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->nr_contended++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void
+report_lock_release_event(struct trace_release_event *release_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+ u64 hold_time;
+
+ st = lock_stat_findnew(release_event->addr, release_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->state = LOCK_STATE_UNLOCKED;
+ hold_time = timestamp - st->prev_event_time;
+
+ if (timestamp < st->prev_event_time) {
+ /* unfortunately, this can happen... */
+ goto end;
+ }
+
+ if (st->wait_time_min > hold_time)
+ st->wait_time_min = hold_time;
+ if (st->wait_time_max < hold_time)
+ st->wait_time_max = hold_time;
+ st->wait_time_total += hold_time;
+
+ st->nr_release++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+end:
+ st->prev_event_time = timestamp;
+}
+
+/* lock-oriented handlers */
+/* TODO: CPU-oriented and thread-oriented handlers */
+static struct trace_lock_handler report_lock_ops = {
+ .acquire_event = report_lock_acquire_event,
+ .acquired_event = report_lock_acquired_event,
+ .contended_event = report_lock_contended_event,
+ .release_event = report_lock_release_event,
+};
+
+static struct trace_lock_handler *trace_handler;
+
+static void
+process_lock_acquire_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquire_event acquire_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquire_event.addr, &tmp, sizeof(void *));
+ acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->acquire_event)
+ trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_acquired_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquired_event acquired_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquired_event.addr, &tmp, sizeof(void *));
+ acquired_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->acquired_event)
+ trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_contended_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_contended_event contended_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&contended_event.addr, &tmp, sizeof(void *));
+ contended_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->contended_event)
+ trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_release_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_release_event release_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&release_event.addr, &tmp, sizeof(void *));
+ release_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->release_event)
+ trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_raw_event(void *data, int cpu,
+ u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "lock_acquire"))
+ process_lock_acquire_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_acquired"))
+ process_lock_acquired_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_contended"))
+ process_lock_contended_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_release"))
+ process_lock_release_event(data, event, cpu, timestamp, thread);
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct thread *thread;
+ struct sample_data data;
+
+ bzero(&data, sizeof(struct sample_data));
+ event__parse_sample(event, session->sample_type, &data);
+ thread = perf_session__findnew(session, data.pid);
+
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
+ return 0;
+
+ process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
+/* TODO: various ways to print, coloring, nano or milli sec */
+static void print_result(void)
+{
+ struct lock_stat *st;
+ char cut_name[20];
+
+ printf("%18s ", "ID");
+ printf("%20s ", "Name");
+ printf("%10s ", "acquired");
+ printf("%10s ", "contended");
+
+ printf("%15s ", "total wait (ns)");
+ printf("%15s ", "max wait (ns)");
+ printf("%15s ", "min wait (ns)");
+
+ printf("\n\n");
+
+ while ((st = pop_from_result())) {
+ bzero(cut_name, 20);
+
+ printf("%p ", st->addr);
+
+ if (strlen(st->name) < 16) {
+ /* output raw name */
+ printf("%20s ", st->name);
+ } else {
+ strncpy(cut_name, st->name, 16);
+ cut_name[16] = '.';
+ cut_name[17] = '.';
+ cut_name[18] = '.';
+ cut_name[19] = '\0';
+ /* truncate the name to preserve the output layout */
+ printf("%20s ", cut_name);
+ }
+
+ printf("%10u ", st->nr_acquired);
+ printf("%10u ", st->nr_contended);
+
+ printf("%15llu ", st->wait_time_total);
+ printf("%15llu ", st->wait_time_max);
+ printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ 0 : st->wait_time_min);
+ printf("\n");
+ }
+}
+
+static void dump_map(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ printf("%p: %s\n", st->addr, st->name);
+ }
+ }
+}
+
+static struct perf_event_ops eops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+};
+
+static struct perf_session *session;
+
+static int read_events(void)
+{
+ session = perf_session__new(input_name, O_RDONLY, 0);
+ if (!session)
+ die("Initializing perf session failed\n");
+
+ return perf_session__process_events(session, &eops);
+}
+
+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
+static void __cmd_report(void)
+{
+ setup_pager();
+ select_key();
+ read_events();
+ sort_result();
+ print_result();
+}
+
+static const char * const report_usage[] = {
+ "perf lock report [<options>]",
+ NULL
+};
+
+static const struct option report_options[] = {
+ OPT_STRING('k', "key", &sort_key, "acquired",
+ "key for sorting"),
+ /* TODO: type */
+ OPT_END()
+};
+
+static const char * const lock_usage[] = {
+ "perf lock [<options>] {record|trace|report}",
+ NULL
+};
+
+static const struct option lock_options[] = {
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-M",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "lock:lock_acquire:r",
+ "-e", "lock:lock_acquired:r",
+ "-e", "lock:lock_contended:r",
+ "-e", "lock:lock_release:r",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_lock(int argc, const char **argv, const char *prefix __used)
+{
+ unsigned int i;
+
+ symbol__init();
+ for (i = 0; i < LOCKHASH_SIZE; i++)
+ INIT_LIST_HEAD(lockhash_table + i);
+
+ argc = parse_options(argc, argv, lock_options, lock_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(lock_usage, lock_options);
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strncmp(argv[0], "report", 6)) {
+ trace_handler = &report_lock_ops;
+ if (argc) {
+ argc = parse_options(argc, argv,
+ report_options, report_usage, 0);
+ if (argc)
+ usage_with_options(report_usage, report_options);
+ }
+ __cmd_report();
+ } else if (!strcmp(argv[0], "trace")) {
+ /* Aliased to 'perf trace' */
+ return cmd_trace(argc, argv, prefix);
+ } else if (!strcmp(argv[0], "map")) {
+ /* recycling report_lock_ops */
+ trace_handler = &report_lock_ops;
+ setup_pager();
+ read_events();
+ dump_map();
+ } else {
+ usage_with_options(lock_usage, lock_options);
+ }
+
+ return 0;
+}
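
perf lock keys its statistics on the lockdep_map address, hashed into a 4096-bucket table of list heads (LOCKHASH_BITS/lockhashentry() above), with lock_stat_findnew() doing a find-or-create on every event. A minimal sketch of the same pattern, using a plain modulo hash and singly linked buckets instead of the kernel's hash_long() and struct list_head:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    #define NBUCKETS 4096                   /* 1 << 12, as in LOCKHASH_SIZE */

    struct stat_entry {
            struct stat_entry *next;
            void *addr;                     /* lock address, used as the ID */
            char *name;
            unsigned int nr_acquired;
    };

    static struct stat_entry *buckets[NBUCKETS];

    static unsigned int hashfn(void *addr)
    {
            return (unsigned int)(((uintptr_t)addr >> 4) % NBUCKETS);
    }

    /* find the entry for addr, creating it on first sight */
    static struct stat_entry *stat_findnew(void *addr, const char *name)
    {
            unsigned int h = hashfn(addr);
            struct stat_entry *e;

            for (e = buckets[h]; e; e = e->next)
                    if (e->addr == addr)
                            return e;

            e = calloc(1, sizeof(*e));
            if (!e)
                    exit(1);                /* the real code die()s here too */
            e->addr = addr;
            e->name = strdup(name);
            e->next = buckets[h];
            buckets[h] = e;
            return e;
    }

    int main(void)
    {
            int lock_a, lock_b;

            stat_findnew(&lock_a, "rq->lock")->nr_acquired++;
            stat_findnew(&lock_b, "dcache_lock")->nr_acquired++;
            stat_findnew(&lock_a, "rq->lock")->nr_acquired++;

            printf("%s acquired %u times\n",
                   stat_findnew(&lock_a, "rq->lock")->name,
                   stat_findnew(&lock_a, "rq->lock")->nr_acquired);
            return 0;
    }

The sorted report is then built by draining the hash table into an rbtree via insert_to_result()/pop_from_result(), as done above.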
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c1e6774..ad47bd4 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -41,7 +41,6 @@
#include "util/debugfs.h"
#include "util/symbol.h"
#include "util/thread.h"
-#include "util/session.h"
#include "util/parse-options.h"
#include "util/parse-events.h" /* For debugfs_path */
#include "util/probe-finder.h"
@@ -55,11 +54,13 @@ static struct {
bool need_dwarf;
bool list_events;
bool force_add;
+ bool show_lines;
int nr_probe;
struct probe_point probes[MAX_PROBES];
struct strlist *dellist;
- struct perf_session *psession;
- struct map *kmap;
+ struct map_groups kmap_groups;
+ struct map *kmaps[MAP__NR_TYPES];
+ struct line_range line_range;
} session;
@@ -120,8 +121,8 @@ static int opt_del_probe_event(const struct option *opt __used,
static void evaluate_probe_point(struct probe_point *pp)
{
struct symbol *sym;
- sym = map__find_symbol_by_name(session.kmap, pp->function,
- session.psession, NULL);
+ sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION],
+ pp->function, NULL);
if (!sym)
die("Kernel symbol \'%s\' not found - probe not added.",
pp->function);
@@ -130,12 +131,23 @@ static void evaluate_probe_point(struct probe_point *pp)
#ifndef NO_LIBDWARF
static int open_vmlinux(void)
{
- if (map__load(session.kmap, session.psession, NULL) < 0) {
+ if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
pr_debug("Failed to load kernel map.\n");
return -EINVAL;
}
- pr_debug("Try to open %s\n", session.kmap->dso->long_name);
- return open(session.kmap->dso->long_name, O_RDONLY);
+ pr_debug("Try to open %s\n",
+ session.kmaps[MAP__FUNCTION]->dso->long_name);
+ return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+}
+
+static int opt_show_lines(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ if (str)
+ parse_line_range_desc(str, &session.line_range);
+ INIT_LIST_HEAD(&session.line_range.line_list);
+ session.show_lines = true;
+ return 0;
}
#endif
@@ -144,6 +156,7 @@ static const char * const probe_usage[] = {
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
+ "perf probe --line 'LINEDESC'",
NULL
};
@@ -182,9 +195,31 @@ static const struct option options[] = {
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
" with existing name"),
+#ifndef NO_LIBDWARF
+ OPT_CALLBACK('L', "line", NULL,
+ "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
+ "Show source code lines.", opt_show_lines),
+#endif
OPT_END()
};
+/* Initialize symbol maps for vmlinux */
+static void init_vmlinux(void)
+{
+ symbol_conf.sort_by_name = true;
+ if (symbol_conf.vmlinux_name == NULL)
+ symbol_conf.try_vmlinux_path = true;
+ else
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+ if (symbol__init() < 0)
+ die("Failed to init symbol map.");
+
+ map_groups__init(&session.kmap_groups);
+ if (map_groups__create_kernel_maps(&session.kmap_groups,
+ session.kmaps) < 0)
+ die("Failed to create kernel maps.");
+}
+
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
int i, ret;
@@ -203,7 +238,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
parse_probe_event_argv(argc, argv);
}
- if ((!session.nr_probe && !session.dellist && !session.list_events))
+ if ((!session.nr_probe && !session.dellist && !session.list_events &&
+ !session.show_lines))
usage_with_options(probe_usage, options);
if (debugfs_valid_mountpoint(debugfs_path) < 0)
@@ -215,10 +251,34 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
" --add/--del.\n");
usage_with_options(probe_usage, options);
}
+ if (session.show_lines) {
+ pr_warning(" Error: Don't use --list with --line.\n");
+ usage_with_options(probe_usage, options);
+ }
show_perf_probe_events();
return 0;
}
+#ifndef NO_LIBDWARF
+ if (session.show_lines) {
+ if (session.nr_probe != 0 || session.dellist) {
+ pr_warning(" Error: Don't use --line with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+ init_vmlinux();
+ fd = open_vmlinux();
+ if (fd < 0)
+ die("Could not open debuginfo file.");
+ ret = find_line_range(fd, &session.line_range);
+ if (ret <= 0)
+ die("Source line is not found.\n");
+ close(fd);
+ show_line_range(&session.line_range);
+ return 0;
+ }
+#endif
+
if (session.dellist) {
del_trace_kprobe_events(session.dellist);
strlist__delete(session.dellist);
@@ -226,20 +286,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
return 0;
}
- /* Initialize symbol maps for vmlinux */
- symbol_conf.sort_by_name = true;
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
- if (symbol__init() < 0)
- die("Failed to init symbol map.");
- session.psession = perf_session__new(NULL, O_WRONLY, false);
- if (session.psession == NULL)
- die("Failed to init perf_session.");
- session.kmap = map_groups__find_by_name(&session.psession->kmaps,
- MAP__FUNCTION,
- "[kernel.kallsyms]");
- if (!session.kmap)
- die("Could not find kernel map.\n");
+ /* Add probes */
+ init_vmlinux();
if (session.need_dwarf)
#ifdef NO_LIBDWARF
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 2654253..771533c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -5,10 +5,13 @@
* (or a CPU, or a PID) into the perf.data output file - for
* later analysis via perf report.
*/
+#define _FILE_OFFSET_BITS 64
+
#include "builtin.h"
#include "perf.h"
+#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -62,6 +65,7 @@ static int nr_poll = 0;
static int nr_cpu = 0;
static int file_new = 1;
+static off_t post_processing_offset;
static struct perf_session *session;
@@ -111,22 +115,10 @@ static void write_output(void *buf, size_t size)
}
}
-static void write_event(event_t *buf, size_t size)
-{
- /*
- * Add it to the list of DSOs, so that when we finish this
- * record session we can pick the available build-ids.
- */
- if (buf->header.type == PERF_RECORD_MMAP)
- dsos__findnew(buf->mmap.filename);
-
- write_output(buf, size);
-}
-
static int process_synthesized_event(event_t *event,
struct perf_session *self __used)
{
- write_event(event, event->header.size);
+ write_output(event, event->header.size);
return 0;
}
@@ -178,14 +170,14 @@ static void mmap_read(struct mmap_data *md)
size = md->mask + 1 - (old & md->mask);
old += size;
- write_event(buf, size);
+ write_output(buf, size);
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- write_event(buf, size);
+ write_output(buf, size);
md->prev = old;
mmap_write_tail(md, old);
@@ -395,10 +387,21 @@ static void open_counters(int cpu, pid_t pid)
nr_cpu++;
}
+static int process_buildids(void)
+{
+ u64 size = lseek(output, 0, SEEK_CUR);
+
+ session->fd = output;
+ return __perf_session__process_events(session, post_processing_offset,
+ size - post_processing_offset,
+ size, &build_id__mark_dso_hit_ops);
+}
+
static void atexit_header(void)
{
session->header.data_size += bytes_written;
+ process_buildids();
perf_header__write(&session->header, output, true);
}
@@ -551,8 +554,23 @@ static int __cmd_record(int argc, const char **argv)
return err;
}
+ post_processing_offset = lseek(output, 0, SEEK_CUR);
+
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ session, "_text");
+ if (err < 0) {
+ pr_err("Couldn't record kernel reference relocation symbol.\n");
+ return err;
+ }
+
+ err = event__synthesize_modules(process_synthesized_event, session);
+ if (err < 0) {
+ pr_err("Couldn't record kernel reference relocation symbol.\n");
+ return err;
+ }
+
if (!system_wide && profile_cpu == -1)
- event__synthesize_thread(pid, process_synthesized_event,
+ event__synthesize_thread(target_pid, process_synthesized_event,
session);
else
event__synthesize_threads(process_synthesized_event, session);
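
builtin-record.c now remembers where the event stream begins (post_processing_offset = lseek(output, 0, SEEK_CUR)) so that, at exit, process_buildids() can re-read only that region and mark which DSOs actually got hits before the header is rewritten. The bookkeeping amounts to the following sketch, where scan_region() is a stand-in for __perf_session__process_events() with build_id__mark_dso_hit_ops:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/types.h>

    /* placeholder for the real pass over [start, end) of the output file */
    static void scan_region(int fd, off_t start, off_t end)
    {
            (void)fd;
            printf("post-processing bytes %lld..%lld\n",
                   (long long)start, (long long)end);
    }

    int main(void)
    {
            int fd = open("perf.data.example", O_RDWR | O_CREAT | O_TRUNC, 0644);
            off_t post_processing_offset, size;
            const char header[16] = "HEADER";
            const char samples[32] = "SAMPLES";

            if (fd < 0)
                    return 1;
            if (write(fd, header, sizeof(header)) < 0)       /* file header */
                    return 1;
            post_processing_offset = lseek(fd, 0, SEEK_CUR); /* events start here */
            if (write(fd, samples, sizeof(samples)) < 0)     /* recorded events */
                    return 1;

            size = lseek(fd, 0, SEEK_CUR);                   /* total bytes so far */
            scan_region(fd, post_processing_offset, size);
            close(fd);
            return 0;
    }

Design-wise this replaces the old write_event() hook that registered every PERF_RECORD_MMAP filename as it was written: deferring the work to one pass at the end keeps the record fast path to a plain write_output().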
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 860f1ee..cfc655d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -34,6 +34,8 @@
static char const *input_name = "perf.data";
static int force;
+static bool hide_unresolved;
+static bool dont_use_callchains;
static int show_threads;
static struct perf_read_values show_threads_values;
@@ -91,11 +93,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
- event->header.misc,
- data.pid, data.tid,
- (void *)(long)data.ip,
- (long long)data.period);
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
unsigned int i;
@@ -121,7 +120,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return -1;
}
- if (al.filtered)
+ if (al.filtered || (hide_unresolved && al.sym == NULL))
return 0;
if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) {
@@ -156,14 +155,14 @@ static int process_read_event(event_t *event, struct perf_session *session __use
return 0;
}
-static int sample_type_check(struct perf_session *session)
+static int perf_session__setup_sample_type(struct perf_session *self)
{
- if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
fprintf(stderr, "selected --sort parent, but no"
" callchain data. Did you call"
" perf record without -g?\n");
- return -1;
+ return -EINVAL;
}
if (symbol_conf.use_callchain) {
fprintf(stderr, "selected -g but no callchain data."
@@ -171,12 +170,13 @@ static int sample_type_check(struct perf_session *session)
" -g?\n");
return -1;
}
- } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) {
+ } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
+ !symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (register_callchain_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
- return -1;
+ return -EINVAL;
}
}
@@ -184,20 +184,18 @@ static int sample_type_check(struct perf_session *session)
}
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_exit_event = event__process_task,
- .process_fork_event = event__process_task,
- .process_lost_event = event__process_lost,
- .process_read_event = process_read_event,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
+ .read = process_read_event,
};
-
static int __cmd_report(void)
{
- int ret;
+ int ret = -EINVAL;
struct perf_session *session;
session = perf_session__new(input_name, O_RDONLY, force);
@@ -207,6 +205,10 @@ static int __cmd_report(void)
if (show_threads)
perf_read_values_init(&show_threads_values);
+ ret = perf_session__setup_sample_type(session);
+ if (ret)
+ goto out_delete;
+
ret = perf_session__process_events(session, &event_ops);
if (ret)
goto out_delete;
@@ -243,11 +245,19 @@ out_delete:
static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset __used)
+ int unset)
{
char *tok;
char *endptr;
+ /*
+ * --no-call-graph
+ */
+ if (unset) {
+ dont_use_callchains = true;
+ return 0;
+ }
+
symbol_conf.use_callchain = true;
if (!arg)
@@ -319,7 +329,7 @@ static const struct option options[] = {
"pretty printing style key: normal raw"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
- OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
+ OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
OPT_STRING('p', "parent", &parent_pattern, "regex",
"regex filter to identify parent, see: '--sort parent'"),
@@ -340,6 +350,8 @@ static const struct option options[] = {
OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
+ OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
+ "Only display entries resolved to a symbol"),
OPT_END()
};
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 80209df..4f5a03e 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1621,11 +1621,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
- event->header.misc,
- data.pid, data.tid,
- (void *)(long)data.ip,
- (long long)data.period);
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
thread = perf_session__findnew(session, data.pid);
if (thread == NULL) {
@@ -1653,33 +1650,22 @@ static int process_lost_event(event_t *event __used,
return 0;
}
-static int sample_type_check(struct perf_session *session __used)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .process_lost_event = process_lost_event,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .lost = process_lost_event,
};
static int read_events(void)
{
- int err;
+ int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
if (session == NULL)
return -ENOMEM;
- err = perf_session__process_events(session, &event_ops);
+ if (perf_session__has_traces(session, "record -R"))
+ err = perf_session__process_events(session, &event_ops);
+
perf_session__delete(session);
return err;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c70d720..e8c85d5 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -44,6 +44,7 @@
#include "util/parse-events.h"
#include "util/event.h"
#include "util/debug.h"
+#include "util/header.h"
#include <sys/prctl.h>
#include <math.h>
@@ -79,6 +80,8 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS];
static int event_scaled[MAX_COUNTERS];
+static volatile int done = 0;
+
struct stats
{
double n, mean, M2;
@@ -247,61 +250,64 @@ static int run_perf_stat(int argc __used, const char **argv)
unsigned long long t0, t1;
int status = 0;
int counter;
- int pid;
+ int pid = target_pid;
int child_ready_pipe[2], go_pipe[2];
+ const bool forks = (target_pid == -1 && argc > 0);
char buf;
if (!system_wide)
nr_cpus = 1;
- if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
+ if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
exit(1);
}
- if ((pid = fork()) < 0)
- perror("failed to fork");
+ if (forks) {
+ if ((pid = fork()) < 0)
+ perror("failed to fork");
+
+ if (!pid) {
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
+ }
- if (!pid) {
- close(child_ready_pipe[0]);
- close(go_pipe[1]);
- fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+ child_pid = pid;
/*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
- * Tell the parent we're ready to go
+ * Wait for the child to be ready to exec.
*/
close(child_ready_pipe[1]);
-
- /*
- * Wait until the parent tells us to go.
- */
- if (read(go_pipe[0], &buf, 1) == -1)
+ close(go_pipe[0]);
+ if (read(child_ready_pipe[0], &buf, 1) == -1)
perror("unable to read pipe");
-
- execvp(argv[0], (char **)argv);
-
- perror(argv[0]);
- exit(-1);
+ close(child_ready_pipe[0]);
}
- child_pid = pid;
-
- /*
- * Wait for the child to be ready to exec.
- */
- close(child_ready_pipe[1]);
- close(go_pipe[0]);
- if (read(child_ready_pipe[0], &buf, 1) == -1)
- perror("unable to read pipe");
- close(child_ready_pipe[0]);
-
for (counter = 0; counter < nr_counters; counter++)
create_perf_stat_counter(counter, pid);
@@ -310,8 +316,12 @@ static int run_perf_stat(int argc __used, const char **argv)
*/
t0 = rdclock();
- close(go_pipe[1]);
- wait(&status);
+ if (forks) {
+ close(go_pipe[1]);
+ wait(&status);
+ } else {
+ while (!done);
+ }
t1 = rdclock();
@@ -417,10 +427,13 @@ static void print_stat(int argc, const char **argv)
fflush(stdout);
fprintf(stderr, "\n");
- fprintf(stderr, " Performance counter stats for \'%s", argv[0]);
-
- for (i = 1; i < argc; i++)
- fprintf(stderr, " %s", argv[i]);
+ fprintf(stderr, " Performance counter stats for ");
+ if (target_pid == -1) {
+ fprintf(stderr, "\'%s", argv[0]);
+ for (i = 1; i < argc; i++)
+ fprintf(stderr, " %s", argv[i]);
+ } else
+ fprintf(stderr, "task pid \'%d", target_pid);
fprintf(stderr, "\'");
if (run_count > 1)
@@ -445,6 +458,9 @@ static volatile int signr = -1;
static void skip_signal(int signo)
{
+ if (target_pid != -1)
+ done = 1;
+
signr = signo;
}
@@ -461,7 +477,7 @@ static void sig_atexit(void)
}
static const char * const stat_usage[] = {
- "perf stat [<options>] <command>",
+ "perf stat [<options>] [<command>]",
NULL
};
@@ -492,7 +508,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc)
+ if (!argc && target_pid == -1)
usage_with_options(stat_usage, options);
if (run_count <= 0)
usage_with_options(stat_usage, options);
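
run_perf_stat() now only forks when a command was given on the command line (forks = target_pid == -1 && argc > 0); when it does fork it keeps the two-pipe handshake: the child signals readiness by closing its end of child_ready_pipe and then blocks on go_pipe until the parent closes the write end once the counters are set up. A stripped-down version of that handshake (the dummy execvp() PLT warm-up is left out), with sleep 1 standing in for the measured command:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/wait.h>

    int main(void)
    {
            int child_ready_pipe[2], go_pipe[2];
            char buf;
            pid_t pid;

            if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
                    perror("pipe");
                    exit(1);
            }

            pid = fork();
            if (pid < 0) {
                    perror("fork");
                    exit(1);
            }

            if (!pid) {                             /* child */
                    close(child_ready_pipe[0]);
                    close(go_pipe[1]);
                    close(child_ready_pipe[1]);     /* tell the parent we're ready */
                    if (read(go_pipe[0], &buf, 1) == -1)    /* wait for "go" */
                            perror("read");
                    execlp("sleep", "sleep", "1", (char *)NULL);
                    perror("execlp");
                    exit(1);
            }

            /* parent: wait for readiness, set up counters here, then release */
            close(child_ready_pipe[1]);
            close(go_pipe[0]);
            if (read(child_ready_pipe[0], &buf, 1) == -1)
                    perror("read");
            close(child_ready_pipe[0]);

            close(go_pipe[1]);                      /* go! */
            wait(NULL);
            return 0;
    }

In the attach case (-p given, no command) none of this runs; the measurement loop simply spins on the new done flag until the signal handler sets it.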
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 3f8bbcf..0d4d8ff 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1029,33 +1029,24 @@ static void process_samples(struct perf_session *session)
}
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr, "No trace samples found in the file.\n"
- "Have you used 'perf timechart record' to record it?\n");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_comm_event = process_comm_event,
- .process_fork_event = process_fork_event,
- .process_exit_event = process_exit_event,
- .process_sample_event = queue_sample_event,
- .sample_type_check = sample_type_check,
+ .comm = process_comm_event,
+ .fork = process_fork_event,
+ .exit = process_exit_event,
+ .sample = queue_sample_event,
};
static int __cmd_timechart(void)
{
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
- int ret;
+ int ret = -EINVAL;
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "timechart record"))
+ goto out_delete;
+
ret = perf_session__process_events(session, &event_ops);
if (ret)
goto out_delete;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4b91d8c..31f2e59 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -94,6 +94,7 @@ struct source_line {
static char *sym_filter = NULL;
struct sym_entry *sym_filter_entry = NULL;
+struct sym_entry *sym_filter_entry_sched = NULL;
static int sym_pcnt_filter = 5;
static int sym_counter = 0;
static int display_weighted = -1;
@@ -201,10 +202,9 @@ static void parse_source(struct sym_entry *syme)
len = sym->end - sym->start;
sprintf(command,
- "objdump --start-address=0x%016Lx "
- "--stop-address=0x%016Lx -dS %s",
- map->unmap_ip(map, sym->start),
- map->unmap_ip(map, sym->end), path);
+ "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+ BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
+ BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
file = popen(command, "r");
if (!file)
@@ -215,7 +215,7 @@ static void parse_source(struct sym_entry *syme)
while (!feof(file)) {
struct source_line *src;
size_t dummy = 0;
- char *c;
+ char *c, *sep;
src = malloc(sizeof(struct source_line));
assert(src != NULL);
@@ -234,14 +234,11 @@ static void parse_source(struct sym_entry *syme)
*source->lines_tail = src;
source->lines_tail = &src->next;
- if (strlen(src->line)>8 && src->line[8] == ':') {
- src->eip = strtoull(src->line, NULL, 16);
- src->eip = map->unmap_ip(map, src->eip);
- }
- if (strlen(src->line)>8 && src->line[16] == ':') {
- src->eip = strtoull(src->line, NULL, 16);
- src->eip = map->unmap_ip(map, src->eip);
- }
+ src->eip = strtoull(src->line, &sep, 16);
+ if (*sep == ':')
+ src->eip = map__objdump_2ip(map, src->eip);
+ else /* this line has no ip info (e.g. source line) */
+ src->eip = 0;
}
pclose(file);
out_assign:
@@ -276,6 +273,9 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
goto out_unlock;
for (line = syme->src->lines; line; line = line->next) {
+ /* skip lines without IP info */
+ if (line->eip == 0)
+ continue;
if (line->eip == ip) {
line->count[counter]++;
break;
@@ -287,17 +287,20 @@ out_unlock:
pthread_mutex_unlock(&syme->src->lock);
}
+#define PATTERN_LEN (BITS_PER_LONG / 4 + 2)
+
static void lookup_sym_source(struct sym_entry *syme)
{
struct symbol *symbol = sym_entry__symbol(syme);
struct source_line *line;
- char pattern[PATH_MAX];
+ char pattern[PATTERN_LEN + 1];
- sprintf(pattern, "<%s>:", symbol->name);
+ sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+ map__rip_2objdump(syme->map, symbol->start));
pthread_mutex_lock(&syme->src->lock);
for (line = syme->src->lines; line; line = line->next) {
- if (strstr(line->line, pattern)) {
+ if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
syme->src->source = line;
break;
}
@@ -667,7 +670,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
}
if (!found) {
- fprintf(stderr, "Sorry, %s is not active.\n", sym_filter);
+ fprintf(stderr, "Sorry, %s is not active.\n", buf);
sleep(1);
return;
} else
@@ -695,11 +698,9 @@ static void print_mapped_keys(void)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
- if (symbol_conf.vmlinux_name) {
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
- fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
- fprintf(stdout, "\t[S] stop annotation.\n");
- }
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+ fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
+ fprintf(stdout, "\t[S] stop annotation.\n");
if (nr_counters > 1)
fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
@@ -725,14 +726,13 @@ static int key_mapped(int c)
case 'Q':
case 'K':
case 'U':
+ case 'F':
+ case 's':
+ case 'S':
return 1;
case 'E':
case 'w':
return nr_counters > 1 ? 1 : 0;
- case 'F':
- case 's':
- case 'S':
- return symbol_conf.vmlinux_name ? 1 : 0;
default:
break;
}
@@ -910,8 +910,12 @@ static int symbol_filter(struct map *map, struct symbol *sym)
syme = symbol__priv(sym);
syme->map = map;
syme->src = NULL;
- if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
- sym_filter_entry = syme;
+
+ if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
+ /* schedule initial sym_filter_entry setup */
+ sym_filter_entry_sched = syme;
+ sym_filter = NULL;
+ }
for (i = 0; skip_symbols[i]; i++) {
if (!strcmp(skip_symbols[i], name)) {
@@ -934,8 +938,11 @@ static void event__process_sample(const event_t *self,
struct addr_location al;
u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ ++samples;
+
switch (origin) {
case PERF_RECORD_MISC_USER:
+ ++userspace_samples;
if (hide_user_symbols)
return;
break;
@@ -948,9 +955,38 @@ static void event__process_sample(const event_t *self,
}
if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
- al.sym == NULL || al.filtered)
+ al.filtered)
return;
+ if (al.sym == NULL) {
+ /*
+ * As we do lazy loading of symtabs we only will know if the
+ * specified vmlinux file is invalid when we actually have a
+ * hit in kernel space and then try to load it. So if we get
+ * here and there are _no_ symbols in the DSO backing the
+ * kernel map, bail out.
+ *
+ * We may never get here, for instance, if we use -K/
+ * --hide-kernel-symbols, even if the user specifies an
+ * invalid --vmlinux ;-)
+ */
+ if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
+ RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
+ pr_err("The %s file can't be used\n",
+ symbol_conf.vmlinux_name);
+ exit(1);
+ }
+
+ return;
+ }
+
+ /* let's see, whether we need to install initial sym_filter_entry */
+ if (sym_filter_entry_sched) {
+ sym_filter_entry = sym_filter_entry_sched;
+ sym_filter_entry_sched = NULL;
+ parse_source(sym_filter_entry);
+ }
+
syme = symbol__priv(al.sym);
if (!syme->skip) {
syme->count[counter]++;
@@ -960,9 +996,6 @@ static void event__process_sample(const event_t *self,
if (list_empty(&syme->node) || !syme->node.next)
__list_insert_active_sym(syme);
pthread_mutex_unlock(&active_symbols_lock);
- if (origin == PERF_RECORD_MISC_USER)
- ++userspace_samples;
- ++samples;
}
}
@@ -975,6 +1008,10 @@ static int event__process(event_t *event, struct perf_session *session)
case PERF_RECORD_MMAP:
event__process_mmap(event, session);
break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ event__process_task(event, session);
+ break;
default:
break;
}
@@ -1244,7 +1281,7 @@ static const struct option options[] = {
OPT_BOOLEAN('i', "inherit", &inherit,
"child tasks inherit counters"),
OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
- "symbol to annotate - requires -k option"),
+ "symbol to annotate"),
OPT_BOOLEAN('z', "zero", &zero,
"zero history across updates"),
OPT_INTEGER('F', "freq", &freq,
@@ -1280,16 +1317,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
symbol_conf.priv_size = (sizeof(struct sym_entry) +
(nr_counters + 1) * sizeof(unsigned long));
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
+
+ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init() < 0)
return -1;
if (delay_secs < 1)
delay_secs = 1;
- parse_source(sym_filter_entry);
-
/*
* User specified count overrides default frequency.
*/
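
A side note on the annotation changes above: printing the start address with a field width of BITS_PER_LONG / 4 gives every objdump address a fixed textual width, which is what lets lookup_sym_source() replace the strstr() on "<name>:" with a memcmp() over exactly PATTERN_LEN bytes. A self-contained sketch of the matching step (the address and output line are made up):

	#include <stdio.h>
	#include <string.h>

	#define BITS_PER_LONG	64			/* assuming a 64-bit build */
	#define PATTERN_LEN	(BITS_PER_LONG / 4 + 2)	/* 16 hex digits + " <" */

	int main(void)
	{
		unsigned long long start = 0xffffffff8102a4c0ULL;
		const char *line = "ffffffff8102a4c0 <schedule>:";
		char pattern[PATTERN_LEN + 1];

		/* zero padded, fixed width, no 0x prefix, as in lookup_sym_source() */
		snprintf(pattern, sizeof(pattern), "%0*llx <",
			 BITS_PER_LONG / 4, start);

		if (memcmp(line, pattern, PATTERN_LEN) == 0)
			printf("found the symbol's first objdump line\n");
		return 0;
	}
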
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 574a215..5db687f 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -44,6 +44,7 @@ static void setup_scripting(void)
perf_set_argv_exec_path(perf_exec_path());
setup_perl_scripting();
+ setup_python_scripting();
scripting_ops = &default_scripting_ops;
}
@@ -75,11 +76,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
- event->header.misc,
- data.pid, data.tid,
- (void *)(long)data.ip,
- (long long)data.period);
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL) {
@@ -103,22 +101,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return 0;
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};
static int __cmd_trace(struct perf_session *session)
@@ -235,9 +220,9 @@ static int parse_scriptname(const struct option *opt __used,
const char *script, *ext;
int len;
- if (strcmp(str, "list") == 0) {
+ if (strcmp(str, "lang") == 0) {
list_available_languages();
- return 0;
+ exit(0);
}
script = strchr(str, ':');
@@ -531,6 +516,8 @@ static const struct option options[] = {
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-trace.xx script in specified language"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
OPT_END()
};
@@ -592,6 +579,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "record -R"))
+ return -EINVAL;
+
if (generate_script_lang) {
struct stat perf_stat;
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 18035b1..10fe49e 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd);
extern int cmd_annotate(int argc, const char **argv, const char *prefix);
extern int cmd_bench(int argc, const char **argv, const char *prefix);
+extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
extern int cmd_diff(int argc, const char **argv, const char *prefix);
extern int cmd_help(int argc, const char **argv, const char *prefix);
@@ -30,5 +31,6 @@ extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
extern int cmd_kmem(int argc, const char **argv, const char *prefix);
+extern int cmd_lock(int argc, const char **argv, const char *prefix);
#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 71dc7c3..9afcff2 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -3,7 +3,9 @@
# command name category [deprecated] [common]
#
perf-annotate mainporcelain common
+perf-archive mainporcelain common
perf-bench mainporcelain common
+perf-buildid-cache mainporcelain common
perf-buildid-list mainporcelain common
perf-diff mainporcelain common
perf-list mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 8d0de51..bd0bb1b 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -101,10 +101,10 @@ enum hw_event_ids {
*/
PERF_COUNT_HW_CPU_CYCLES = 0,
PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_MISSES = 3,
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
};
@@ -131,8 +131,8 @@ software events, selected by 'event_id':
*/
enum sw_event_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
PERF_COUNT_SW_CPU_MIGRATIONS = 4,
PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
new file mode 100644
index 0000000..45fbe2f
--- /dev/null
+++ b/tools/perf/perf-archive.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# perf archive
+# Arnaldo Carvalho de Melo <acme@redhat.com>
+
+PERF_DATA=perf.data
+if [ $# -ne 0 ] ; then
+ PERF_DATA=$1
+fi
+
+DEBUGDIR=~/.debug/
+BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
+
+perf buildid-list -i $PERF_DATA --with-hits > $BUILDIDS
+if [ ! -s $BUILDIDS ] ; then
+ echo "perf archive: no build-ids found"
+ rm -f $BUILDIDS
+ exit 1
+fi
+
+MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
+
+cut -d ' ' -f 1 $BUILDIDS | \
+while read build_id ; do
+ linkname=$DEBUGDIR.build-id/${build_id:0:2}/${build_id:2}
+ filename=$(readlink -f $linkname)
+ echo ${linkname#$DEBUGDIR} >> $MANIFEST
+ echo ${filename#$DEBUGDIR} >> $MANIFEST
+done
+
+tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
+rm -f $MANIFEST $BUILDIDS
+exit 0
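
perf-archive.sh depends on the layout of the ~/.debug cache (populated elsewhere, e.g. by the buildid-cache command added in this merge): each build-id is split into a two-character directory plus the remaining hex digits, and that relative path doubles as the tar member name. A small sketch of how such a link name is formed (the helper name is made up for illustration):

	#include <stdio.h>
	#include <limits.h>

	static void build_id__linkname(const char *debugdir, const char *build_id,
				       char *bf, size_t size)
	{
		/* e.g. ~/.debug/.build-id/18/5309b8... -> cached copy of the DSO */
		snprintf(bf, size, "%s.build-id/%.2s/%s",
			 debugdir, build_id, build_id + 2);
	}

	int main(void)
	{
		char linkname[PATH_MAX];

		build_id__linkname("/home/user/.debug/",
				   "185309b8f1e8c51b0370cfa1d0b0e0601b72fbfd",
				   linkname, sizeof(linkname));
		printf("%s\n", linkname);
		return 0;
	}
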
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 873e55f..57cb107 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -48,7 +48,8 @@ int check_pager_config(const char *cmd)
return c.val;
}
-static void commit_pager_choice(void) {
+static void commit_pager_choice(void)
+{
switch (use_pager) {
case 0:
setenv("PERF_PAGER", "cat", 1);
@@ -70,7 +71,7 @@ static void set_debugfs_path(void)
"tracing/events");
}
-static int handle_options(const char*** argv, int* argc, int* envchanged)
+static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
@@ -109,7 +110,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--perf-dir")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --perf-dir.\n" );
+ fprintf(stderr, "No directory given for --perf-dir.\n");
usage(perf_usage_string);
}
setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
@@ -124,7 +125,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --work-tree.\n" );
+ fprintf(stderr, "No directory given for --work-tree.\n");
usage(perf_usage_string);
}
setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@@ -168,7 +169,7 @@ static int handle_alias(int *argcp, const char ***argv)
{
int envchanged = 0, ret = 0, saved_errno = errno;
int count, option_count;
- const char** new_argv;
+ const char **new_argv;
const char *alias_command;
char *alias_string;
@@ -210,11 +211,11 @@ static int handle_alias(int *argcp, const char ***argv)
if (!strcmp(alias_command, new_argv[0]))
die("recursive alias: %s", alias_command);
- new_argv = realloc(new_argv, sizeof(char*) *
+ new_argv = realloc(new_argv, sizeof(char *) *
(count + *argcp + 1));
/* insert after command name */
- memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp);
- new_argv[count+*argcp] = NULL;
+ memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+ new_argv[count + *argcp] = NULL;
*argv = new_argv;
*argcp += count - 1;
@@ -285,6 +286,7 @@ static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
+ { "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "diff", cmd_diff, 0 },
{ "help", cmd_help, 0 },
@@ -301,6 +303,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
@@ -388,7 +391,7 @@ static int run_argv(int *argcp, const char ***argv)
/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
static void get_debugfs_mntpt(void)
{
- const char *path = debugfs_find_mountpoint();
+ const char *path = debugfs_mount(NULL);
if (path)
strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
@@ -449,8 +452,8 @@ int main(int argc, const char **argv)
setup_path();
while (1) {
- static int done_help = 0;
- static int was_alias = 0;
+ static int done_help;
+ static int was_alias;
was_alias = run_argv(&argc, &argv);
if (errno != ENOENT)
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index af78d9a..01a64ad 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -31,13 +31,14 @@
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
-#include "../../../util/trace-event-perl.h"
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
#ifndef PERL_UNUSED_VAR
# define PERL_UNUSED_VAR(var) if (0) var = var
#endif
-#line 41 "Context.c"
+#line 42 "Context.c"
XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
XS(XS_Perf__Trace__Context_common_pc)
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
index fb78006..549cf04 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
@@ -22,7 +22,8 @@
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
-#include "../../../util/trace-event-perl.h"
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
PROTOTYPES: ENABLE
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
index 052f132..f869c48 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
@@ -44,7 +44,7 @@ sub nsecs_secs {
sub nsecs_nsecs {
my ($nsecs) = @_;
- return $nsecs - nsecs_secs($nsecs);
+ return $nsecs % $NSECS_PER_SEC;
}
sub nsecs_str {
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record
index c7ec5de..e6cb147 100644
--- a/tools/perf/scripts/perl/bin/check-perf-trace-record
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-record
@@ -1,7 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry
-
-
-
-
-
+perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report
deleted file mode 100644
index 7fc4a03..0000000
--- a/tools/perf/scripts/perl/bin/check-perf-trace-report
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-# description: useless but exhaustive test script
-perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl
-
-
-
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
new file mode 100644
index 0000000..f8885d3
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
new file mode 100644
index 0000000..8bfc660
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide failed syscalls
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
new file mode 100644
index 0000000..c18e7e2
--- /dev/null
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -0,0 +1,38 @@
+# failed system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+my %failed_syscalls;
+
+sub raw_syscalls::sys_exit
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $id, $ret) = @_;
+
+ if ($ret < 0) {
+ $failed_syscalls{$common_comm}++;
+ }
+}
+
+sub trace_end
+{
+ printf("\nfailed syscalls by comm:\n\n");
+
+ printf("%-20s %10s\n", "comm", "# errors");
+ printf("%-20s %6s %10s\n", "--------------------", "----------");
+
+ foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
+ keys %failed_syscalls) {
+ printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
+ }
+}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
new file mode 100644
index 0000000..957085d
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -0,0 +1,88 @@
+/*
+ * Context.c. Python interfaces for perf trace.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <Python.h>
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_pc(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_flags(PyObject *self,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_flags(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_lock_depth(PyObject *self,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_lock_depth(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyMethodDef ContextMethods[] = {
+ { "common_pc", perf_trace_context_common_pc, METH_VARARGS,
+ "Get the common preempt count event field value."},
+ { "common_flags", perf_trace_context_common_flags, METH_VARARGS,
+ "Get the common flags event field value."},
+ { "common_lock_depth", perf_trace_context_common_lock_depth,
+ METH_VARARGS, "Get the common lock depth event field value."},
+ { NULL, NULL, 0, NULL}
+};
+
+PyMODINIT_FUNC initperf_trace_context(void)
+{
+ (void) Py_InitModule("perf_trace_context", ContextMethods);
+}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
new file mode 100644
index 0000000..1dc464e
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -0,0 +1,91 @@
+# Core.py - Python extension for perf trace, core functions
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+from collections import defaultdict
+
+def autodict():
+ return defaultdict(autodict)
+
+flag_fields = autodict()
+symbolic_fields = autodict()
+
+def define_flag_field(event_name, field_name, delim):
+ flag_fields[event_name][field_name]['delim'] = delim
+
+def define_flag_value(event_name, field_name, value, field_str):
+ flag_fields[event_name][field_name]['values'][value] = field_str
+
+def define_symbolic_field(event_name, field_name):
+ # nothing to do, really
+ pass
+
+def define_symbolic_value(event_name, field_name, value, field_str):
+ symbolic_fields[event_name][field_name]['values'][value] = field_str
+
+def flag_str(event_name, field_name, value):
+ string = ""
+
+ if flag_fields[event_name][field_name]:
+ print_delim = 0
+ keys = flag_fields[event_name][field_name]['values'].keys()
+ keys.sort()
+ for idx in keys:
+ if not value and not idx:
+ string += flag_fields[event_name][field_name]['values'][idx]
+ break
+ if idx and (value & idx) == idx:
+ if print_delim and flag_fields[event_name][field_name]['delim']:
+ string += " " + flag_fields[event_name][field_name]['delim'] + " "
+ string += flag_fields[event_name][field_name]['values'][idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
+
+def symbol_str(event_name, field_name, value):
+ string = ""
+
+ if symbolic_fields[event_name][field_name]:
+ keys = symbolic_fields[event_name][field_name]['values'].keys()
+ keys.sort()
+ for idx in keys:
+ if not value and not idx:
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+ if (value == idx):
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+
+ return string
+
+trace_flags = { 0x00: "NONE", \
+ 0x01: "IRQS_OFF", \
+ 0x02: "IRQS_NOSUPPORT", \
+ 0x04: "NEED_RESCHED", \
+ 0x08: "HARDIRQ", \
+ 0x10: "SOFTIRQ" }
+
+def trace_flag_str(value):
+ string = ""
+ print_delim = 0
+
+ keys = trace_flags.keys()
+
+ for idx in keys:
+ if not value and not idx:
+ string += "NONE"
+ break
+
+ if idx and (value & idx) == idx:
+ if print_delim:
+ string += " | ";
+ string += trace_flags[idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 0000000..83e9143
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,25 @@
+# Util.py - Python extension for perf trace, miscellaneous utility code
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+NSECS_PER_SEC = 1000000000
+
+def avg(total, n):
+ return total / n
+
+def nsecs(secs, nsecs):
+ return secs * NSECS_PER_SEC + nsecs
+
+def nsecs_secs(nsecs):
+ return nsecs / NSECS_PER_SEC
+
+def nsecs_nsecs(nsecs):
+ return nsecs % NSECS_PER_SEC
+
+def nsecs_str(nsecs):
+ str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
+ return str
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 0000000..f8885d3
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 0000000..1e0c0a8
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide failed syscalls, by pid
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 0000000..45a8c50
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 0000000..f8044d1
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide syscall counts, by pid
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 0000000..45a8c50
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 0000000..a366aa61
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide syscall counts
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py $1
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 0000000..964d934
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,83 @@
+# perf trace event handlers, generated by perf trace -g python
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, Python scripting support should be ok.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from Core import *
+from perf_trace_context import *
+
+unhandled = autodict()
+
+def trace_begin():
+ print "trace_begin"
+ pass
+
+def trace_end():
+ print_unhandled()
+
+def irq__softirq_entry(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print "vec=%s\n" % \
+ (symbol_str("irq__softirq_entry", "vec", vec)),
+
+def kmem__kmalloc(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print "call_site=%u, ptr=%u, bytes_req=%u, " \
+ "bytes_alloc=%u, gfp_flags=%s\n" % \
+ (call_site, ptr, bytes_req, bytes_alloc,
+
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+
+def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm):
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+ print "%-20s %5u %05u.%09u %8u %-20s " % \
+ (event_name, cpu, secs, nsecs, pid, comm),
+
+# print trace fields not included in handler args
+def print_uncommon(context):
+ print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
+ % (common_pc(context), trace_flag_str(common_flags(context)), \
+ common_lock_depth(context))
+
+def print_unhandled():
+ keys = unhandled.keys()
+ if not keys:
+ return
+
+ print "\nunhandled events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for event_name in keys:
+ print "%-40s %10d\n" % (event_name, unhandled[event_name])
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
new file mode 100644
index 0000000..0ca0227
--- /dev/null
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -0,0 +1,68 @@
+# failed system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals, broken down by pid.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_error_totals()
+
+def raw_syscalls__sys_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, ret):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+
+ if ret < 0:
+ try:
+ syscalls[common_comm][common_pid][id][ret] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id][ret] = 1
+
+def print_error_totals():
+ if for_comm is not None:
+ print "\nsyscall errors for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall errors:\n\n",
+
+ print "%-30s %10s\n" % ("comm [pid]", "count"),
+ print "%-30s %10s\n" % ("------------------------------", \
+ "----------"),
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print "\n%s [%d]\n" % (comm, pid),
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print " syscall: %-16d\n" % (id),
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
+ print " err = %-20d %10d\n" % (ret, val),
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 0000000..af722d6
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,64 @@
+# system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[common_comm][common_pid][id] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events by comm/pid:\n\n",
+
+ print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "----------"),
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print "\n%s [%d]\n" % (comm, pid),
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].iteritems(), \
+ key = lambda(k, v): (v, k), reverse = True):
+ print " %-38d %10d\n" % (id, val),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
new file mode 100644
index 0000000..f977e85
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -0,0 +1,58 @@
+# system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ print "%-40d %10d\n" % (id, val),
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
new file mode 100644
index 0000000..04904b3
--- /dev/null
+++ b/tools/perf/util/build-id.c
@@ -0,0 +1,39 @@
+/*
+ * build-id.c
+ *
+ * build-id support
+ *
+ * Copyright (C) 2009, 2010 Red Hat Inc.
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "build-id.h"
+#include "event.h"
+#include "symbol.h"
+#include <linux/kernel.h>
+
+static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = perf_session__findnew(session, event->ip.pid);
+
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
+
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
+ return 0;
+}
+
+struct perf_event_ops build_id__mark_dso_hit_ops = {
+ .sample = build_id__mark_dso_hit,
+ .mmap = event__process_mmap,
+ .fork = event__process_task,
+};
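
The ops table above exists so that a single pass over a perf.data file can flag every DSO that actually received samples (dso->hit = 1), which is the information perf buildid-list --with-hits, and through it perf-archive.sh, needs. A rough in-tree fragment of how a caller could drive it (the function name is illustrative; error handling and session teardown are trimmed):

	#include <fcntl.h>
	#include <errno.h>
	#include "build-id.h"
	#include "session.h"

	static int mark_dsos_with_hits(const char *input_name)
	{
		struct perf_session *session =
			perf_session__new(input_name, O_RDONLY, 0);

		if (session == NULL)
			return -ENOMEM;

		/* every PERF_RECORD_SAMPLE sets al.map->dso->hit = 1 */
		return perf_session__process_events(session,
						    &build_id__mark_dso_hit_ops);
	}
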
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
new file mode 100644
index 0000000..1d981d6
--- /dev/null
+++ b/tools/perf/util/build-id.h
@@ -0,0 +1,8 @@
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+
+#include "session.h"
+
+extern struct perf_event_ops build_id__mark_dso_hit_ops;
+
+#endif
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
deleted file mode 100644
index b557b83..0000000
--- a/tools/perf/util/data_map.c
+++ /dev/null
@@ -1,252 +0,0 @@
-#include "symbol.h"
-#include "util.h"
-#include "debug.h"
-#include "thread.h"
-#include "session.h"
-
-static int process_event_stub(event_t *event __used,
- struct perf_session *session __used)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
-{
- if (!handler->process_sample_event)
- handler->process_sample_event = process_event_stub;
- if (!handler->process_mmap_event)
- handler->process_mmap_event = process_event_stub;
- if (!handler->process_comm_event)
- handler->process_comm_event = process_event_stub;
- if (!handler->process_fork_event)
- handler->process_fork_event = process_event_stub;
- if (!handler->process_exit_event)
- handler->process_exit_event = process_event_stub;
- if (!handler->process_lost_event)
- handler->process_lost_event = process_event_stub;
- if (!handler->process_read_event)
- handler->process_read_event = process_event_stub;
- if (!handler->process_throttle_event)
- handler->process_throttle_event = process_event_stub;
- if (!handler->process_unthrottle_event)
- handler->process_unthrottle_event = process_event_stub;
-}
-
-static const char *event__name[] = {
- [0] = "TOTAL",
- [PERF_RECORD_MMAP] = "MMAP",
- [PERF_RECORD_LOST] = "LOST",
- [PERF_RECORD_COMM] = "COMM",
- [PERF_RECORD_EXIT] = "EXIT",
- [PERF_RECORD_THROTTLE] = "THROTTLE",
- [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
- [PERF_RECORD_FORK] = "FORK",
- [PERF_RECORD_READ] = "READ",
- [PERF_RECORD_SAMPLE] = "SAMPLE",
-};
-
-unsigned long event__total[PERF_RECORD_MAX];
-
-void event__print_totals(void)
-{
- int i;
- for (i = 0; i < PERF_RECORD_MAX; ++i)
- pr_info("%10s events: %10ld\n",
- event__name[i], event__total[i]);
-}
-
-static int process_event(event_t *event, struct perf_session *session,
- struct perf_event_ops *ops,
- unsigned long offset, unsigned long head)
-{
- trace_event(event);
-
- if (event->header.type < PERF_RECORD_MAX) {
- dump_printf("%p [%p]: PERF_RECORD_%s",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event__name[event->header.type]);
- ++event__total[0];
- ++event__total[event->header.type];
- }
-
- switch (event->header.type) {
- case PERF_RECORD_SAMPLE:
- return ops->process_sample_event(event, session);
- case PERF_RECORD_MMAP:
- return ops->process_mmap_event(event, session);
- case PERF_RECORD_COMM:
- return ops->process_comm_event(event, session);
- case PERF_RECORD_FORK:
- return ops->process_fork_event(event, session);
- case PERF_RECORD_EXIT:
- return ops->process_exit_event(event, session);
- case PERF_RECORD_LOST:
- return ops->process_lost_event(event, session);
- case PERF_RECORD_READ:
- return ops->process_read_event(event, session);
- case PERF_RECORD_THROTTLE:
- return ops->process_throttle_event(event, session);
- case PERF_RECORD_UNTHROTTLE:
- return ops->process_unthrottle_event(event, session);
- default:
- ops->total_unknown++;
- return -1;
- }
-}
-
-int perf_header__read_build_ids(int input, u64 offset, u64 size)
-{
- struct build_id_event bev;
- char filename[PATH_MAX];
- u64 limit = offset + size;
- int err = -1;
-
- while (offset < limit) {
- struct dso *dso;
- ssize_t len;
-
- if (read(input, &bev, sizeof(bev)) != sizeof(bev))
- goto out;
-
- len = bev.header.size - sizeof(bev);
- if (read(input, filename, len) != len)
- goto out;
-
- dso = dsos__findnew(filename);
- if (dso != NULL)
- dso__set_build_id(dso, &bev.build_id);
-
- offset += bev.header.size;
- }
- err = 0;
-out:
- return err;
-}
-
-static struct thread *perf_session__register_idle_thread(struct perf_session *self)
-{
- struct thread *thread = perf_session__findnew(self, 0);
-
- if (!thread || thread__set_comm(thread, "swapper")) {
- pr_err("problem inserting idle task.\n");
- thread = NULL;
- }
-
- return thread;
-}
-
-int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *ops)
-{
- int err;
- unsigned long head, shift;
- unsigned long offset = 0;
- size_t page_size;
- event_t *event;
- uint32_t size;
- char *buf;
-
- if (perf_session__register_idle_thread(self) == NULL)
- return -ENOMEM;
-
- perf_event_ops__fill_defaults(ops);
-
- page_size = getpagesize();
-
- head = self->header.data_offset;
- self->sample_type = perf_header__sample_type(&self->header);
-
- err = -EINVAL;
- if (ops->sample_type_check && ops->sample_type_check(self) < 0)
- goto out_err;
-
- if (!ops->full_paths) {
- char bf[PATH_MAX];
-
- if (getcwd(bf, sizeof(bf)) == NULL) {
- err = -errno;
-out_getcwd_err:
- pr_err("failed to get the current directory\n");
- goto out_err;
- }
- self->cwd = strdup(bf);
- if (self->cwd == NULL) {
- err = -ENOMEM;
- goto out_getcwd_err;
- }
- self->cwdlen = strlen(self->cwd);
- }
-
- shift = page_size * (head / page_size);
- offset += shift;
- head -= shift;
-
-remap:
- buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
- MAP_SHARED, self->fd, offset);
- if (buf == MAP_FAILED) {
- pr_err("failed to mmap file\n");
- err = -errno;
- goto out_err;
- }
-
-more:
- event = (event_t *)(buf + head);
-
- size = event->header.size;
- if (!size)
- size = 8;
-
- if (head + event->header.size >= page_size * self->mmap_window) {
- int munmap_ret;
-
- shift = page_size * (head / page_size);
-
- munmap_ret = munmap(buf, page_size * self->mmap_window);
- assert(munmap_ret == 0);
-
- offset += shift;
- head -= shift;
- goto remap;
- }
-
- size = event->header.size;
-
- dump_printf("\n%p [%p]: event: %d\n",
- (void *)(offset + head),
- (void *)(long)event->header.size,
- event->header.type);
-
- if (!size || process_event(event, self, ops, offset, head) < 0) {
-
- dump_printf("%p [%p]: skipping unknown header type: %d\n",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event->header.type);
-
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
-
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
- }
-
- head += size;
-
- if (offset + head >= self->header.data_offset + self->header.data_size)
- goto done;
-
- if (offset + head < self->size)
- goto more;
-
-done:
- err = 0;
-out_err:
- return err;
-}
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 28d520d..0905600 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -9,6 +9,7 @@
#include "color.h"
#include "event.h"
#include "debug.h"
+#include "util.h"
int verbose = 0;
int dump_trace = 0;
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index 06b73ee..a88fefc 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path)
return 0;
}
-/* mount the debugfs somewhere */
+/* mount the debugfs somewhere if it's not mounted */
-int debugfs_mount(const char *mountpoint)
+char *debugfs_mount(const char *mountpoint)
{
- char mountcmd[128];
-
/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
- return 0;
+ return debugfs_mountpoint;
}
/* if not mounted and no argument */
@@ -127,13 +125,14 @@ int debugfs_mount(const char *mountpoint)
mountpoint = "/sys/kernel/debug";
}
+ if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
+ return NULL;
+
/* save the mountpoint */
strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+ debugfs_found = 1;
- /* mount it */
- snprintf(mountcmd, sizeof(mountcmd),
- "/bin/mount -t debugfs debugfs %s", mountpoint);
- return system(mountcmd);
+ return debugfs_mountpoint;
}
/* umount the debugfs */
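
The debugfs.c change above swaps the shell invocation of /bin/mount for a direct mount(2) call and makes debugfs_mount() return the mount point instead of a status, so callers such as get_debugfs_mntpt() can use the path directly. The syscall itself boils down to this standalone sketch (needs CAP_SYS_ADMIN to succeed):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		const char *mountpoint = "/sys/kernel/debug";

		/* a NULL source is fine for pseudo filesystems like debugfs */
		if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0) {
			perror("mount debugfs");
			return 1;
		}
		printf("debugfs mounted at %s\n", mountpoint);
		return 0;
	}
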
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 3cd14f9..83a0287 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -15,7 +15,7 @@
extern const char *debugfs_find_mountpoint(void);
extern int debugfs_valid_mountpoint(const char *debugfs);
extern int debugfs_valid_entry(const char *path);
-extern int debugfs_mount(const char *mountpoint);
+extern char *debugfs_mount(const char *mountpoint);
extern int debugfs_umount(void);
extern int debugfs_write(const char *entry, const char *value);
extern int debugfs_read(const char *entry, char *buffer, size_t size);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 8a9e6ba..705ec63 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -8,8 +8,7 @@
#include "thread.h"
static pid_t event__synthesize_comm(pid_t pid, int full,
- int (*process)(event_t *event,
- struct perf_session *session),
+ event__handler_t process,
struct perf_session *session)
{
event_t ev;
@@ -91,8 +90,7 @@ out_failure:
}
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
- int (*process)(event_t *event,
- struct perf_session *session),
+ event__handler_t process,
struct perf_session *session)
{
char filename[PATH_MAX];
@@ -112,7 +110,10 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
while (1) {
char bf[BUFSIZ], *pbf = bf;
event_t ev = {
- .header = { .type = PERF_RECORD_MMAP },
+ .header = {
+ .type = PERF_RECORD_MMAP,
+ .misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */
+ },
};
int n;
size_t size;
@@ -156,9 +157,38 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
return 0;
}
-int event__synthesize_thread(pid_t pid,
- int (*process)(event_t *event,
- struct perf_session *session),
+int event__synthesize_modules(event__handler_t process,
+ struct perf_session *session)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]);
+ nd; nd = rb_next(nd)) {
+ event_t ev;
+ size_t size;
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (pos->dso->kernel)
+ continue;
+
+ size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ memset(&ev, 0, sizeof(ev));
+ ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
+ ev.mmap.header.type = PERF_RECORD_MMAP;
+ ev.mmap.header.size = (sizeof(ev.mmap) -
+ (sizeof(ev.mmap.filename) - size));
+ ev.mmap.start = pos->start;
+ ev.mmap.len = pos->end - pos->start;
+
+ memcpy(ev.mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ process(&ev, session);
+ }
+
+ return 0;
+}
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
struct perf_session *session)
{
pid_t tgid = event__synthesize_comm(pid, 1, process, session);
@@ -167,8 +197,7 @@ int event__synthesize_thread(pid_t pid,
return event__synthesize_mmap_events(pid, tgid, process, session);
}
-void event__synthesize_threads(int (*process)(event_t *event,
- struct perf_session *session),
+void event__synthesize_threads(event__handler_t process,
struct perf_session *session)
{
DIR *proc;
@@ -189,6 +218,59 @@ void event__synthesize_threads(int (*process)(event_t *event,
closedir(proc);
}
+struct process_symbol_args {
+ const char *name;
+ u64 start;
+};
+
+static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
+{
+ struct process_symbol_args *args = arg;
+
+ /*
+ * Must be a function or at least an alias, as in PARISC64, where "_text" is
+ * an 'A' to the same address as "_stext".
+ */
+ if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+ type == 'A') || strcmp(name, args->name))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
+int event__synthesize_kernel_mmap(event__handler_t process,
+ struct perf_session *session,
+ const char *symbol_name)
+{
+ size_t size;
+ event_t ev = {
+ .header = {
+ .type = PERF_RECORD_MMAP,
+ .misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
+ },
+ };
+ /*
+ * We should get this from /sys/kernel/sections/.text, but till that is
+ * available use this, and after it is use this as a fallback for older
+ * kernels.
+ */
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0)
+ return -ENOENT;
+
+ size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
+ "[kernel.kallsyms.%s]", symbol_name) + 1;
+ size = ALIGN(size, sizeof(u64));
+ ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size));
+ ev.mmap.pgoff = args.start;
+ ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start;
+ ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start ;
+
+ return process(&ev, session);
+}
+
static void thread__comm_adjust(struct thread *self)
{
char *comm = self->comm;
@@ -240,22 +322,88 @@ int event__process_lost(event_t *self, struct perf_session *session)
int event__process_mmap(event_t *self, struct perf_session *session)
{
- struct thread *thread = perf_session__findnew(session, self->mmap.pid);
- struct map *map = map__new(&self->mmap, MAP__FUNCTION,
- session->cwd, session->cwdlen);
+ struct thread *thread;
+ struct map *map;
+
+ dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+ self->mmap.pid, self->mmap.tid, self->mmap.start,
+ self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+
+ if (self->mmap.pid == 0) {
+ static const char kmmap_prefix[] = "[kernel.kallsyms.";
+
+ if (self->mmap.filename[0] == '/') {
+ char short_module_name[1024];
+ char *name = strrchr(self->mmap.filename, '/'), *dot;
+
+ if (name == NULL)
+ goto out_problem;
+
+ ++name; /* skip / */
+ dot = strrchr(name, '.');
+ if (dot == NULL)
+ goto out_problem;
+
+ snprintf(short_module_name, sizeof(short_module_name),
+ "[%.*s]", (int)(dot - name), name);
+ strxfrchar(short_module_name, '-', '_');
+
+ map = perf_session__new_module_map(session,
+ self->mmap.start,
+ self->mmap.filename);
+ if (map == NULL)
+ goto out_problem;
+
+ name = strdup(short_module_name);
+ if (name == NULL)
+ goto out_problem;
+
+ map->dso->short_name = name;
+ map->end = map->start + self->mmap.len;
+ } else if (memcmp(self->mmap.filename, kmmap_prefix,
+ sizeof(kmmap_prefix) - 1) == 0) {
+ const char *symbol_name = (self->mmap.filename +
+ sizeof(kmmap_prefix) - 1);
+ /*
+ * Should be there already, from the build-id table in
+ * the header.
+ */
+ struct dso *kernel = __dsos__findnew(&dsos__kernel,
+ "[kernel.kallsyms]");
+ if (kernel == NULL)
+ goto out_problem;
+
+ kernel->kernel = 1;
+ if (__perf_session__create_kernel_maps(session, kernel) < 0)
+ goto out_problem;
+
+ session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start;
+ session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
+ /*
+ * Be a bit paranoid here, some perf.data file came with
+ * a zero sized synthesized MMAP event for the kernel.
+ */
+ if (session->vmlinux_maps[MAP__FUNCTION]->end == 0)
+ session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL;
+
+ perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name,
+ self->mmap.pgoff);
+ }
+ return 0;
+ }
- dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n",
- self->mmap.pid, self->mmap.tid,
- (void *)(long)self->mmap.start,
- (void *)(long)self->mmap.len,
- (void *)(long)self->mmap.pgoff,
- self->mmap.filename);
+ thread = perf_session__findnew(session, self->mmap.pid);
+ map = map__new(&self->mmap, MAP__FUNCTION,
+ session->cwd, session->cwdlen);
if (thread == NULL || map == NULL)
- dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
- else
- thread__insert_map(thread, map);
+ goto out_problem;
+ thread__insert_map(thread, map);
+ return 0;
+
+out_problem:
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
@@ -284,11 +432,10 @@ int event__process_task(event_t *self, struct perf_session *session)
return 0;
}
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter)
+void thread__find_addr_map(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, u64 addr,
+ struct addr_location *al)
{
struct map_groups *mg = &self->mg;
@@ -303,7 +450,6 @@ void thread__find_addr_location(struct thread *self,
else {
al->level = 'H';
al->map = NULL;
- al->sym = NULL;
return;
}
try_again:
@@ -322,11 +468,21 @@ try_again:
mg = &session->kmaps;
goto try_again;
}
- al->sym = NULL;
- } else {
+ } else
al->addr = al->map->map_ip(al->map, al->addr);
- al->sym = map__find_symbol(al->map, session, al->addr, filter);
- }
+}
+
+void thread__find_addr_location(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, u64 addr,
+ struct addr_location *al,
+ symbol_filter_t filter)
+{
+ thread__find_addr_map(self, session, cpumode, type, addr, al);
+ if (al->map != NULL)
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ else
+ al->sym = NULL;
}
static void dso__calc_col_width(struct dso *self)
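
event__synthesize_kernel_mmap() above obtains the reference symbol address by walking /proc/kallsyms through kallsyms__parse() with the find_symbol_cb() callback. Stripped of the perf plumbing, the lookup amounts to this standalone sketch (symbol name hard-coded to "_text"):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/proc/kallsyms", "r");
		char line[256], type, name[128];
		unsigned long long start;

		if (f == NULL) {
			perror("/proc/kallsyms");
			return 1;
		}

		while (fgets(line, sizeof(line), f)) {
			if (sscanf(line, "%llx %c %127s", &start, &type, name) == 3 &&
			    strcmp(name, "_text") == 0) {
				printf("_text %c %#llx\n", type, start);
				break;
			}
		}
		fclose(f);
		return 0;
	}
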
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 690a96d..50a7132 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,10 +1,10 @@
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H
+#include <limits.h>
+
#include "../perf.h"
-#include "util.h"
-#include <linux/list.h>
-#include <linux/rbtree.h>
+#include "map.h"
/*
* PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
@@ -101,74 +101,19 @@ struct events_stats {
void event__print_totals(void);
-enum map_type {
- MAP__FUNCTION = 0,
- MAP__VARIABLE,
-};
-
-#define MAP__NR_TYPES (MAP__VARIABLE + 1)
-
-struct map {
- union {
- struct rb_node rb_node;
- struct list_head node;
- };
- u64 start;
- u64 end;
- enum map_type type;
- u64 pgoff;
- u64 (*map_ip)(struct map *, u64);
- u64 (*unmap_ip)(struct map *, u64);
- struct dso *dso;
-};
-
-static inline u64 map__map_ip(struct map *map, u64 ip)
-{
- return ip - map->start + map->pgoff;
-}
-
-static inline u64 map__unmap_ip(struct map *map, u64 ip)
-{
- return ip + map->start - map->pgoff;
-}
-
-static inline u64 identity__map_ip(struct map *map __used, u64 ip)
-{
- return ip;
-}
-
-struct symbol;
-
-typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
-
-void map__init(struct map *self, enum map_type type,
- u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct mmap_event *event, enum map_type,
- char *cwd, int cwdlen);
-void map__delete(struct map *self);
-struct map *map__clone(struct map *self);
-int map__overlap(struct map *l, struct map *r);
-size_t map__fprintf(struct map *self, FILE *fp);
-
struct perf_session;
-int map__load(struct map *self, struct perf_session *session,
- symbol_filter_t filter);
-struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
- u64 addr, symbol_filter_t filter);
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
- struct perf_session *session,
- symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
-
-int event__synthesize_thread(pid_t pid,
- int (*process)(event_t *event,
- struct perf_session *session),
+typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
struct perf_session *session);
-void event__synthesize_threads(int (*process)(event_t *event,
- struct perf_session *session),
+void event__synthesize_threads(event__handler_t process,
struct perf_session *session);
+int event__synthesize_kernel_mmap(event__handler_t process,
+ struct perf_session *session,
+ const char *symbol_name);
+int event__synthesize_modules(event__handler_t process,
+ struct perf_session *session);
int event__process_comm(event_t *self, struct perf_session *session);
int event__process_lost(event_t *self, struct perf_session *session);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 8a0bca5..6c9aa16 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,8 +1,12 @@
+#define _FILE_OFFSET_BITS 64
+
#include <sys/types.h>
+#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
+#include <linux/kernel.h>
#include "util.h"
#include "header.h"
@@ -105,24 +109,28 @@ struct perf_trace_event_type {
static int event_count;
static struct perf_trace_event_type *events;
-void perf_header__push_event(u64 id, const char *name)
+int perf_header__push_event(u64 id, const char *name)
{
if (strlen(name) > MAX_EVENT_NAME)
pr_warning("Event %s will be truncated\n", name);
if (!events) {
events = malloc(sizeof(struct perf_trace_event_type));
- if (!events)
- die("nomem");
+ if (events == NULL)
+ return -ENOMEM;
} else {
- events = realloc(events, (event_count + 1) * sizeof(struct perf_trace_event_type));
- if (!events)
- die("nomem");
+ struct perf_trace_event_type *nevents;
+
+ nevents = realloc(events, (event_count + 1) * sizeof(*events));
+ if (nevents == NULL)
+ return -ENOMEM;
+ events = nevents;
}
memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
events[event_count].event_id = id;
strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
event_count++;
+ return 0;
}
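
Keeping the realloc() result in a temporary is what makes the error path above leak-free: the old "events = realloc(events, ...)" form drops the only pointer to the existing array when realloc() fails. The generic shape of the pattern, with buf and new_size as placeholder names:

	void *tmp = realloc(buf, new_size);

	if (tmp == NULL)
		return -ENOMEM;	/* buf is untouched and still owned */
	buf = tmp;
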
char *perf_header__find_event(u64 id)
@@ -169,31 +177,48 @@ static int do_write(int fd, const void *buf, size_t size)
return 0;
}
-static int __dsos__write_buildid_table(struct list_head *head, int fd)
+#define NAME_ALIGN 64
+
+static int write_padded(int fd, const void *bf, size_t count,
+ size_t count_aligned)
{
-#define NAME_ALIGN 64
- struct dso *pos;
static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
+
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
+
+ return err;
+}
- list_for_each_entry(pos, head, node) {
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
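+ *
+ * The trailing else in dsos__for_each_with_build_id() is what lets a caller
+ * hang a single statement (or block) off the macro: that statement becomes
+ * the body of the else, so both the filter and the caller's code stay inside
+ * the same list_for_each_entry() iteration. For example, a use such as
+ *
+ *	dsos__for_each_with_build_id(pos, head)
+ *		pos->hit = 0;
+ *
+ * expands, conceptually, to
+ *
+ *	list_for_each_entry(pos, head, node)
+ *		if (!pos->has_build_id)
+ *			continue;
+ *		else
+ *			pos->hit = 0;
+ * (illustrative note, not part of the patch)
+ *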
+static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
+{
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
int err;
struct build_id_event b;
size_t len;
- if (!pos->has_build_id)
+ if (!pos->hit)
continue;
len = pos->long_name_len + 1;
len = ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.header.misc = misc;
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
return err;
- err = do_write(fd, pos->long_name, pos->long_name_len + 1);
- if (err < 0)
- return err;
- err = do_write(fd, zero_buf, len - pos->long_name_len - 1);
+ err = write_padded(fd, pos->long_name,
+ pos->long_name_len + 1, len);
if (err < 0)
return err;
}
@@ -203,12 +228,143 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd)
static int dsos__write_buildid_table(int fd)
{
- int err = __dsos__write_buildid_table(&dsos__kernel, fd);
+ int err = __dsos__write_buildid_table(&dsos__kernel,
+ PERF_RECORD_MISC_KERNEL, fd);
if (err == 0)
- err = __dsos__write_buildid_table(&dsos__user, fd);
+ err = __dsos__write_buildid_table(&dsos__user,
+ PERF_RECORD_MISC_USER, fd);
return err;
}
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms)
+{
+ const size_t size = PATH_MAX;
+ char *filename = malloc(size),
+ *linkname = malloc(size), *targetname;
+ int len, err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = snprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", name);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(name, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
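
For illustration, assuming debugdir is ~/.debug and a DSO /lib/libc-2.10.1.so whose build-id starts with 4f29..., the function above ends up creating:

	~/.debug/lib/libc-2.10.1.so/4f29...                              (link or copy of the DSO)
	~/.debug/.build-id/4f/29...  ->  ../../lib/libc-2.10.1.so/4f29...

The "../.." spliced into targetname works because the symlink lives two directory levels below debugdir (.build-id/<first two hex digits>/), so the relative target resolves back to the per-name directory.
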
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = malloc(size),
+ *linkname = malloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *self, const char *debugdir)
+{
+ bool is_kallsyms = self->kernel && self->long_name[0] != '/';
+
+ return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
+ self->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int dsos__cache_build_ids(void)
+{
+ int err_kernel, err_user;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
+ DEBUG_CACHE_DIR);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir);
+ err_user = __dsos__cache_build_ids(&dsos__user, debugdir);
+ return err_kernel || err_user ? -1 : 0;
+}
+
static int perf_header__adds_write(struct perf_header *self, int fd)
{
int nr_sections;
@@ -217,7 +373,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
u64 sec_start;
int idx = 0, err;
- if (dsos__read_build_ids())
+ if (dsos__read_build_ids(true))
perf_header__set_feat(self, HEADER_BUILD_ID);
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
@@ -257,7 +413,9 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
pr_debug("failed to write buildid table\n");
goto out_free;
}
- buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset;
+ buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
+ buildid_sec->offset;
+ dsos__cache_build_ids();
}
lseek(fd, sec_start, SEEK_SET);
@@ -360,30 +518,43 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
return 0;
}
-static void do_read(int fd, void *buf, size_t size)
+static int do_read(int fd, void *buf, size_t size)
{
while (size) {
int ret = read(fd, buf, size);
- if (ret < 0)
- die("failed to read");
- if (ret == 0)
- die("failed to read: missing data");
+ if (ret <= 0)
+ return -1;
size -= ret;
buf += ret;
}
+
+ return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *self,
+ int fd, void *buf, size_t size)
+{
+ if (do_read(fd, buf, size))
+ return -1;
+
+ if (self->needs_swap)
+ mem_bswap_64(buf, size);
+
+ return 0;
}
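
perf_header__getbuffer64() is only safe for buffers made up entirely of 64-bit words: when the file was written on an opposite-endian machine, every u64 is byte-swapped in place. The mem_bswap_64() helper is added elsewhere in this series; as an assumption about its shape, it has to do roughly:

	static void mem_bswap_64(void *src, int byte_size)
	{
		u64 *m = src;

		while (byte_size > 0) {
			*m = bswap_64(*m);
			byte_size -= sizeof(u64);
			++m;
		}
	}
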
int perf_header__process_sections(struct perf_header *self, int fd,
int (*process)(struct perf_file_section *self,
+ struct perf_header *ph,
int feat, int fd))
{
struct perf_file_section *feat_sec;
int nr_sections;
int sec_size;
int idx = 0;
- int err = 0, feat = 1;
+ int err = -1, feat = 1;
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
@@ -397,33 +568,45 @@ int perf_header__process_sections(struct perf_header *self, int fd,
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
- do_read(fd, feat_sec, sec_size);
+ if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
+ goto out_free;
+ err = 0;
while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
if (perf_header__has_feat(self, feat)) {
struct perf_file_section *sec = &feat_sec[idx++];
- err = process(sec, feat, fd);
+ err = process(sec, self, feat, fd);
if (err < 0)
break;
}
++feat;
}
-
+out_free:
free(feat_sec);
return err;
-};
+}
int perf_file_header__read(struct perf_file_header *self,
struct perf_header *ph, int fd)
{
lseek(fd, 0, SEEK_SET);
- do_read(fd, self, sizeof(*self));
- if (self->magic != PERF_MAGIC ||
- self->attr_size != sizeof(struct perf_file_attr))
+ if (do_read(fd, self, sizeof(*self)) ||
+ memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
return -1;
+ if (self->attr_size != sizeof(struct perf_file_attr)) {
+ u64 attr_size = bswap_64(self->attr_size);
+
+ if (attr_size != sizeof(struct perf_file_attr))
+ return -1;
+
+ mem_bswap_64(self, offsetof(struct perf_file_header,
+ adds_features));
+ ph->needs_swap = true;
+ }
+
if (self->size != sizeof(*self)) {
/* Support the previous format */
if (self->size == offsetof(typeof(*self), adds_features))
@@ -433,19 +616,31 @@ int perf_file_header__read(struct perf_file_header *self,
}
memcpy(&ph->adds_features, &self->adds_features,
- sizeof(self->adds_features));
+ sizeof(ph->adds_features));
+ /*
+	 * FIXME: hack that assumes that if we need to swap, the perf.data file
+	 * may be coming from an arch with a different word size, and hence a
+	 * different DECLARE_BITMAP layout; investigate more later, but for now
+	 * it's mostly safe to assume that we have a build-id section. Trace
+	 * files probably have several other issues in this realm anyway...
+ */
+ if (ph->needs_swap) {
+ memset(&ph->adds_features, 0, sizeof(ph->adds_features));
+ perf_header__set_feat(ph, HEADER_BUILD_ID);
+ }
ph->event_offset = self->event_types.offset;
- ph->event_size = self->event_types.size;
- ph->data_offset = self->data.offset;
+ ph->event_size = self->event_types.size;
+ ph->data_offset = self->data.offset;
ph->data_size = self->data.size;
return 0;
}
static int perf_file_section__process(struct perf_file_section *self,
+ struct perf_header *ph,
int feat, int fd)
{
- if (lseek(fd, self->offset, SEEK_SET) < 0) {
+ if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
pr_debug("Failed to lseek to %Ld offset for feature %d, "
"continuing...\n", self->offset, feat);
return 0;
@@ -457,7 +652,7 @@ static int perf_file_section__process(struct perf_file_section *self,
break;
case HEADER_BUILD_ID:
- if (perf_header__read_build_ids(fd, self->offset, self->size))
+ if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
pr_debug("Failed to read buildids, continuing...\n");
break;
default:
@@ -469,7 +664,7 @@ static int perf_file_section__process(struct perf_file_section *self,
int perf_header__read(struct perf_header *self, int fd)
{
- struct perf_file_header f_header;
+ struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
@@ -486,7 +681,9 @@ int perf_header__read(struct perf_header *self, int fd)
struct perf_header_attr *attr;
off_t tmp;
- do_read(fd, &f_attr, sizeof(f_attr));
+ if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
+ goto out_errno;
+
tmp = lseek(fd, 0, SEEK_CUR);
attr = perf_header_attr__new(&f_attr.attr);
@@ -497,7 +694,8 @@ int perf_header__read(struct perf_header *self, int fd)
lseek(fd, f_attr.ids.offset, SEEK_SET);
for (j = 0; j < nr_ids; j++) {
- do_read(fd, &f_id, sizeof(f_id));
+ if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
+ goto out_errno;
if (perf_header_attr__add_id(attr, f_id) < 0) {
perf_header_attr__delete(attr);
@@ -517,7 +715,9 @@ int perf_header__read(struct perf_header *self, int fd)
events = malloc(f_header.event_types.size);
if (events == NULL)
return -ENOMEM;
- do_read(fd, events, f_header.event_types.size);
+ if (perf_header__getbuffer64(self, fd, events,
+ f_header.event_types.size))
+ goto out_errno;
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
@@ -527,6 +727,8 @@ int perf_header__read(struct perf_header *self, int fd)
self->frozen = 1;
return 0;
+out_errno:
+ return -errno;
}
u64 perf_header__sample_type(struct perf_header *header)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d118d05..82a6af7 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -5,6 +5,7 @@
#include <sys/types.h>
#include <stdbool.h>
#include "types.h"
+#include "event.h"
#include <linux/bitmap.h>
@@ -52,6 +53,7 @@ struct perf_header {
u64 data_size;
u64 event_offset;
u64 event_size;
+ bool needs_swap;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
@@ -64,7 +66,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit);
int perf_header__add_attr(struct perf_header *self,
struct perf_header_attr *attr);
-void perf_header__push_event(u64 id, const char *name);
+int perf_header__push_event(u64 id, const char *name);
char *perf_header__find_event(u64 id);
struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
@@ -80,6 +82,11 @@ bool perf_header__has_feat(const struct perf_header *self, int feat);
int perf_header__process_sections(struct perf_header *self, int fd,
int (*process)(struct perf_file_section *self,
+ struct perf_header *ph,
int feat, int fd));
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
new file mode 100644
index 0000000..201f573
--- /dev/null
+++ b/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
+#include "../../../../include/linux/hash.h"
+
+#ifndef PERF_HASH_H
+#define PERF_HASH_H
+#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 21c0274..f261165 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -101,5 +101,6 @@ simple_strtoul(const char *nptr, char **endptr, int base)
eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
#endif
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index c4d55a0..e509cd5 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -5,6 +5,11 @@
#include <stdio.h>
#include "debug.h"
+const char *map_type__name[MAP__NR_TYPES] = {
+ [MAP__FUNCTION] = "Functions",
+ [MAP__VARIABLE] = "Variables",
+};
+
static inline int is_anon_memory(const char *filename)
{
return strcmp(filename, "//anon") == 0;
@@ -68,8 +73,13 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
map__init(self, type, event->start, event->start + event->len,
event->pgoff, dso);
- if (self->dso == vdso || anon)
+ if (anon) {
+set_identity:
self->map_ip = self->unmap_ip = identity__map_ip;
+ } else if (strcmp(filename, "[vdso]") == 0) {
+ dso__set_loaded(dso, self->type);
+ goto set_identity;
+ }
}
return self;
out_delete:
@@ -104,8 +114,7 @@ void map__fixup_end(struct map *self)
#define DSO__DELETED "(deleted)"
-int map__load(struct map *self, struct perf_session *session,
- symbol_filter_t filter)
+int map__load(struct map *self, symbol_filter_t filter)
{
const char *name = self->dso->long_name;
int nr;
@@ -113,7 +122,7 @@ int map__load(struct map *self, struct perf_session *session,
if (dso__loaded(self->dso, self->type))
return 0;
- nr = dso__load(self->dso, self, session, filter);
+ nr = dso__load(self->dso, self, filter);
if (nr < 0) {
if (self->dso->has_build_id) {
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -144,24 +153,29 @@ int map__load(struct map *self, struct perf_session *session,
return -1;
}
+ /*
+ * Only applies to the kernel, as its symtabs aren't relative like the
+ * module ones.
+ */
+ if (self->dso->kernel)
+ map__reloc_vmlinux(self);
return 0;
}
-struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
- u64 addr, symbol_filter_t filter)
+struct symbol *map__find_symbol(struct map *self, u64 addr,
+ symbol_filter_t filter)
{
- if (map__load(self, session, filter) < 0)
+ if (map__load(self, filter) < 0)
return NULL;
return dso__find_symbol(self->dso, self->type, addr);
}
struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
- struct perf_session *session,
symbol_filter_t filter)
{
- if (map__load(self, session, filter) < 0)
+ if (map__load(self, filter) < 0)
return NULL;
if (!dso__sorted_by_name(self->dso, self->type))
@@ -201,3 +215,23 @@ size_t map__fprintf(struct map *self, FILE *fp)
return fprintf(fp, " %Lx-%Lx %Lx %s\n",
self->start, self->end, self->pgoff, self->dso->name);
}
+
+/*
+ * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ */
+u64 map__rip_2objdump(struct map *map, u64 rip)
+{
+ u64 addr = map->dso->adjust_symbols ?
+ map->unmap_ip(map, rip) : /* RIP -> IP */
+ rip;
+ return addr;
+}
+
+u64 map__objdump_2ip(struct map *map, u64 addr)
+{
+ u64 ip = map->dso->adjust_symbols ?
+ addr :
+ map->unmap_ip(map, addr); /* RIP -> IP */
+ return ip;
+}
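
The two helpers above let the annotate path hand objdump the address form it expects and translate its output back. A hedged sketch of the intended use; the command string and variable names are illustrative, not taken from the patch:

	u64 start = map__rip_2objdump(map, sym->start);
	u64 end   = map__rip_2objdump(map, sym->end);
	char cmd[PATH_MAX * 2];

	snprintf(cmd, sizeof(cmd),
		 "objdump --start-address=%#Lx --stop-address=%#Lx -dS %s",
		 start, end, map->dso->long_name);
	/* addresses parsed from the objdump output would then go back
	 * through map__objdump_2ip() before being matched against hits */
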
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
new file mode 100644
index 0000000..b756368
--- /dev/null
+++ b/tools/perf/util/map.h
@@ -0,0 +1,94 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+enum map_type {
+ MAP__FUNCTION = 0,
+ MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+extern const char *map_type__name[MAP__NR_TYPES];
+
+struct dso;
+struct ref_reloc_sym;
+struct map_groups;
+
+struct map {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ u64 start;
+ u64 end;
+ enum map_type type;
+ u64 pgoff;
+
+ /* ip -> dso rip */
+ u64 (*map_ip)(struct map *, u64);
+ /* dso rip -> ip */
+ u64 (*unmap_ip)(struct map *, u64);
+
+ struct dso *dso;
+};
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+};
+
+static inline struct kmap *map__kmap(struct map *self)
+{
+ return (struct kmap *)(self + 1);
+}
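+
+/*
+ * map__kmap() hands back storage that sits immediately after the struct map
+ * itself, so it is only meaningful for kernel maps allocated with that extra
+ * room. A sketch of the allocation side, assuming kernel maps are created
+ * roughly like this (start, kernel_dso and session are placeholders):
+ *
+ *	struct map *map = calloc(1, sizeof(*map) + sizeof(struct kmap));
+ *
+ *	if (map != NULL) {
+ *		struct kmap *kmap = map__kmap(map);
+ *
+ *		map__init(map, MAP__FUNCTION, start, 0, 0, kernel_dso);
+ *		kmap->kmaps = &session->kmaps;
+ *		kmap->ref_reloc_sym = NULL;
+ *	}
+ * (illustrative note, not part of the patch)
+ */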
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+ return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+ return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+{
+ return ip;
+}
+
+
+/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
+u64 map__rip_2objdump(struct map *map, u64 rip);
+u64 map__objdump_2ip(struct map *map, u64 addr);
+
+struct symbol;
+struct mmap_event;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct mmap_event *event, enum map_type,
+ char *cwd, int cwdlen);
+void map__delete(struct map *self);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+
+int map__load(struct map *self, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self,
+ u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter);
+void map__fixup_start(struct map *self);
+void map__fixup_end(struct map *self);
+
+void map__reloc_vmlinux(struct map *self);
+
+#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e5bc0fb..05d0c5c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -450,7 +450,8 @@ parse_single_tracepoint_event(char *sys_name,
/* sys + ':' + event + ':' + flags*/
#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
static enum event_result
-parse_subsystem_tracepoint_event(char *sys_name, char *flags)
+parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
+ char *flags)
{
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
@@ -474,6 +475,9 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
|| !strcmp(evt_ent->d_name, "filter"))
continue;
+ if (!strglobmatch(evt_ent->d_name, evt_exp))
+ continue;
+
len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
evt_ent->d_name, flags ? ":" : "",
flags ?: "");
@@ -522,9 +526,10 @@ static enum event_result parse_tracepoint_event(const char **strp,
if (evt_length >= MAX_EVENT_LENGTH)
return EVT_FAILED;
- if (!strcmp(evt_name, "*")) {
+ if (strpbrk(evt_name, "*?")) {
*strp = evt_name + evt_length;
- return parse_subsystem_tracepoint_event(sys_name, flags);
+ return parse_multiple_tracepoint_event(sys_name, evt_name,
+ flags);
} else
return parse_single_tracepoint_event(sys_name, evt_name,
evt_length, flags,
@@ -753,11 +758,11 @@ modifier:
return ret;
}
-static void store_event_type(const char *orgname)
+static int store_event_type(const char *orgname)
{
char filename[PATH_MAX], *c;
FILE *file;
- int id;
+ int id, n;
sprintf(filename, "%s/", debugfs_path);
strncat(filename, orgname, strlen(orgname));
@@ -769,11 +774,14 @@ static void store_event_type(const char *orgname)
file = fopen(filename, "r");
if (!file)
- return;
- if (fscanf(file, "%i", &id) < 1)
- die("cannot store event ID");
+ return 0;
+ n = fscanf(file, "%i", &id);
fclose(file);
- perf_header__push_event(id, orgname);
+ if (n < 1) {
+ pr_err("cannot store event ID\n");
+ return -EINVAL;
+ }
+ return perf_header__push_event(id, orgname);
}
int parse_events(const struct option *opt __used, const char *str, int unset __used)
@@ -782,7 +790,8 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
enum event_result ret;
if (strchr(str, ':'))
- store_event_type(str);
+ if (store_event_type(str) < 0)
+ return -1;
for (;;) {
if (nr_counters == MAX_COUNTERS)
@@ -835,11 +844,12 @@ int parse_filter(const struct option *opt __used, const char *str,
}
static const char * const event_type_descriptors[] = {
- "",
"Hardware event",
"Software event",
"Tracepoint event",
"Hardware cache event",
+ "Raw hardware event descriptor",
+ "Hardware breakpoint",
};
/*
@@ -872,7 +882,7 @@ static void print_tracepoint_events(void)
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
printf(" %-42s [%s]\n", evt_path,
- event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
}
closedir(evt_dir);
}
@@ -892,9 +902,7 @@ void print_events(void)
printf("List of pre-defined events (to be used in -e):\n");
for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
- type = syms->type + 1;
- if (type >= ARRAY_SIZE(event_type_descriptors))
- type = 0;
+ type = syms->type;
if (type != prev_type)
printf("\n");
@@ -919,17 +927,19 @@ void print_events(void)
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
printf(" %-42s [%s]\n",
event_cache_name(type, op, i),
- event_type_descriptors[4]);
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
}
}
}
printf("\n");
- printf(" %-42s [raw hardware event descriptor]\n",
- "rNNN");
+ printf(" %-42s [%s]\n",
+ "rNNN", event_type_descriptors[PERF_TYPE_RAW]);
printf("\n");
- printf(" %-42s [hardware breakpoint]\n", "mem:<addr>[:access]");
+ printf(" %-42s [%s]\n",
+ "mem:<addr>[:access]",
+ event_type_descriptors[PERF_TYPE_BREAKPOINT]);
printf("\n");
print_tracepoint_events();
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index fde17b0..8f05688 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -37,6 +37,8 @@
#include "string.h"
#include "strlist.h"
#include "debug.h"
+#include "cache.h"
+#include "color.h"
#include "parse-events.h" /* For debugfs_path */
#include "probe-event.h"
@@ -62,6 +64,42 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
return ret;
}
+void parse_line_range_desc(const char *arg, struct line_range *lr)
+{
+ const char *ptr;
+ char *tmp;
+ /*
+ * <Syntax>
+ * SRC:SLN[+NUM|-ELN]
+ * FUNC[:SLN[+NUM|-ELN]]
+ */
+ ptr = strchr(arg, ':');
+ if (ptr) {
+ lr->start = (unsigned int)strtoul(ptr + 1, &tmp, 0);
+ if (*tmp == '+')
+ lr->end = lr->start + (unsigned int)strtoul(tmp + 1,
+ &tmp, 0);
+ else if (*tmp == '-')
+ lr->end = (unsigned int)strtoul(tmp + 1, &tmp, 0);
+ else
+ lr->end = 0;
+ pr_debug("Line range is %u to %u\n", lr->start, lr->end);
+ if (lr->end && lr->start > lr->end)
+ semantic_error("Start line must be smaller"
+ " than end line.");
+ if (*tmp != '\0')
+			semantic_error("Trailing with invalid character '%d'.",
+ *tmp);
+ tmp = strndup(arg, (ptr - arg));
+ } else
+ tmp = strdup(arg);
+
+ if (strchr(tmp, '.'))
+ lr->file = tmp;
+ else
+ lr->function = tmp;
+}
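
What the parser above produces for the two documented forms follows directly from the code; the literal arguments below are just examples:

	struct line_range lr1 = { .start = 0 }, lr2 = { .start = 0 };

	parse_line_range_desc("schedule:12+8", &lr1);
	/* lr1.function = "schedule", lr1.start = 12, lr1.end = 20 */

	parse_line_range_desc("kernel/sched.c:100-120", &lr2);
	/* lr2.file = "kernel/sched.c", lr2.start = 100, lr2.end = 120;
	 * the '.' in the name is what routes it to .file rather than .function */
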
+
/* Check the name is good for event/group */
static bool check_event_name(const char *name)
{
@@ -370,7 +408,7 @@ static int open_kprobe_events(int flags, int mode)
if (ret < 0) {
if (errno == ENOENT)
die("kprobe_events file does not exist -"
- " please rebuild with CONFIG_KPROBE_TRACER.");
+ " please rebuild with CONFIG_KPROBE_EVENT.");
else
die("Could not open kprobe_events file: %s",
strerror(errno));
@@ -457,6 +495,8 @@ void show_perf_probe_events(void)
struct strlist *rawlist;
struct str_node *ent;
+ setup_pager();
+
memset(&pp, 0, sizeof(pp));
fd = open_kprobe_events(O_RDONLY, 0);
rawlist = get_trace_kprobe_event_rawlist(fd);
@@ -678,3 +718,66 @@ void del_trace_kprobe_events(struct strlist *dellist)
close(fd);
}
+#define LINEBUF_SIZE 256
+
+static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
+{
+ char buf[LINEBUF_SIZE];
+ const char *color = PERF_COLOR_BLUE;
+
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%7u %s", l, buf);
+ else
+ color_fprintf(stdout, color, " %s", buf);
+ }
+
+ while (strlen(buf) == LINEBUF_SIZE - 1 &&
+ buf[LINEBUF_SIZE - 2] != '\n') {
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%s", buf);
+ else
+ color_fprintf(stdout, color, "%s", buf);
+ }
+ }
+ return;
+error:
+ if (feof(fp))
+ die("Source file is shorter than expected.");
+ else
+ die("File read error: %s", strerror(errno));
+}
+
+void show_line_range(struct line_range *lr)
+{
+ unsigned int l = 1;
+ struct line_node *ln;
+ FILE *fp;
+
+ setup_pager();
+
+ if (lr->function)
+ fprintf(stdout, "<%s:%d>\n", lr->function,
+ lr->start - lr->offset);
+ else
+ fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
+
+ fp = fopen(lr->path, "r");
+ if (fp == NULL)
+ die("Failed to open %s: %s", lr->path, strerror(errno));
+ /* Skip to starting line number */
+ while (l < lr->start)
+ show_one_line(fp, l++, true, false);
+
+ list_for_each_entry(ln, &lr->line_list, list) {
+ while (ln->line > l)
+ show_one_line(fp, (l++) - lr->offset, false, false);
+ show_one_line(fp, (l++) - lr->offset, false, true);
+ }
+ fclose(fp);
+}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 7f1d499..711287d 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -5,6 +5,7 @@
#include "probe-finder.h"
#include "strlist.h"
+extern void parse_line_range_desc(const char *arg, struct line_range *lr);
extern void parse_perf_probe_event(const char *str, struct probe_point *pp,
bool *need_dwarf);
extern int synthesize_perf_probe_point(struct probe_point *pp);
@@ -15,6 +16,7 @@ extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
bool force_add);
extern void del_trace_kprobe_events(struct strlist *dellist);
extern void show_perf_probe_events(void);
+extern void show_line_range(struct line_range *lr);
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 4b852c0..1b2124d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -140,6 +140,31 @@ static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname)
return found;
}
+static int cu_get_filename(Dwarf_Die cu_die, Dwarf_Unsigned fno, char **buf)
+{
+ Dwarf_Signed cnt, i;
+ char **srcs;
+ int ret = 0;
+
+ if (!buf || !fno)
+ return -EINVAL;
+
+ ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error);
+ if (ret == DW_DLV_OK) {
+ if ((Dwarf_Unsigned)cnt > fno - 1) {
+ *buf = strdup(srcs[fno - 1]);
+ ret = 0;
+ pr_debug("found filename: %s\n", *buf);
+ } else
+ ret = -ENOENT;
+ for (i = 0; i < cnt; i++)
+ dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
+ dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
+ } else
+ ret = -EINVAL;
+ return ret;
+}
+
/* Compare diename and tname */
static int die_compare_name(Dwarf_Die dw_die, const char *tname)
{
@@ -402,11 +427,11 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf)
} else if (op == DW_OP_regx) {
regn = loc->lr_number;
} else
- die("Dwarf_OP %d is not supported.\n", op);
+ die("Dwarf_OP %d is not supported.", op);
regs = get_arch_regstr(regn);
if (!regs)
- die("%lld exceeds max register number.\n", regn);
+ die("%lld exceeds max register number.", regn);
if (deref)
ret = snprintf(pf->buf, pf->len,
@@ -438,7 +463,7 @@ static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf)
return ;
error:
die("Failed to find the location of %s at this address.\n"
- " Perhaps, it has been optimized out.\n", pf->var);
+ " Perhaps, it has been optimized out.", pf->var);
}
static int variable_callback(struct die_link *dlink, void *data)
@@ -476,7 +501,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf)
/* Search child die for local variables and parameters. */
ret = search_die_from_children(sp_die, variable_callback, pf);
if (!ret)
- die("Failed to find '%s' in this function.\n", pf->var);
+ die("Failed to find '%s' in this function.", pf->var);
}
/* Get a frame base on the address */
@@ -567,7 +592,7 @@ static int probeaddr_callback(struct die_link *dlink, void *data)
}
/* Find probe point from its line number */
-static void find_by_line(struct probe_finder *pf)
+static void find_probe_point_by_line(struct probe_finder *pf)
{
Dwarf_Signed cnt, i, clm;
Dwarf_Line *lines;
@@ -602,7 +627,7 @@ static void find_by_line(struct probe_finder *pf)
ret = search_die_from_children(pf->cu_die,
probeaddr_callback, pf);
if (ret == 0)
- die("Probe point is not found in subprograms.\n");
+ die("Probe point is not found in subprograms.");
/* Continuing, because target line might be inlined. */
}
dwarf_srclines_dealloc(__dw_debug, lines, cnt);
@@ -626,7 +651,7 @@ static int probefunc_callback(struct die_link *dlink, void *data)
pf->fno = die_get_decl_file(dlink->die);
pf->lno = die_get_decl_line(dlink->die)
+ pp->line;
- find_by_line(pf);
+ find_probe_point_by_line(pf);
return 1;
}
if (die_inlined_subprogram(dlink->die)) {
@@ -661,7 +686,7 @@ static int probefunc_callback(struct die_link *dlink, void *data)
!die_inlined_subprogram(lk->die))
goto found;
}
- die("Failed to find real subprogram.\n");
+ die("Failed to find real subprogram.");
found:
/* Get offset from subprogram */
ret = die_within_subprogram(lk->die, pf->addr, &offs);
@@ -673,7 +698,7 @@ found:
return 0;
}
-static void find_by_func(struct probe_finder *pf)
+static void find_probe_point_by_func(struct probe_finder *pf)
{
search_die_from_children(pf->cu_die, probefunc_callback, pf);
}
@@ -714,10 +739,10 @@ int find_probepoint(int fd, struct probe_point *pp)
if (ret == DW_DLV_NO_ENTRY)
pf.cu_base = 0;
if (pp->function)
- find_by_func(&pf);
+ find_probe_point_by_func(&pf);
else {
pf.lno = pp->line;
- find_by_line(&pf);
+ find_probe_point_by_line(&pf);
}
}
dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE);
@@ -728,3 +753,159 @@ int find_probepoint(int fd, struct probe_point *pp)
return pp->found;
}
+
+static void line_range_add_line(struct line_range *lr, unsigned int line)
+{
+ struct line_node *ln;
+ struct list_head *p;
+
+ /* Reverse search, because new line will be the last one */
+ list_for_each_entry_reverse(ln, &lr->line_list, list) {
+ if (ln->line < line) {
+ p = &ln->list;
+ goto found;
+ } else if (ln->line == line) /* Already exist */
+ return ;
+ }
+ /* List is empty, or the smallest entry */
+ p = &lr->line_list;
+found:
+ pr_debug("Debug: add a line %u\n", line);
+ ln = zalloc(sizeof(struct line_node));
+ DIE_IF(ln == NULL);
+ ln->line = line;
+ INIT_LIST_HEAD(&ln->list);
+ list_add(&ln->list, p);
+}
+
+/* Find line range from its line number */
+static void find_line_range_by_line(struct line_finder *lf)
+{
+ Dwarf_Signed cnt, i;
+ Dwarf_Line *lines;
+ Dwarf_Unsigned lineno = 0;
+ Dwarf_Unsigned fno;
+ Dwarf_Addr addr;
+ int ret;
+
+ ret = dwarf_srclines(lf->cu_die, &lines, &cnt, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+
+ for (i = 0; i < cnt; i++) {
+ ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+ if (fno != lf->fno)
+ continue;
+
+ ret = dwarf_lineno(lines[i], &lineno, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+ if (lf->lno_s > lineno || lf->lno_e < lineno)
+ continue;
+
+ /* Filter line in the function address range */
+ if (lf->addr_s && lf->addr_e) {
+ ret = dwarf_lineaddr(lines[i], &addr, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+ if (lf->addr_s > addr || lf->addr_e <= addr)
+ continue;
+ }
+ line_range_add_line(lf->lr, (unsigned int)lineno);
+ }
+ dwarf_srclines_dealloc(__dw_debug, lines, cnt);
+ if (!list_empty(&lf->lr->line_list))
+ lf->found = 1;
+}
+
+/* Search function from function name */
+static int linefunc_callback(struct die_link *dlink, void *data)
+{
+ struct line_finder *lf = (struct line_finder *)data;
+ struct line_range *lr = lf->lr;
+ Dwarf_Half tag;
+ int ret;
+
+ ret = dwarf_tag(dlink->die, &tag, &__dw_error);
+ DIE_IF(ret == DW_DLV_ERROR);
+ if (tag == DW_TAG_subprogram &&
+ die_compare_name(dlink->die, lr->function) == 0) {
+ /* Get the address range of this function */
+ ret = dwarf_highpc(dlink->die, &lf->addr_e, &__dw_error);
+ if (ret == DW_DLV_OK)
+ ret = dwarf_lowpc(dlink->die, &lf->addr_s, &__dw_error);
+ DIE_IF(ret == DW_DLV_ERROR);
+ if (ret == DW_DLV_NO_ENTRY) {
+ lf->addr_s = 0;
+ lf->addr_e = 0;
+ }
+
+ lf->fno = die_get_decl_file(dlink->die);
+		lr->offset = die_get_decl_line(dlink->die);
+ lf->lno_s = lr->offset + lr->start;
+ if (!lr->end)
+ lf->lno_e = (Dwarf_Unsigned)-1;
+ else
+ lf->lno_e = lr->offset + lr->end;
+ lr->start = lf->lno_s;
+ lr->end = lf->lno_e;
+ find_line_range_by_line(lf);
+ /* If we find a target function, this should be end. */
+ lf->found = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static void find_line_range_by_func(struct line_finder *lf)
+{
+ search_die_from_children(lf->cu_die, linefunc_callback, lf);
+}
+
+int find_line_range(int fd, struct line_range *lr)
+{
+ Dwarf_Half addr_size = 0;
+ Dwarf_Unsigned next_cuh = 0;
+ int ret;
+ struct line_finder lf = {.lr = lr};
+
+ ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error);
+ if (ret != DW_DLV_OK)
+ return -ENOENT;
+
+ while (!lf.found) {
+ /* Search CU (Compilation Unit) */
+ ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL,
+ &addr_size, &next_cuh, &__dw_error);
+ DIE_IF(ret == DW_DLV_ERROR);
+ if (ret == DW_DLV_NO_ENTRY)
+ break;
+
+ /* Get the DIE(Debugging Information Entry) of this CU */
+ ret = dwarf_siblingof(__dw_debug, 0, &lf.cu_die, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+
+ /* Check if target file is included. */
+ if (lr->file)
+ lf.fno = cu_find_fileno(lf.cu_die, lr->file);
+
+ if (!lr->file || lf.fno) {
+ if (lr->function)
+ find_line_range_by_func(&lf);
+ else {
+ lf.lno_s = lr->start;
+ if (!lr->end)
+ lf.lno_e = (Dwarf_Unsigned)-1;
+ else
+ lf.lno_e = lr->end;
+ find_line_range_by_line(&lf);
+ }
+ /* Get the real file path */
+ if (lf.found)
+ cu_get_filename(lf.cu_die, lf.fno, &lr->path);
+ }
+ dwarf_dealloc(__dw_debug, lf.cu_die, DW_DLA_DIE);
+ }
+ ret = dwarf_finish(__dw_debug, &__dw_error);
+ DIE_IF(ret != DW_DLV_OK);
+ return lf.found;
+}
+
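
A hedged sketch of how a caller is expected to drive the new line-range finder end to end; the vmlinux path, the range values and the error handling are assumptions, not taken from the patch:

	struct line_range lr = {
		.file	= "kernel/sched.c",
		.start	= 100,
		.end	= 120,
	};
	int fd = open("/boot/vmlinux", O_RDONLY);

	if (fd >= 0) {
		INIT_LIST_HEAD(&lr.line_list);
		if (find_line_range(fd, &lr) > 0)
			show_line_range(&lr);	/* prints the matched lines from lr.path */
		close(fd);
	}
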
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index a4086aa..972b386 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,6 +1,8 @@
#ifndef _PROBE_FINDER_H
#define _PROBE_FINDER_H
+#include "util.h"
+
#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
@@ -32,8 +34,26 @@ struct probe_point {
char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
};
+/* Line number container */
+struct line_node {
+ struct list_head list;
+ unsigned int line;
+};
+
+/* Line range */
+struct line_range {
+ char *file; /* File name */
+ char *function; /* Function name */
+ unsigned int start; /* Start line number */
+ unsigned int end; /* End line number */
+ unsigned int offset; /* Start line offset */
+ char *path; /* Real path name */
+ struct list_head line_list; /* Visible lines */
+};
+
#ifndef NO_LIBDWARF
extern int find_probepoint(int fd, struct probe_point *pp);
+extern int find_line_range(int fd, struct line_range *lr);
/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */
#ifndef _MIPS_SZLONG
@@ -60,6 +80,19 @@ struct probe_finder {
char *buf; /* Current output buffer */
int len; /* Length of output buffer */
};
+
+struct line_finder {
+ struct line_range *lr; /* Target line range */
+
+ Dwarf_Unsigned fno; /* File number */
+ Dwarf_Unsigned lno_s; /* Start line number */
+ Dwarf_Unsigned lno_e; /* End line number */
+ Dwarf_Addr addr_s; /* Start address */
+ Dwarf_Addr addr_e; /* End address */
+ Dwarf_Die cu_die; /* Current CU */
+ int found;
+};
+
#endif /* NO_LIBDWARF */
#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 6d6d76b..5376378 100644
--- a/tools/perf/util/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -25,10 +25,16 @@
#include <ctype.h>
#include <errno.h>
-#include "../perf.h"
-#include "util.h"
-#include "trace-event.h"
-#include "trace-event-perl.h"
+#include "../../perf.h"
+#include "../util.h"
+#include "../trace-event.h"
+
+#include <EXTERN.h>
+#include <perl.h>
+
+void boot_Perf__Trace__Context(pTHX_ CV *cv);
+void boot_DynaLoader(pTHX_ CV *cv);
+typedef PerlInterpreter * INTERP;
void xs_init(pTHX);
@@ -49,7 +55,7 @@ INTERP my_perl;
struct event *events[FTRACE_MAX_EVENT];
-static struct scripting_context *scripting_context;
+extern struct scripting_context *scripting_context;
static char *cur_field_name;
static int zero_flag_atom;
@@ -239,33 +245,6 @@ static inline struct event *find_cache_event(int type)
return event;
}
-int common_pc(struct scripting_context *context)
-{
- int pc;
-
- pc = parse_common_pc(context->event_data);
-
- return pc;
-}
-
-int common_flags(struct scripting_context *context)
-{
- int flags;
-
- flags = parse_common_flags(context->event_data);
-
- return flags;
-}
-
-int common_lock_depth(struct scripting_context *context)
-{
- int lock_depth;
-
- lock_depth = parse_common_lock_depth(context->event_data);
-
- return lock_depth;
-}
-
static void perl_process_event(int cpu, void *data,
int size __unused,
unsigned long long nsecs, char *comm)
@@ -587,75 +566,3 @@ struct scripting_ops perl_scripting_ops = {
.process_event = perl_process_event,
.generate_script = perl_generate_script,
};
-
-static void print_unsupported_msg(void)
-{
- fprintf(stderr, "Perl scripting not supported."
- " Install libperl and rebuild perf to enable it.\n"
- "For example:\n # apt-get install libperl-dev (ubuntu)"
- "\n # yum install perl-ExtUtils-Embed (Fedora)"
- "\n etc.\n");
-}
-
-static int perl_start_script_unsupported(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
-{
- print_unsupported_msg();
-
- return -1;
-}
-
-static int perl_stop_script_unsupported(void)
-{
- return 0;
-}
-
-static void perl_process_event_unsupported(int cpu __unused,
- void *data __unused,
- int size __unused,
- unsigned long long nsecs __unused,
- char *comm __unused)
-{
-}
-
-static int perl_generate_script_unsupported(const char *outfile __unused)
-{
- print_unsupported_msg();
-
- return -1;
-}
-
-struct scripting_ops perl_scripting_unsupported_ops = {
- .name = "Perl",
- .start_script = perl_start_script_unsupported,
- .stop_script = perl_stop_script_unsupported,
- .process_event = perl_process_event_unsupported,
- .generate_script = perl_generate_script_unsupported,
-};
-
-static void register_perl_scripting(struct scripting_ops *scripting_ops)
-{
- int err;
- err = script_spec_register("Perl", scripting_ops);
- if (err)
- die("error registering Perl script extension");
-
- err = script_spec_register("pl", scripting_ops);
- if (err)
- die("error registering pl script extension");
-
- scripting_context = malloc(sizeof(struct scripting_context));
-}
-
-#ifdef NO_LIBPERL
-void setup_perl_scripting(void)
-{
- register_perl_scripting(&perl_scripting_unsupported_ops);
-}
-#else
-void setup_perl_scripting(void)
-{
- register_perl_scripting(&perl_scripting_ops);
-}
-#endif
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
new file mode 100644
index 0000000..33a414b
--- /dev/null
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -0,0 +1,573 @@
+/*
+ * trace-event-python. Feed trace events to an embedded Python interpreter.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <Python.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../../perf.h"
+#include "../util.h"
+#include "../trace-event.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(unsigned short) * 8)) - 1)
+
+struct event *events[FTRACE_MAX_EVENT];
+
+#define MAX_FIELDS 64
+#define N_COMMON_FIELDS 7
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static PyObject *main_module, *main_dict;
+
+static void handler_call_die(const char *handler_name)
+{
+ PyErr_Print();
+ Py_FatalError("problem in Python trace event handler");
+}
+
+static void define_value(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ const char *handler_name = "define_flag_value";
+ PyObject *handler, *t, *retval;
+ unsigned long long value;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_value";
+
+ t = PyTuple_New(4);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ value = eval_flag(field_value);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(value));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_str));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_values(enum print_arg_type field_type,
+ struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_value(field_type, ev_name, field_name, field->value,
+ field->str);
+
+ if (field->next)
+ define_values(field_type, field->next, ev_name, field_name);
+}
+
+static void define_field(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *delim)
+{
+ const char *handler_name = "define_flag_field";
+ PyObject *handler, *t, *retval;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_field";
+
+ if (field_type == PRINT_FLAGS)
+ t = PyTuple_New(3);
+ else
+ t = PyTuple_New(2);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ if (field_type == PRINT_FLAGS)
+ PyTuple_SetItem(t, n++, PyString_FromString(delim));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_event_symbols(struct event *event,
+ const char *ev_name,
+ struct print_arg *args)
+{
+ switch (args->type) {
+ case PRINT_NULL:
+ break;
+ case PRINT_ATOM:
+ define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
+ args->atom.atom);
+ zero_flag_atom = 0;
+ break;
+ case PRINT_FIELD:
+ if (cur_field_name)
+ free(cur_field_name);
+ cur_field_name = strdup(args->field.name);
+ break;
+ case PRINT_FLAGS:
+ define_event_symbols(event, ev_name, args->flags.field);
+ define_field(PRINT_FLAGS, ev_name, cur_field_name,
+ args->flags.delim);
+ define_values(PRINT_FLAGS, args->flags.flags, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_SYMBOL:
+ define_event_symbols(event, ev_name, args->symbol.field);
+ define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
+ define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_STRING:
+ break;
+ case PRINT_TYPE:
+ define_event_symbols(event, ev_name, args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ zero_flag_atom = 1;
+ define_event_symbols(event, ev_name, args->op.left);
+ define_event_symbols(event, ev_name, args->op.right);
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+
+ if (args->next)
+ define_event_symbols(event, ev_name, args->next);
+}
+
+static inline struct event *find_cache_event(int type)
+{
+ static char ev_name[256];
+ struct event *event;
+
+ if (events[type])
+ return events[type];
+
+ events[type] = event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ sprintf(ev_name, "%s__%s", event->system, event->name);
+
+ define_event_symbols(event, ev_name, event->print_fmt.args);
+
+ return event;
+}
+
+static void python_process_event(int cpu, void *data,
+ int size __unused,
+ unsigned long long nsecs, char *comm)
+{
+ PyObject *handler, *retval, *context, *t;
+ static char handler_name[256];
+ struct format_field *field;
+ unsigned long long val;
+ unsigned long s, ns;
+ struct event *event;
+ unsigned n = 0;
+ int type;
+ int pid;
+
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ type = trace_parse_common_type(data);
+
+ event = find_cache_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = trace_parse_common_pid(data);
+
+ sprintf(handler_name, "%s__%s", event->system, event->name);
+
+ s = nsecs / NSECS_PER_SEC;
+ ns = nsecs - s * NSECS_PER_SEC;
+
+ scripting_context->event_data = data;
+
+ context = PyCObject_FromVoidPtr(scripting_context, NULL);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
+ PyTuple_SetItem(t, n++,
+ PyCObject_FromVoidPtr(scripting_context, NULL));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(s));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
+ PyTuple_SetItem(t, n++, PyString_FromString(comm));
+
+ for (field = event->format.fields; field; field = field->next) {
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ } else
+ offset = field->offset;
+ PyTuple_SetItem(t, n++,
+ PyString_FromString((char *)data + offset));
+ } else { /* FIELD_IS_NUMERIC */
+ val = read_size(data + field->offset, field->size);
+ if (field->flags & FIELD_IS_SIGNED) {
+ PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+ } else {
+ PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+ }
+ }
+ }
+
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ } else {
+ handler = PyDict_GetItemString(main_dict, "trace_unhandled");
+ if (handler && PyCallable_Check(handler)) {
+ if (_PyTuple_Resize(&t, N_COMMON_FIELDS) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die("trace_unhandled");
+ }
+ }
+
+ Py_DECREF(t);
+}
+
+static int run_start_sub(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ main_module = PyImport_AddModule("__main__");
+ if (main_module == NULL)
+ return -1;
+ Py_INCREF(main_module);
+
+ main_dict = PyModule_GetDict(main_module);
+ if (main_dict == NULL) {
+ err = -1;
+ goto error;
+ }
+ Py_INCREF(main_dict);
+
+ handler = PyDict_GetItemString(main_dict, "trace_begin");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_begin");
+
+ Py_DECREF(retval);
+ return err;
+error:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+out:
+ return err;
+}
+
+/*
+ * Start trace script
+ */
+static int python_start_script(const char *script, int argc, const char **argv)
+{
+ const char **command_line;
+ char buf[PATH_MAX];
+ int i, err = 0;
+ FILE *fp;
+
+ command_line = malloc((argc + 1) * sizeof(const char *));
+ command_line[0] = script;
+ for (i = 1; i < argc + 1; i++)
+ command_line[i] = argv[i - 1];
+
+ Py_Initialize();
+
+ initperf_trace_context();
+
+ PySys_SetArgv(argc + 1, (char **)command_line);
+
+ fp = fopen(script, "r");
+ if (!fp) {
+ sprintf(buf, "Can't open python script \"%s\"", script);
+ perror(buf);
+ err = -1;
+ goto error;
+ }
+
+ err = PyRun_SimpleFile(fp, script);
+ if (err) {
+ fprintf(stderr, "Error running python script %s\n", script);
+ goto error;
+ }
+
+ err = run_start_sub();
+ if (err) {
+ fprintf(stderr, "Error starting python script %s\n", script);
+ goto error;
+ }
+
+ free(command_line);
+ fprintf(stderr, "perf trace started with Python script %s\n\n",
+ script);
+
+ return err;
+error:
+ Py_Finalize();
+ free(command_line);
+
+ return err;
+}
+
+/*
+ * Stop trace script
+ */
+static int python_stop_script(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ handler = PyDict_GetItemString(main_dict, "trace_end");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_end");
+ else
+ Py_DECREF(retval);
+out:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+ Py_Finalize();
+
+ fprintf(stderr, "\nperf trace Python script stopped\n");
+
+ return err;
+}
+
+static int python_generate_script(const char *outfile)
+{
+ struct event *event = NULL;
+ struct format_field *f;
+ char fname[PATH_MAX];
+ int not_first, count;
+ FILE *ofp;
+
+ sprintf(fname, "%s.py", outfile);
+ ofp = fopen(fname, "w");
+ if (ofp == NULL) {
+ fprintf(stderr, "couldn't open %s\n", fname);
+ return -1;
+ }
+ fprintf(ofp, "# perf trace event handlers, "
+ "generated by perf trace -g python\n");
+
+ fprintf(ofp, "# Licensed under the terms of the GNU GPL"
+ " License version 2\n\n");
+
+ fprintf(ofp, "# The common_* event handler fields are the most useful "
+ "fields common to\n");
+
+ fprintf(ofp, "# all events. They don't necessarily correspond to "
+ "the 'common_*' fields\n");
+
+ fprintf(ofp, "# in the format files. Those fields not available as "
+ "handler params can\n");
+
+ fprintf(ofp, "# be retrieved using Python functions of the form "
+ "common_*(context).\n");
+
+ fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+ "of available functions.\n\n");
+
+ fprintf(ofp, "import os\n");
+ fprintf(ofp, "import sys\n\n");
+
+ fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
+ fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
+ fprintf(ofp, "\nfrom perf_trace_context import *\n");
+ fprintf(ofp, "from Core import *\n\n\n");
+
+ fprintf(ofp, "def trace_begin():\n");
+ fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+
+ fprintf(ofp, "def trace_end():\n");
+ fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+
+ while ((event = trace_find_next_event(event))) {
+ fprintf(ofp, "def %s__%s(", event->system, event->name);
+ fprintf(ofp, "event_name, ");
+ fprintf(ofp, "context, ");
+ fprintf(ofp, "common_cpu,\n");
+ fprintf(ofp, "\tcommon_secs, ");
+ fprintf(ofp, "common_nsecs, ");
+ fprintf(ofp, "common_pid, ");
+ fprintf(ofp, "common_comm,\n\t");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t");
+
+ fprintf(ofp, "%s", f->name);
+ }
+ fprintf(ofp, "):\n");
+
+ fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
+ "common_secs, common_nsecs,\n\t\t\t"
+ "common_pid, common_comm)\n\n");
+
+ fprintf(ofp, "\t\tprint \"");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (count && count % 3 == 0) {
+ fprintf(ofp, "\" \\\n\t\t\"");
+ }
+ count++;
+
+ fprintf(ofp, "%s=", f->name);
+ if (f->flags & FIELD_IS_STRING ||
+ f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_SYMBOLIC)
+ fprintf(ofp, "%%s");
+ else if (f->flags & FIELD_IS_SIGNED)
+ fprintf(ofp, "%%d");
+ else
+ fprintf(ofp, "%%u");
+ }
+
+ fprintf(ofp, "\\n\" %% \\\n\t\t(");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t\t");
+
+ if (f->flags & FIELD_IS_FLAG) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "flag_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "symbol_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else
+ fprintf(ofp, "%s", f->name);
+ }
+
+ fprintf(ofp, "),\n\n");
+ }
+
+ fprintf(ofp, "def trace_unhandled(event_name, context, "
+ "common_cpu, common_secs, common_nsecs,\n\t\t"
+ "common_pid, common_comm):\n");
+
+ fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
+ "common_secs, common_nsecs,\n\t\tcommon_pid, "
+ "common_comm)\n\n");
+
+ fprintf(ofp, "def print_header("
+ "event_name, cpu, secs, nsecs, pid, comm):\n"
+ "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+ "(event_name, cpu, secs, nsecs, pid, comm),\n");
+
+ fclose(ofp);
+
+ fprintf(stderr, "generated Python script: %s\n", fname);
+
+ return 0;
+}
+
+struct scripting_ops python_scripting_ops = {
+ .name = "Python",
+ .start_script = python_start_script,
+ .stop_script = python_stop_script,
+ .process_event = python_process_event,
+ .generate_script = python_generate_script,
+};
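Aside (not part of the patch): the scripting_ops table above is consumed by perf's generic trace-scripting layer, which selects an engine by name and calls through the function pointers. A minimal stand-alone sketch of that name-keyed dispatch pattern follows; the struct, pick_scripting_ops() helper and handlers here are simplified stand-ins, not perf's real registration code.

/* Sketch only: illustrates dispatching through a name-keyed ops table. */
#include <stdio.h>
#include <strings.h>

struct scripting_ops_sketch {
	const char *name;
	int (*start_script)(const char *script, int argc, const char **argv);
	int (*stop_script)(void);
};

static int demo_start(const char *script, int argc, const char **argv)
{
	(void)argc; (void)argv;
	printf("starting %s\n", script);
	return 0;
}

static int demo_stop(void) { return 0; }

static struct scripting_ops_sketch engines[] = {
	{ .name = "Python", .start_script = demo_start, .stop_script = demo_stop },
};

/* pick_scripting_ops() is hypothetical; perf's real lookup lives elsewhere. */
static struct scripting_ops_sketch *pick_scripting_ops(const char *name)
{
	size_t i;
	for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (!strcasecmp(engines[i].name, name))
			return &engines[i];
	return NULL;
}

int main(void)
{
	struct scripting_ops_sketch *ops = pick_scripting_ops("python");
	if (ops && !ops->start_script("syscall-counts.py", 0, NULL))
		ops->stop_script();
	return 0;
}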
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce3a6c8..0de7258 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,5 +1,8 @@
+#define _FILE_OFFSET_BITS 64
+
#include <linux/kernel.h>
+#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
@@ -49,6 +52,11 @@ out_close:
return -1;
}
+static inline int perf_session__create_kernel_maps(struct perf_session *self)
+{
+ return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
+}
+
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
size_t len = filename ? strlen(filename) + 1 : 0;
@@ -66,13 +74,22 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
self->mmap_window = 32;
self->cwd = NULL;
self->cwdlen = 0;
+ self->unknown_events = 0;
map_groups__init(&self->kmaps);
- if (perf_session__create_kernel_maps(self) < 0)
- goto out_delete;
+ if (mode == O_RDONLY) {
+ if (perf_session__open(self, force) < 0)
+ goto out_delete;
+ } else if (mode == O_WRONLY) {
+ /*
+ * In O_RDONLY mode this will be performed when reading the
+ * kernel MMAP event, in event__process_mmap().
+ */
+ if (perf_session__create_kernel_maps(self) < 0)
+ goto out_delete;
+ }
- if (mode == O_RDONLY && perf_session__open(self, force) < 0)
- goto out_delete;
+ self->sample_type = perf_header__sample_type(&self->header);
out:
return self;
out_free:
@@ -148,3 +165,409 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
return syms;
}
+
+static int process_event_stub(event_t *event __used,
+ struct perf_session *session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+{
+ if (handler->sample == NULL)
+ handler->sample = process_event_stub;
+ if (handler->mmap == NULL)
+ handler->mmap = process_event_stub;
+ if (handler->comm == NULL)
+ handler->comm = process_event_stub;
+ if (handler->fork == NULL)
+ handler->fork = process_event_stub;
+ if (handler->exit == NULL)
+ handler->exit = process_event_stub;
+ if (handler->lost == NULL)
+ handler->lost = process_event_stub;
+ if (handler->read == NULL)
+ handler->read = process_event_stub;
+ if (handler->throttle == NULL)
+ handler->throttle = process_event_stub;
+ if (handler->unthrottle == NULL)
+ handler->unthrottle = process_event_stub;
+}
+
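A note on the stub-filling pattern above (illustration, not from the patch): callers set only the handlers they care about and perf_event_ops__fill_defaults() plugs the rest with process_event_stub, so every case of the dispatch switch stays callable. A stand-alone sketch of the same pattern with simplified stand-in types:

/* Simplified stand-ins; perf's real struct perf_event_ops takes
 * (event_t *, struct perf_session *) handlers as declared in this patch. */
#include <stdio.h>

typedef int (*handler_fn)(int payload);

struct ops { handler_fn sample, mmap, comm; };

static int stub(int payload) { (void)payload; printf(": unhandled!\n"); return 0; }
static int my_sample(int payload) { printf("sample %d\n", payload); return 0; }

static void ops__fill_defaults(struct ops *o)
{
	if (!o->sample) o->sample = stub;
	if (!o->mmap)   o->mmap   = stub;
	if (!o->comm)   o->comm   = stub;
}

int main(void)
{
	struct ops o = { .sample = my_sample };	/* only samples are of interest */

	ops__fill_defaults(&o);
	o.sample(1);	/* handled by my_sample */
	o.mmap(2);	/* silently handled by the stub */
	return 0;
}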
+static const char *event__name[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+};
+
+unsigned long event__total[PERF_RECORD_MAX];
+
+void event__print_totals(void)
+{
+ int i;
+ for (i = 0; i < PERF_RECORD_MAX; ++i)
+ pr_info("%10s events: %10ld\n",
+ event__name[i], event__total[i]);
+}
+
+void mem_bswap_64(void *src, int byte_size)
+{
+ u64 *m = src;
+
+ while (byte_size > 0) {
+ *m = bswap_64(*m);
+ byte_size -= sizeof(u64);
+ ++m;
+ }
+}
+
+static void event__all64_swap(event_t *self)
+{
+ struct perf_event_header *hdr = &self->header;
+ mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+}
+
+static void event__comm_swap(event_t *self)
+{
+ self->comm.pid = bswap_32(self->comm.pid);
+ self->comm.tid = bswap_32(self->comm.tid);
+}
+
+static void event__mmap_swap(event_t *self)
+{
+ self->mmap.pid = bswap_32(self->mmap.pid);
+ self->mmap.tid = bswap_32(self->mmap.tid);
+ self->mmap.start = bswap_64(self->mmap.start);
+ self->mmap.len = bswap_64(self->mmap.len);
+ self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+}
+
+static void event__task_swap(event_t *self)
+{
+ self->fork.pid = bswap_32(self->fork.pid);
+ self->fork.tid = bswap_32(self->fork.tid);
+ self->fork.ppid = bswap_32(self->fork.ppid);
+ self->fork.ptid = bswap_32(self->fork.ptid);
+ self->fork.time = bswap_64(self->fork.time);
+}
+
+static void event__read_swap(event_t *self)
+{
+ self->read.pid = bswap_32(self->read.pid);
+ self->read.tid = bswap_32(self->read.tid);
+ self->read.value = bswap_64(self->read.value);
+ self->read.time_enabled = bswap_64(self->read.time_enabled);
+ self->read.time_running = bswap_64(self->read.time_running);
+ self->read.id = bswap_64(self->read.id);
+}
+
+typedef void (*event__swap_op)(event_t *self);
+
+static event__swap_op event__swap_ops[] = {
+ [PERF_RECORD_MMAP] = event__mmap_swap,
+ [PERF_RECORD_COMM] = event__comm_swap,
+ [PERF_RECORD_FORK] = event__task_swap,
+ [PERF_RECORD_EXIT] = event__task_swap,
+ [PERF_RECORD_LOST] = event__all64_swap,
+ [PERF_RECORD_READ] = event__read_swap,
+ [PERF_RECORD_SAMPLE] = event__all64_swap,
+ [PERF_RECORD_MAX] = NULL,
+};
+
+static int perf_session__process_event(struct perf_session *self,
+ event_t *event,
+ struct perf_event_ops *ops,
+ u64 offset, u64 head)
+{
+ trace_event(event);
+
+ if (event->header.type < PERF_RECORD_MAX) {
+ dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
+ offset + head, event->header.size,
+ event__name[event->header.type]);
+ ++event__total[0];
+ ++event__total[event->header.type];
+ }
+
+ if (self->header.needs_swap && event__swap_ops[event->header.type])
+ event__swap_ops[event->header.type](event);
+
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ return ops->sample(event, self);
+ case PERF_RECORD_MMAP:
+ return ops->mmap(event, self);
+ case PERF_RECORD_COMM:
+ return ops->comm(event, self);
+ case PERF_RECORD_FORK:
+ return ops->fork(event, self);
+ case PERF_RECORD_EXIT:
+ return ops->exit(event, self);
+ case PERF_RECORD_LOST:
+ return ops->lost(event, self);
+ case PERF_RECORD_READ:
+ return ops->read(event, self);
+ case PERF_RECORD_THROTTLE:
+ return ops->throttle(event, self);
+ case PERF_RECORD_UNTHROTTLE:
+ return ops->unthrottle(event, self);
+ default:
+ self->unknown_events++;
+ return -1;
+ }
+}
+
+void perf_event_header__bswap(struct perf_event_header *self)
+{
+ self->type = bswap_32(self->type);
+ self->misc = bswap_16(self->misc);
+ self->size = bswap_16(self->size);
+}
+
+int perf_header__read_build_ids(struct perf_header *self,
+ int input, u64 offset, u64 size)
+{
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+ int err = -1;
+
+ while (offset < limit) {
+ struct dso *dso;
+ ssize_t len;
+ struct list_head *head = &dsos__user;
+
+ if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ if (self->needs_swap)
+ perf_event_header__bswap(&bev.header);
+
+ len = bev.header.size - sizeof(bev);
+ if (read(input, filename, len) != len)
+ goto out;
+
+ if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
+ head = &dsos__kernel;
+
+ dso = __dsos__findnew(head, filename);
+ if (dso != NULL) {
+ dso__set_build_id(dso, &bev.build_id);
+ if (head == &dsos__kernel && filename[0] == '[')
+ dso->kernel = 1;
+ }
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+{
+ struct thread *thread = perf_session__findnew(self, 0);
+
+ if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
+
+ return thread;
+}
+
+int __perf_session__process_events(struct perf_session *self,
+ u64 data_offset, u64 data_size,
+ u64 file_size, struct perf_event_ops *ops)
+{
+ int err, mmap_prot, mmap_flags;
+ u64 head, shift;
+ u64 offset = 0;
+ size_t page_size;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+
+ perf_event_ops__fill_defaults(ops);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ head = data_offset;
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
+ mmap_prot = PROT_READ;
+ mmap_flags = MAP_SHARED;
+
+ if (self->header.needs_swap) {
+ mmap_prot |= PROT_WRITE;
+ mmap_flags = MAP_PRIVATE;
+ }
+remap:
+ buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
+ mmap_flags, self->fd, offset);
+ if (buf == MAP_FAILED) {
+ pr_err("failed to mmap file\n");
+ err = -errno;
+ goto out_err;
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ if (self->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+ size = event->header.size;
+ if (size == 0)
+ size = 8;
+
+ if (head + event->header.size >= page_size * self->mmap_window) {
+ int munmap_ret;
+
+ shift = page_size * (head / page_size);
+
+ munmap_ret = munmap(buf, page_size * self->mmap_window);
+ assert(munmap_ret == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ dump_printf("\n%#Lx [%#x]: event: %d\n",
+ offset + head, event->header.size, event->header.type);
+
+ if (size == 0 ||
+ perf_session__process_event(self, event, ops, offset, head) < 0) {
+ dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ offset + head, event->header.size,
+ event->header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope of catching on again 'soon'.

+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head >= data_offset + data_size)
+ goto done;
+
+ if (offset + head < file_size)
+ goto more;
+done:
+ err = 0;
+out_err:
+ return err;
+}
+
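The remap loop above keeps the mmap offset page aligned: whenever the parse position (head) grows past the mapped window, the whole-page part of head is folded into offset and head is reduced by the same amount, so offset + head always points at the same byte of the file. A tiny stand-alone check of that arithmetic (page size and positions hard-coded purely for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 4096;
	uint64_t offset = 0, head = 9000;	/* e.g. data_offset of a perf.data file */

	/* Same rebasing as in __perf_session__process_events(): */
	uint64_t shift = page_size * (head / page_size);

	offset += shift;
	head   -= shift;

	assert((offset % page_size) == 0);	/* mmap offset stays page aligned */
	assert(offset + head == 9000);		/* absolute file position is unchanged */
	printf("offset=%llu head=%llu\n",
	       (unsigned long long)offset, (unsigned long long)head);
	return 0;
}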
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *ops)
+{
+ int err;
+
+ if (perf_session__register_idle_thread(self) == NULL)
+ return -ENOMEM;
+
+ if (!symbol_conf.full_paths) {
+ char bf[PATH_MAX];
+
+ if (getcwd(bf, sizeof(bf)) == NULL) {
+ err = -errno;
+out_getcwd_err:
+ pr_err("failed to get the current directory\n");
+ goto out_err;
+ }
+ self->cwd = strdup(bf);
+ if (self->cwd == NULL) {
+ err = -ENOMEM;
+ goto out_getcwd_err;
+ }
+ self->cwdlen = strlen(self->cwd);
+ }
+
+ err = __perf_session__process_events(self, self->header.data_offset,
+ self->header.data_size,
+ self->size, ops);
+out_err:
+ return err;
+}
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg)
+{
+ if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
+ }
+
+ return true;
+}
+
+int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
+ const char *symbol_name,
+ u64 addr)
+{
+ char *bracket;
+ enum map_type i;
+
+ self->ref_reloc_sym.name = strdup(symbol_name);
+ if (self->ref_reloc_sym.name == NULL)
+ return -ENOMEM;
+
+ bracket = strchr(self->ref_reloc_sym.name, ']');
+ if (bracket)
+ *bracket = '\0';
+
+ self->ref_reloc_sym.addr = addr;
+
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
+ kmap->ref_reloc_sym = &self->ref_reloc_sym;
+ }
+
+ return 0;
+}
+
+static u64 map__reloc_map_ip(struct map *map, u64 ip)
+{
+ return ip + (s64)map->pgoff;
+}
+
+static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
+{
+ return ip - (s64)map->pgoff;
+}
+
+void map__reloc_vmlinux(struct map *self)
+{
+ struct kmap *kmap = map__kmap(self);
+ s64 reloc;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
+ return;
+
+ reloc = (kmap->ref_reloc_sym->unrelocated_addr -
+ kmap->ref_reloc_sym->addr);
+
+ if (!reloc)
+ return;
+
+ self->map_ip = map__reloc_map_ip;
+ self->unmap_ip = map__reloc_unmap_ip;
+ self->pgoff = reloc;
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 32eaa1b..31950fc 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -3,13 +3,13 @@
#include "event.h"
#include "header.h"
+#include "symbol.h"
#include "thread.h"
#include <linux/rbtree.h>
#include "../../../include/linux/perf_event.h"
struct ip_callchain;
struct thread;
-struct symbol;
struct perf_session {
struct perf_header header;
@@ -18,10 +18,13 @@ struct perf_session {
struct map_groups kmaps;
struct rb_root threads;
struct thread *last_match;
+ struct map *vmlinux_maps[MAP__NR_TYPES];
struct events_stats events_stats;
unsigned long event_total[PERF_RECORD_MAX];
+ unsigned long unknown_events;
struct rb_root hists;
u64 sample_type;
+ struct ref_reloc_sym ref_reloc_sym;
int fd;
int cwdlen;
char *cwd;
@@ -31,23 +34,25 @@ struct perf_session {
typedef int (*event_op)(event_t *self, struct perf_session *session);
struct perf_event_ops {
- event_op process_sample_event;
- event_op process_mmap_event;
- event_op process_comm_event;
- event_op process_fork_event;
- event_op process_exit_event;
- event_op process_lost_event;
- event_op process_read_event;
- event_op process_throttle_event;
- event_op process_unthrottle_event;
- int (*sample_type_check)(struct perf_session *session);
- unsigned long total_unknown;
- bool full_paths;
+ event_op sample,
+ mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ read,
+ throttle,
+ unthrottle;
};
struct perf_session *perf_session__new(const char *filename, int mode, bool force);
void perf_session__delete(struct perf_session *self);
+void perf_event_header__bswap(struct perf_event_header *self);
+
+int __perf_session__process_events(struct perf_session *self,
+ u64 data_offset, u64 data_size, u64 size,
+ struct perf_event_ops *ops);
int perf_session__process_events(struct perf_session *self,
struct perf_event_ops *event_ops);
@@ -56,6 +61,28 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
struct ip_callchain *chain,
struct symbol **parent);
-int perf_header__read_build_ids(int input, u64 offset, u64 file_size);
+bool perf_session__has_traces(struct perf_session *self, const char *msg);
+
+int perf_header__read_build_ids(struct perf_header *self, int input,
+ u64 offset, u64 file_size);
+
+int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
+ const char *symbol_name,
+ u64 addr);
+
+void mem_bswap_64(void *src, int byte_size);
+
+static inline int __perf_session__create_kernel_maps(struct perf_session *self,
+ struct dso *kernel)
+{
+ return __map_groups__create_kernel_maps(&self->kmaps,
+ self->vmlinux_maps, kernel);
+}
+static inline struct map *
+ perf_session__new_module_map(struct perf_session *self,
+ u64 start, const char *filename)
+{
+ return map_groups__new_module(&self->kmaps, start, filename);
+}
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 5352d7d..c397d4f 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -227,16 +227,73 @@ fail:
return NULL;
}
-/* Glob expression pattern matching */
+/* Character class matching */
+static bool __match_charclass(const char *pat, char c, const char **npat)
+{
+ bool complement = false, ret = true;
+
+ if (*pat == '!') {
+ complement = true;
+ pat++;
+ }
+ if (*pat++ == c) /* First character is special */
+ goto end;
+
+ while (*pat && *pat != ']') { /* Matching */
+ if (*pat == '-' && *(pat + 1) != ']') { /* Range */
+ if (*(pat - 1) <= c && c <= *(pat + 1))
+ goto end;
+ if (*(pat - 1) > *(pat + 1))
+ goto error;
+ pat += 2;
+ } else if (*pat++ == c)
+ goto end;
+ }
+ if (!*pat)
+ goto error;
+ ret = false;
+
+end:
+ while (*pat && *pat != ']') /* Searching closing */
+ pat++;
+ if (!*pat)
+ goto error;
+ *npat = pat + 1;
+ return complement ? !ret : ret;
+
+error:
+ return false;
+}
+
+/**
+ * strglobmatch - glob expression pattern matching
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This returns true if @str matches @pat. @pat can include wildcards
+ * ('*','?') and character classes ([CHARS]; complementation and ranges are
+ * also supported). It also supports the escape character ('\') so that
+ * special characters can be matched as normal characters.
+ *
+ * Note: if @pat syntax is broken, this always returns false.
+ */
bool strglobmatch(const char *str, const char *pat)
{
while (*str && *pat && *pat != '*') {
- if (*pat == '?') {
+ if (*pat == '?') { /* Matches any single character */
str++;
pat++;
- } else
- if (*str++ != *pat++)
+ continue;
+ } else if (*pat == '[') /* Character classes/Ranges */
+ if (__match_charclass(pat + 1, *str, &pat)) {
+ str++;
+ continue;
+ } else
return false;
+ else if (*pat == '\\') /* Escaped char match as normal char */
+ pat++;
+ if (*str++ != *pat++)
+ return false;
}
/* Check wild card */
if (*pat == '*') {
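A usage sketch for the extended matcher, assuming strglobmatch() is exported by the perf util string header (include path assumed) and behaves as the docstring above describes; the patterns are only illustrative:

#include <assert.h>
#include <stdbool.h>
#include "util/string.h"	/* for strglobmatch(); path assumed from the perf tree */

int main(void)
{
	assert(strglobmatch("sys_open", "sys_*"));	/* '*' wildcard */
	assert(strglobmatch("sys_read", "sys_rea?"));	/* '?' matches a single char */
	assert(strglobmatch("do_fork", "[a-z]*_fork"));	/* character range class */
	assert(!strglobmatch("do_fork", "sys_*"));	/* no match */
	assert(strglobmatch("a*b", "a\\*b"));		/* escaped '*' taken literally */
	return 0;
}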
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ab92763..323c0ae 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,6 +1,5 @@
#include "util.h"
#include "../perf.h"
-#include "session.h"
#include "sort.h"
#include "string.h"
#include "symbol.h"
@@ -22,6 +21,7 @@
enum dso_origin {
DSO__ORIG_KERNEL = 0,
DSO__ORIG_JAVA_JIT,
+ DSO__ORIG_BUILD_ID_CACHE,
DSO__ORIG_FEDORA,
DSO__ORIG_UBUNTU,
DSO__ORIG_BUILDID,
@@ -33,7 +33,7 @@ enum dso_origin {
static void dsos__add(struct list_head *head, struct dso *dso);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct perf_session *session, symbol_filter_t filter);
+ symbol_filter_t filter);
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
@@ -53,17 +53,12 @@ bool dso__sorted_by_name(const struct dso *self, enum map_type type)
return self->sorted_by_name & (1 << type);
}
-static void dso__set_loaded(struct dso *self, enum map_type type)
-{
- self->loaded |= (1 << type);
-}
-
static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
{
self->sorted_by_name |= (1 << type);
}
-static bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
switch (map_type) {
case MAP__FUNCTION:
@@ -142,14 +137,14 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
self->start = start;
self->end = len ? start + len - 1 : start;
- pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+ pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
memcpy(self->name, name, namelen);
return self;
}
-static void symbol__delete(struct symbol *self)
+void symbol__delete(struct symbol *self)
{
free(((void *)self) - symbol_conf.priv_size);
}
@@ -160,7 +155,7 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp)
self->start, self->end, self->name);
}
-static void dso__set_long_name(struct dso *self, char *name)
+void dso__set_long_name(struct dso *self, char *name)
{
if (name == NULL)
return;
@@ -175,7 +170,7 @@ static void dso__set_basename(struct dso *self)
struct dso *dso__new(const char *name)
{
- struct dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+ struct dso *self = zalloc(sizeof(*self) + strlen(name) + 1);
if (self != NULL) {
int i;
@@ -344,10 +339,10 @@ void dso__sort_by_name(struct dso *self, enum map_type type)
&self->symbols[type]);
}
-int build_id__sprintf(u8 *self, int len, char *bf)
+int build_id__sprintf(const u8 *self, int len, char *bf)
{
char *bid = bf;
- u8 *raw = self;
+ const u8 *raw = self;
int i;
for (i = 0; i < len; ++i) {
@@ -372,6 +367,10 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
struct rb_node *nd;
size_t ret = fprintf(fp, "dso: %s (", self->short_name);
+ if (self->short_name != self->long_name)
+ ret += fprintf(fp, "%s, ", self->long_name);
+ ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
+ self->loaded ? "" : "NOT ");
ret += dso__fprintf_buildid(self, fp);
ret += fprintf(fp, ")\n");
for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
@@ -382,24 +381,20 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
return ret;
}
-/*
- * Loads the function entries in /proc/kallsyms into kernel_map->dso,
- * so that we can in the next step set the symbol ->end address and then
- * call kernel_maps__split_kallsyms.
- */
-static int dso__load_all_kallsyms(struct dso *self, struct map *map)
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start))
{
char *line = NULL;
size_t n;
- struct rb_root *root = &self->symbols[map->type];
- FILE *file = fopen("/proc/kallsyms", "r");
+ int err = 0;
+ FILE *file = fopen(filename, "r");
if (file == NULL)
goto out_failure;
while (!feof(file)) {
u64 start;
- struct symbol *sym;
int line_len, len;
char symbol_type;
char *symbol_name;
@@ -420,43 +415,72 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
continue;
symbol_type = toupper(line[len]);
- if (!symbol_type__is_a(symbol_type, map->type))
- continue;
-
symbol_name = line + len + 2;
- /*
- * Will fix up the end later, when we have all symbols sorted.
- */
- sym = symbol__new(start, 0, symbol_name);
- if (sym == NULL)
- goto out_delete_line;
- /*
- * We will pass the symbols to the filter later, in
- * map__split_kallsyms, when we have split the maps per module
- */
- symbols__insert(root, sym);
+ err = process_symbol(arg, symbol_name, symbol_type, start);
+ if (err)
+ break;
}
free(line);
fclose(file);
+ return err;
- return 0;
-
-out_delete_line:
- free(line);
out_failure:
return -1;
}
+struct process_kallsyms_args {
+ struct map *map;
+ struct dso *dso;
+};
+
+static int map__process_kallsym_symbol(void *arg, const char *name,
+ char type, u64 start)
+{
+ struct symbol *sym;
+ struct process_kallsyms_args *a = arg;
+ struct rb_root *root = &a->dso->symbols[a->map->type];
+
+ if (!symbol_type__is_a(type, a->map->type))
+ return 0;
+
+ /*
+ * Will fix up the end later, when we have all symbols sorted.
+ */
+ sym = symbol__new(start, 0, name);
+
+ if (sym == NULL)
+ return -ENOMEM;
+ /*
+ * We will pass the symbols to the filter later, in
+ * map__split_kallsyms, when we have split the maps per module
+ */
+ symbols__insert(root, sym);
+ return 0;
+}
+
+/*
+ * Loads the function entries in /proc/kallsyms into kernel_map->dso,
+ * so that we can in the next step set the symbol ->end address and then
+ * call kernel_maps__split_kallsyms.
+ */
+static int dso__load_all_kallsyms(struct dso *self, const char *filename,
+ struct map *map)
+{
+ struct process_kallsyms_args args = { .map = map, .dso = self, };
+ return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
+}
+
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
static int dso__split_kallsyms(struct dso *self, struct map *map,
- struct perf_session *session, symbol_filter_t filter)
+ symbol_filter_t filter)
{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
struct map *curr_map = map;
struct symbol *pos;
int count = 0;
@@ -477,13 +501,17 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
*module++ = '\0';
- if (strcmp(self->name, module)) {
- curr_map = map_groups__find_by_name(&session->kmaps, map->type, module);
+ if (strcmp(curr_map->dso->short_name, module)) {
+ curr_map = map_groups__find_by_name(kmaps, map->type, module);
if (curr_map == NULL) {
pr_debug("/proc/{kallsyms,modules} "
- "inconsistency!\n");
+ "inconsistency while looking "
+ "for \"%s\" module!\n", module);
return -1;
}
+
+ if (curr_map->dso->loaded)
+ goto discard_symbol;
}
/*
* So that we look just like we get from .ko files,
@@ -503,13 +531,13 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
return -1;
curr_map = map__new2(pos->start, dso, map->type);
- if (map == NULL) {
+ if (curr_map == NULL) {
dso__delete(dso);
return -1;
}
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- map_groups__insert(&session->kmaps, curr_map);
+ map_groups__insert(kmaps, curr_map);
++kernel_range;
}
@@ -528,17 +556,16 @@ discard_symbol: rb_erase(&pos->rb_node, root);
return count;
}
-
-static int dso__load_kallsyms(struct dso *self, struct map *map,
- struct perf_session *session, symbol_filter_t filter)
+int dso__load_kallsyms(struct dso *self, const char *filename,
+ struct map *map, symbol_filter_t filter)
{
- if (dso__load_all_kallsyms(self, map) < 0)
+ if (dso__load_all_kallsyms(self, filename, map) < 0)
return -1;
symbols__fixup_end(&self->symbols[map->type]);
self->origin = DSO__ORIG_KERNEL;
- return dso__split_kallsyms(self, map, session, filter);
+ return dso__split_kallsyms(self, map, filter);
}
static int dso__load_perf_map(struct dso *self, struct map *map,
@@ -864,10 +891,10 @@ static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type
}
}
-static int dso__load_sym(struct dso *self, struct map *map,
- struct perf_session *session, const char *name, int fd,
- symbol_filter_t filter, int kernel, int kmodule)
+static int dso__load_sym(struct dso *self, struct map *map, const char *name,
+ int fd, symbol_filter_t filter, int kmodule)
{
+ struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = self;
size_t dso_name_len = strlen(self->short_name);
@@ -924,7 +951,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
- if (!kernel) {
+ if (!self->kernel) {
self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
elf_section_by_name(elf, &ehdr, &shdr,
".gnu.prelink_undo",
@@ -933,11 +960,15 @@ static int dso__load_sym(struct dso *self, struct map *map,
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
- const char *elf_name;
+ const char *elf_name = elf_sym__name(&sym, symstrs);
char *demangled = NULL;
int is_label = elf_sym__is_label(&sym);
const char *section_name;
+ if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+
if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
@@ -950,10 +981,9 @@ static int dso__load_sym(struct dso *self, struct map *map,
if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
continue;
- elf_name = elf_sym__name(&sym, symstrs);
section_name = elf_sec__name(&shdr, secstrs);
- if (kernel || kmodule) {
+ if (self->kernel || kmodule) {
char dso_name[PATH_MAX];
if (strcmp(section_name,
@@ -969,7 +999,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
snprintf(dso_name, sizeof(dso_name),
"%s%s", self->short_name, section_name);
- curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name);
+ curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
if (curr_map == NULL) {
u64 start = sym.st_value;
@@ -980,7 +1010,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
if (curr_dso == NULL)
goto out_elf_end;
curr_map = map__new2(start, curr_dso,
- MAP__FUNCTION);
+ map->type);
if (curr_map == NULL) {
dso__delete(curr_dso);
goto out_elf_end;
@@ -988,8 +1018,9 @@ static int dso__load_sym(struct dso *self, struct map *map,
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
curr_dso->origin = DSO__ORIG_KERNEL;
- map_groups__insert(&session->kmaps, curr_map);
+ map_groups__insert(kmap->kmaps, curr_map);
dsos__add(&dsos__kernel, curr_dso);
+ dso__set_loaded(curr_dso, map->type);
} else
curr_dso = curr_map->dso;
@@ -997,9 +1028,10 @@ static int dso__load_sym(struct dso *self, struct map *map,
}
if (curr_dso->adjust_symbols) {
- pr_debug2("adjusting symbol: st_value: %Lx sh_addr: "
- "%Lx sh_offset: %Lx\n", (u64)sym.st_value,
- (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+ pr_debug4("%s: adjusting symbol: st_value: %#Lx "
+ "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+ (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
/*
@@ -1027,8 +1059,16 @@ new_symbol:
/*
* For misannotated, zeroed, ASM function sizes.
*/
- if (nr > 0)
+ if (nr > 0) {
symbols__fixup_end(&self->symbols[map->type]);
+ if (kmap) {
+ /*
+ * We need to fixup this here too because we create new
+ * maps here, for things like vsyscall sections.
+ */
+ __map_groups__fixup_end(kmap->kmaps, map->type);
+ }
+ }
err = nr;
out_elf_end:
elf_end(elf);
@@ -1041,25 +1081,28 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
}
-static bool __dsos__read_build_ids(struct list_head *head)
+static bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
bool have_build_id = false;
struct dso *pos;
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
if (filename__read_build_id(pos->long_name, pos->build_id,
sizeof(pos->build_id)) > 0) {
have_build_id = true;
pos->has_build_id = true;
}
+ }
return have_build_id;
}
-bool dsos__read_build_ids(void)
+bool dsos__read_build_ids(bool with_hits)
{
- bool kbuildids = __dsos__read_build_ids(&dsos__kernel),
- ubuildids = __dsos__read_build_ids(&dsos__user);
+ bool kbuildids = __dsos__read_build_ids(&dsos__kernel, with_hits),
+ ubuildids = __dsos__read_build_ids(&dsos__user, with_hits);
return kbuildids || ubuildids;
}
@@ -1191,6 +1234,7 @@ char dso__symtab_origin(const struct dso *self)
static const char origin[] = {
[DSO__ORIG_KERNEL] = 'k',
[DSO__ORIG_JAVA_JIT] = 'j',
+ [DSO__ORIG_BUILD_ID_CACHE] = 'B',
[DSO__ORIG_FEDORA] = 'f',
[DSO__ORIG_UBUNTU] = 'u',
[DSO__ORIG_BUILDID] = 'b',
@@ -1203,19 +1247,19 @@ char dso__symtab_origin(const struct dso *self)
return origin[self->origin];
}
-int dso__load(struct dso *self, struct map *map, struct perf_session *session,
- symbol_filter_t filter)
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
{
int size = PATH_MAX;
char *name;
u8 build_id[BUILD_ID_SIZE];
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
int ret = -1;
int fd;
dso__set_loaded(self, map->type);
if (self->kernel)
- return dso__load_kernel_sym(self, map, session, filter);
+ return dso__load_kernel_sym(self, map, filter);
name = malloc(size);
if (!name)
@@ -1230,8 +1274,16 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session,
return ret;
}
- self->origin = DSO__ORIG_FEDORA - 1;
+ self->origin = DSO__ORIG_BUILD_ID_CACHE;
+ if (self->has_build_id) {
+ build_id__sprintf(self->build_id, sizeof(self->build_id),
+ build_id_hex);
+ snprintf(name, size, "%s/%s/.build-id/%.2s/%s",
+ getenv("HOME"), DEBUG_CACHE_DIR,
+ build_id_hex, build_id_hex + 2);
+ goto open_file;
+ }
more:
do {
self->origin++;
@@ -1247,8 +1299,6 @@ more:
case DSO__ORIG_BUILDID:
if (filename__read_build_id(self->long_name, build_id,
sizeof(build_id))) {
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
-
build_id__sprintf(build_id, sizeof(build_id),
build_id_hex);
snprintf(name, size,
@@ -1276,11 +1326,11 @@ compare_build_id:
if (!dso__build_id_equal(self, build_id))
goto more;
}
-
+open_file:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0);
+ ret = dso__load_sym(self, map, name, fd, filter, 0);
close(fd);
/*
@@ -1309,14 +1359,34 @@ struct map *map_groups__find_by_name(struct map_groups *self,
for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
- if (map->dso && strcmp(map->dso->name, name) == 0)
+ if (map->dso && strcmp(map->dso->short_name, name) == 0)
return map;
}
return NULL;
}
-static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname)
+static int dso__kernel_module_get_build_id(struct dso *self)
+{
+ char filename[PATH_MAX];
+ /*
+ * kernel module short names are of the form "[module]" and
+ * we need just "module" here.
+ */
+ const char *name = self->short_name + 1;
+
+ snprintf(filename, sizeof(filename),
+ "/sys/module/%.*s/notes/.note.gnu.build-id",
+ (int)strlen(name) - 1, name);
+
+ if (sysfs__read_build_id(filename, self->build_id,
+ sizeof(self->build_id)) == 0)
+ self->has_build_id = true;
+
+ return 0;
+}
+
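As the comment notes, module DSO short names look like "[ext4]": the leading '[' is skipped and the trailing ']' dropped when building the sysfs notes path. A stand-alone illustration of that string handling (the module name and buffer size are just examples):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *short_name = "[ext4]";	/* example module short name */
	const char *name = short_name + 1;	/* skip the leading '[' */
	char filename[256];

	/* %.*s with strlen(name) - 1 drops the trailing ']' */
	snprintf(filename, sizeof(filename),
		 "/sys/module/%.*s/notes/.note.gnu.build-id",
		 (int)strlen(name) - 1, name);

	printf("%s\n", filename);	/* /sys/module/ext4/notes/.note.gnu.build-id */
	return 0;
}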
+static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirname)
{
struct dirent *dent;
DIR *dir = opendir(dirname);
@@ -1336,7 +1406,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
snprintf(path, sizeof(path), "%s/%s",
dirname, dent->d_name);
- if (perf_session__set_modules_path_dir(self, path) < 0)
+ if (map_groups__set_modules_path_dir(self, path) < 0)
goto failure;
} else {
char *dot = strrchr(dent->d_name, '.'),
@@ -1350,7 +1420,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
- map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name);
+ map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
if (map == NULL)
continue;
@@ -1361,6 +1431,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
if (long_name == NULL)
goto failure;
dso__set_long_name(map->dso, long_name);
+ dso__kernel_module_get_build_id(map->dso);
}
}
@@ -1370,7 +1441,7 @@ failure:
return -1;
}
-static int perf_session__set_modules_path(struct perf_session *self)
+static int map_groups__set_modules_path(struct map_groups *self)
{
struct utsname uts;
char modules_path[PATH_MAX];
@@ -1381,7 +1452,7 @@ static int perf_session__set_modules_path(struct perf_session *self)
snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
uts.release);
- return perf_session__set_modules_path_dir(self, modules_path);
+ return map_groups__set_modules_path_dir(self, modules_path);
}
/*
@@ -1391,8 +1462,8 @@ static int perf_session__set_modules_path(struct perf_session *self)
*/
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
- struct map *self = malloc(sizeof(*self));
-
+ struct map *self = zalloc(sizeof(*self) +
+ (dso->kernel ? sizeof(struct kmap) : 0));
if (self != NULL) {
/*
* ->end will be filled after we load all the symbols
@@ -1403,7 +1474,25 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
return self;
}
-static int perf_session__create_module_maps(struct perf_session *self)
+struct map *map_groups__new_module(struct map_groups *self, u64 start,
+ const char *filename)
+{
+ struct map *map;
+ struct dso *dso = __dsos__findnew(&dsos__kernel, filename);
+
+ if (dso == NULL)
+ return NULL;
+
+ map = map__new2(start, dso, MAP__FUNCTION);
+ if (map == NULL)
+ return NULL;
+
+ dso->origin = DSO__ORIG_KMODULE;
+ map_groups__insert(self, map);
+ return map;
+}
+
+static int map_groups__create_modules(struct map_groups *self)
{
char *line = NULL;
size_t n;
@@ -1416,7 +1505,6 @@ static int perf_session__create_module_maps(struct perf_session *self)
while (!feof(file)) {
char name[PATH_MAX];
u64 start;
- struct dso *dso;
char *sep;
int line_len;
@@ -1442,32 +1530,16 @@ static int perf_session__create_module_maps(struct perf_session *self)
*sep = '\0';
snprintf(name, sizeof(name), "[%s]", line);
- dso = dso__new(name);
-
- if (dso == NULL)
- goto out_delete_line;
-
- map = map__new2(start, dso, MAP__FUNCTION);
- if (map == NULL) {
- dso__delete(dso);
+ map = map_groups__new_module(self, start, name);
+ if (map == NULL)
goto out_delete_line;
- }
-
- snprintf(name, sizeof(name),
- "/sys/module/%s/notes/.note.gnu.build-id", line);
- if (sysfs__read_build_id(name, dso->build_id,
- sizeof(dso->build_id)) == 0)
- dso->has_build_id = true;
-
- dso->origin = DSO__ORIG_KMODULE;
- map_groups__insert(&self->kmaps, map);
- dsos__add(&dsos__kernel, dso);
+ dso__kernel_module_get_build_id(map->dso);
}
free(line);
fclose(file);
- return perf_session__set_modules_path(self);
+ return map_groups__set_modules_path(self);
out_delete_line:
free(line);
@@ -1476,7 +1548,6 @@ out_failure:
}
static int dso__load_vmlinux(struct dso *self, struct map *map,
- struct perf_session *session,
const char *vmlinux, symbol_filter_t filter)
{
int err = -1, fd;
@@ -1510,51 +1581,124 @@ static int dso__load_vmlinux(struct dso *self, struct map *map,
return -1;
dso__set_loaded(self, map->type);
- err = dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0);
+ err = dso__load_sym(self, map, vmlinux, fd, filter, 0);
close(fd);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", vmlinux);
+
+ return err;
+}
+
+int dso__load_vmlinux_path(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int i, err = 0;
+
+ pr_debug("Looking at the vmlinux_path (%d entries long)\n",
+ vmlinux_path__nr_entries);
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+ err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
+ if (err > 0) {
+ dso__set_long_name(self, strdup(vmlinux_path[i]));
+ break;
+ }
+ }
+
return err;
}
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct perf_session *session, symbol_filter_t filter)
+ symbol_filter_t filter)
{
int err;
- bool is_kallsyms;
+ const char *kallsyms_filename = NULL;
+ char *kallsyms_allocated_filename = NULL;
+ /*
+ * Step 1: if the user specified a vmlinux filename, use it and only
+ * it, reporting errors to the user if it cannot be used.
+ *
+ * For instance, try to analyse an ARM perf.data file _without_ a
+ * build-id, or if the user specifies the wrong path to the right
+ * vmlinux file, obviously we can't fall back to another vmlinux (an
+ * x86_64 one, on the machine where the analysis is being performed, say),
+ * or worse, /proc/kallsyms.
+ *
+ * If the specified file _has_ a build-id and there is a build-id
+ * section in the perf.data file, we will still do the expected
+ * validation in dso__load_vmlinux and will bail out if they don't
+ * match.
+ */
+ if (symbol_conf.vmlinux_name != NULL) {
+ err = dso__load_vmlinux(self, map,
+ symbol_conf.vmlinux_name, filter);
+ goto out_try_fixup;
+ }
if (vmlinux_path != NULL) {
- int i;
- pr_debug("Looking at the vmlinux_path (%d entries long)\n",
- vmlinux_path__nr_entries);
- for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(self, map, session,
- vmlinux_path[i], filter);
- if (err > 0) {
- pr_debug("Using %s for symbols\n",
- vmlinux_path[i]);
- dso__set_long_name(self,
- strdup(vmlinux_path[i]));
- goto out_fixup;
+ err = dso__load_vmlinux_path(self, map, filter);
+ if (err > 0)
+ goto out_fixup;
+ }
+
+ /*
+ * If the kernel DSO was created when processing the build-id header table,
+ * we already have a build-id, so check whether it matches the running
+ * kernel's and use it if it does.
+ */
+ if (self->has_build_id) {
+ u8 kallsyms_build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
+ sizeof(kallsyms_build_id)) == 0) {
+ if (dso__build_id_equal(self, kallsyms_build_id)) {
+ kallsyms_filename = "/proc/kallsyms";
+ goto do_kallsyms;
}
}
- }
+ /*
+ * Now check whether we have it in the build-id cache, in
+ * $HOME/.debug/[kernel.kallsyms].
+ */
+ build_id__sprintf(self->build_id, sizeof(self->build_id),
+ sbuild_id);
- is_kallsyms = self->long_name[0] == '[';
- if (is_kallsyms)
- goto do_kallsyms;
+ if (asprintf(&kallsyms_allocated_filename,
+ "%s/.debug/[kernel.kallsyms]/%s",
+ getenv("HOME"), sbuild_id) == -1) {
+ pr_err("Not enough memory for kallsyms file lookup\n");
+ return -1;
+ }
- err = dso__load_vmlinux(self, map, session, self->long_name, filter);
- if (err <= 0) {
- pr_info("The file %s cannot be used, "
- "trying to use /proc/kallsyms...", self->long_name);
-do_kallsyms:
- err = dso__load_kallsyms(self, map, session, filter);
- if (err > 0 && !is_kallsyms)
- dso__set_long_name(self, strdup("[kernel.kallsyms]"));
+ kallsyms_filename = kallsyms_allocated_filename;
+
+ if (access(kallsyms_filename, F_OK)) {
+ pr_err("No kallsyms or vmlinux with build-id %s "
+ "was found\n", sbuild_id);
+ free(kallsyms_allocated_filename);
+ return -1;
+ }
+ } else {
+ /*
+ * Last resort, if we don't have a build-id and couldn't find
+ * any vmlinux file, try the running kernel kallsyms table.
+ */
+ kallsyms_filename = "/proc/kallsyms";
}
+do_kallsyms:
+ err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+ free(kallsyms_allocated_filename);
+
+out_try_fixup:
if (err > 0) {
out_fixup:
+ if (kallsyms_filename != NULL)
+ dso__set_long_name(self, strdup("[kernel.kallsyms]"));
map__fixup_start(map);
map__fixup_end(map);
}
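The build-id cache probed above lives under $HOME/.debug; for a kernel the entry is keyed by "[kernel.kallsyms]" plus the build-id hex string, and the resulting path is then checked with access(F_OK) before being handed to the kallsyms loader. A stand-alone sketch of that path construction (the build-id value is made up):

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *sbuild_id = "0123456789abcdef0123456789abcdef01234567"; /* example */
	const char *home = getenv("HOME") ? getenv("HOME") : "/root";
	char *path = NULL;

	if (asprintf(&path, "%s/.debug/[kernel.kallsyms]/%s", home, sbuild_id) == -1)
		return 1;

	printf("%s\n", path);	/* candidate kallsyms copy for this build-id */
	free(path);
	return 0;
}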
@@ -1564,7 +1708,6 @@ out_fixup:
LIST_HEAD(dsos__user);
LIST_HEAD(dsos__kernel);
-struct dso *vdso;
static void dsos__add(struct list_head *head, struct dso *dso)
{
@@ -1576,19 +1719,19 @@ static struct dso *dsos__find(struct list_head *head, const char *name)
struct dso *pos;
list_for_each_entry(pos, head, node)
- if (strcmp(pos->name, name) == 0)
+ if (strcmp(pos->long_name, name) == 0)
return pos;
return NULL;
}
-struct dso *dsos__findnew(const char *name)
+struct dso *__dsos__findnew(struct list_head *head, const char *name)
{
- struct dso *dso = dsos__find(&dsos__user, name);
+ struct dso *dso = dsos__find(head, name);
if (!dso) {
dso = dso__new(name);
if (dso != NULL) {
- dsos__add(&dsos__user, dso);
+ dsos__add(head, dso);
dso__set_basename(dso);
}
}
@@ -1613,75 +1756,78 @@ void dsos__fprintf(FILE *fp)
__dsos__fprintf(&dsos__user, fp);
}
-static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp)
+static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool with_hits)
{
struct dso *pos;
size_t ret = 0;
list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
ret += dso__fprintf_buildid(pos, fp);
ret += fprintf(fp, " %s\n", pos->long_name);
}
return ret;
}
-size_t dsos__fprintf_buildid(FILE *fp)
+size_t dsos__fprintf_buildid(FILE *fp, bool with_hits)
{
- return (__dsos__fprintf_buildid(&dsos__kernel, fp) +
- __dsos__fprintf_buildid(&dsos__user, fp));
+ return (__dsos__fprintf_buildid(&dsos__kernel, fp, with_hits) +
+ __dsos__fprintf_buildid(&dsos__user, fp, with_hits));
}
-static struct dso *dsos__create_kernel( const char *vmlinux)
+struct dso *dso__new_kernel(const char *name)
{
- struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]");
+ struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
- if (kernel == NULL)
- return NULL;
+ if (self != NULL) {
+ self->short_name = "[kernel]";
+ self->kernel = 1;
+ }
- kernel->short_name = "[kernel]";
- kernel->kernel = 1;
+ return self;
+}
- vdso = dso__new("[vdso]");
- if (vdso == NULL)
- goto out_delete_kernel_dso;
- dso__set_loaded(vdso, MAP__FUNCTION);
+void dso__read_running_kernel_build_id(struct dso *self)
+{
+ if (sysfs__read_build_id("/sys/kernel/notes", self->build_id,
+ sizeof(self->build_id)) == 0)
+ self->has_build_id = true;
+}
- if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id,
- sizeof(kernel->build_id)) == 0)
- kernel->has_build_id = true;
+static struct dso *dsos__create_kernel(const char *vmlinux)
+{
+ struct dso *kernel = dso__new_kernel(vmlinux);
- dsos__add(&dsos__kernel, kernel);
- dsos__add(&dsos__user, vdso);
+ if (kernel != NULL) {
+ dso__read_running_kernel_build_id(kernel);
+ dsos__add(&dsos__kernel, kernel);
+ }
return kernel;
-
-out_delete_kernel_dso:
- dso__delete(kernel);
- return NULL;
}
-static int map_groups__create_kernel_maps(struct map_groups *self, const char *vmlinux)
+int __map_groups__create_kernel_maps(struct map_groups *self,
+ struct map *vmlinux_maps[MAP__NR_TYPES],
+ struct dso *kernel)
{
- struct map *functions, *variables;
- struct dso *kernel = dsos__create_kernel(vmlinux);
+ enum map_type type;
- if (kernel == NULL)
- return -1;
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
- functions = map__new2(0, kernel, MAP__FUNCTION);
- if (functions == NULL)
- return -1;
+ vmlinux_maps[type] = map__new2(0, kernel, type);
+ if (vmlinux_maps[type] == NULL)
+ return -1;
- variables = map__new2(0, kernel, MAP__VARIABLE);
- if (variables == NULL) {
- map__delete(functions);
- return -1;
- }
+ vmlinux_maps[type]->map_ip =
+ vmlinux_maps[type]->unmap_ip = identity__map_ip;
- functions->map_ip = functions->unmap_ip =
- variables->map_ip = variables->unmap_ip = identity__map_ip;
- map_groups__insert(self, functions);
- map_groups__insert(self, variables);
+ kmap = map__kmap(vmlinux_maps[type]);
+ kmap->kmaps = self;
+ map_groups__insert(self, vmlinux_maps[type]);
+ }
return 0;
}
@@ -1791,19 +1937,22 @@ out_free_comm_list:
return -1;
}
-int perf_session__create_kernel_maps(struct perf_session *self)
+int map_groups__create_kernel_maps(struct map_groups *self,
+ struct map *vmlinux_maps[MAP__NR_TYPES])
{
- if (map_groups__create_kernel_maps(&self->kmaps,
- symbol_conf.vmlinux_name) < 0)
+ struct dso *kernel = dsos__create_kernel(symbol_conf.vmlinux_name);
+
+ if (kernel == NULL)
+ return -1;
+
+ if (__map_groups__create_kernel_maps(self, vmlinux_maps, kernel) < 0)
return -1;
- if (symbol_conf.use_modules &&
- perf_session__create_module_maps(self) < 0)
- pr_debug("Failed to load list of modules for session %s, "
- "continuing...\n", self->filename);
+ if (symbol_conf.use_modules && map_groups__create_modules(self) < 0)
+ pr_debug("Problems creating module maps, continuing anyway...\n");
/*
* Now that we have all the maps created, just set the ->end of them:
*/
- map_groups__fixup_end(&self->kmaps);
+ map_groups__fixup_end(self);
return 0;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 8aded23..280dadd 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -8,6 +8,8 @@
#include <linux/rbtree.h>
#include "event.h"
+#define DEBUG_CACHE_DIR ".debug"
+
#ifdef HAVE_CPLUS_DEMANGLE
extern char *cplus_demangle(const char *, int);
@@ -49,6 +51,8 @@ struct symbol {
char name[0];
};
+void symbol__delete(struct symbol *self);
+
struct strlist;
struct symbol_conf {
@@ -58,7 +62,8 @@ struct symbol_conf {
sort_by_name,
show_nr_samples,
use_callchain,
- exclude_other;
+ exclude_other,
+ full_paths;
const char *vmlinux_name,
*field_sep;
char *dso_list_str,
@@ -77,6 +82,12 @@ static inline void *symbol__priv(struct symbol *self)
return ((void *)self) - symbol_conf.priv_size;
}
+struct ref_reloc_sym {
+ const char *name;
+ u64 addr;
+ u64 unrelocated_addr;
+};
+
struct addr_location {
struct thread *thread;
struct map *map;
@@ -94,6 +105,7 @@ struct dso {
u8 slen_calculated:1;
u8 has_build_id:1;
u8 kernel:1;
+ u8 hit:1;
unsigned char origin;
u8 sorted_by_name;
u8 loaded;
@@ -105,37 +117,55 @@ struct dso {
};
struct dso *dso__new(const char *name);
+struct dso *dso__new_kernel(const char *name);
void dso__delete(struct dso *self);
bool dso__loaded(const struct dso *self, enum map_type type);
bool dso__sorted_by_name(const struct dso *self, enum map_type type);
+static inline void dso__set_loaded(struct dso *self, enum map_type type)
+{
+ self->loaded |= (1 << type);
+}
+
void dso__sort_by_name(struct dso *self, enum map_type type);
-struct perf_session;
+extern struct list_head dsos__user, dsos__kernel;
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name);
+
+static inline struct dso *dsos__findnew(const char *name)
+{
+ return __dsos__findnew(&dsos__user, name);
+}
-struct dso *dsos__findnew(const char *name);
-int dso__load(struct dso *self, struct map *map, struct perf_session *session,
- symbol_filter_t filter);
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
+int dso__load_vmlinux_path(struct dso *self, struct map *map,
+ symbol_filter_t filter);
+int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
+ symbol_filter_t filter);
void dsos__fprintf(FILE *fp);
-size_t dsos__fprintf_buildid(FILE *fp);
+size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);
size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
char dso__symtab_origin(const struct dso *self);
+void dso__set_long_name(struct dso *self, char *name);
void dso__set_build_id(struct dso *self, void *build_id);
+void dso__read_running_kernel_build_id(struct dso *self);
struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
const char *name);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
-bool dsos__read_build_ids(void);
-int build_id__sprintf(u8 *self, int len, char *bf);
+bool dsos__read_build_ids(bool with_hits);
+int build_id__sprintf(const u8 *self, int len, char *bf);
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start));
int symbol__init(void);
-int perf_session__create_kernel_maps(struct perf_session *self);
+bool symbol_type__is_a(char symbol_type, enum map_type map_type);
-extern struct list_head dsos__user, dsos__kernel;
-extern struct dso *vdso;
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 4a08dcf..21b9216 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -31,12 +31,41 @@ static struct thread *thread__new(pid_t pid)
return self;
}
+static void map_groups__flush(struct map_groups *self)
+{
+ int type;
+
+ for (type = 0; type < MAP__NR_TYPES; type++) {
+ struct rb_root *root = &self->maps[type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, root);
+ /*
+ * We may have references to this map, for
+ * instance in some hist_entry instances, so
+ * just move them to a separate list.
+ */
+ list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+ }
+ }
+}
+
int thread__set_comm(struct thread *self, const char *comm)
{
+ int err;
+
if (self->comm)
free(self->comm);
self->comm = strdup(comm);
- return self->comm ? 0 : -ENOMEM;
+ err = self->comm == NULL ? -ENOMEM : 0;
+ if (!err) {
+ self->comm_set = true;
+ map_groups__flush(&self->mg);
+ }
+ return err;
}
int thread__comm_len(struct thread *self)
@@ -50,11 +79,6 @@ int thread__comm_len(struct thread *self)
return self->comm_len;
}
-static const char *map_type__name[MAP__NR_TYPES] = {
- [MAP__FUNCTION] = "Functions",
- [MAP__VARIABLE] = "Variables",
-};
-
static size_t __map_groups__fprintf_maps(struct map_groups *self,
enum map_type type, FILE *fp)
{
@@ -255,11 +279,14 @@ int thread__fork(struct thread *self, struct thread *parent)
{
int i;
- if (self->comm)
- free(self->comm);
- self->comm = strdup(parent->comm);
- if (!self->comm)
- return -ENOMEM;
+ if (parent->comm_set) {
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(parent->comm);
+ if (!self->comm)
+ return -ENOMEM;
+ self->comm_set = true;
+ }
for (i = 0; i < MAP__NR_TYPES; ++i)
if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
@@ -282,14 +309,13 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
}
struct symbol *map_groups__find_symbol(struct map_groups *self,
- struct perf_session *session,
enum map_type type, u64 addr,
symbol_filter_t filter)
{
struct map *map = map_groups__find(self, type, addr);
if (map != NULL)
- return map__find_symbol(map, session, map->map_ip(map, addr), filter);
+ return map__find_symbol(map, map->map_ip(map, addr), filter);
return NULL;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index c206f72..0a28f39 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -15,6 +15,7 @@ struct thread {
struct map_groups mg;
pid_t pid;
char shortname[3];
+ bool comm_set;
char *comm;
int comm_len;
};
@@ -48,23 +49,36 @@ static inline struct map *thread__find_map(struct thread *self,
return self ? map_groups__find(&self->mg, type, addr) : NULL;
}
+void thread__find_addr_map(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, u64 addr,
+ struct addr_location *al);
+
void thread__find_addr_location(struct thread *self,
struct perf_session *session, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
struct symbol *map_groups__find_symbol(struct map_groups *self,
- struct perf_session *session,
enum map_type type, u64 addr,
symbol_filter_t filter);
-static inline struct symbol *
-map_groups__find_function(struct map_groups *self, struct perf_session *session,
- u64 addr, symbol_filter_t filter)
+static inline struct symbol *map_groups__find_function(struct map_groups *self,
+ u64 addr,
+ symbol_filter_t filter)
{
- return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter);
+ return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter);
}
struct map *map_groups__find_by_name(struct map_groups *self,
enum map_type type, const char *name);
+
+int __map_groups__create_kernel_maps(struct map_groups *self,
+ struct map *vmlinux_maps[MAP__NR_TYPES],
+ struct dso *kernel);
+int map_groups__create_kernel_maps(struct map_groups *self,
+ struct map *vmlinux_maps[MAP__NR_TYPES]);
+
+struct map *map_groups__new_module(struct map_groups *self, u64 start,
+ const char *filename);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index cace355..5ea8973 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -20,6 +20,7 @@
*/
#define _GNU_SOURCE
#include <dirent.h>
+#include <mntent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -37,6 +38,7 @@
#include "../perf.h"
#include "trace-event.h"
+#include "debugfs.h"
#define VERSION "0.5"
@@ -101,32 +103,12 @@ void *malloc_or_die(unsigned int size)
static const char *find_debugfs(void)
{
- static char debugfs[MAX_PATH+1];
- static int debugfs_found;
- char type[100];
- FILE *fp;
-
- if (debugfs_found)
- return debugfs;
-
- if ((fp = fopen("/proc/mounts","r")) == NULL)
- die("Can't open /proc/mounts for read");
-
- while (fscanf(fp, "%*s %"
- STR(MAX_PATH)
- "s %99s %*s %*d %*d\n",
- debugfs, type) == 2) {
- if (strcmp(type, "debugfs") == 0)
- break;
- }
- fclose(fp);
-
- if (strcmp(type, "debugfs") != 0)
- die("debugfs not mounted, please mount");
+ const char *path = debugfs_mount(NULL);
- debugfs_found = 1;
+ if (!path)
+ die("Your kernel not support debugfs filesystem");
- return debugfs;
+ return path;
}
/*
@@ -271,6 +253,8 @@ static void read_header_files(void)
write_or_die("header_page", 12);
write_or_die(&size, 8);
check_size = copy_file_fd(fd);
+ close(fd);
+
if (size != check_size)
die("wrong size for '%s' size=%lld read=%lld",
path, size, check_size);
@@ -289,6 +273,7 @@ static void read_header_files(void)
if (size != check_size)
die("wrong size for '%s'", path);
put_tracing_file(path);
+ close(fd);
}
static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -317,7 +302,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
die("can't read directory '%s'", sys);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
@@ -334,7 +320,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
rewinddir(dir);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
@@ -353,6 +340,7 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
free(format);
}
+ closedir(dir);
}
static void read_ftrace_files(struct tracepoint_path *tps)
@@ -394,26 +382,21 @@ static void read_event_files(struct tracepoint_path *tps)
die("can't read directory '%s'", path);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
- sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
- sprintf(sys, "%s/%s", path, dent->d_name);
- ret = stat(sys, &st);
- free(sys);
- if (ret < 0)
- continue;
- if (S_ISDIR(st.st_mode))
- count++;
+ count++;
}
write_or_die(&count, 4);
rewinddir(dir);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
@@ -422,14 +405,13 @@ static void read_event_files(struct tracepoint_path *tps)
sprintf(sys, "%s/%s", path, dent->d_name);
ret = stat(sys, &st);
if (ret >= 0) {
- if (S_ISDIR(st.st_mode)) {
- write_or_die(dent->d_name, strlen(dent->d_name) + 1);
- copy_event_system(sys, tps);
- }
+ write_or_die(dent->d_name, strlen(dent->d_name) + 1);
+ copy_event_system(sys, tps);
}
free(sys);
}
+ closedir(dir);
put_tracing_file(path);
}
@@ -533,7 +515,7 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
write_or_die(buf, 1);
/* save page_size */
- page_size = getpagesize();
+ page_size = sysconf(_SC_PAGESIZE);
write_or_die(&page_size, 4);
read_header_files();
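
The recorded page size now comes from sysconf(_SC_PAGESIZE) rather than the legacy getpagesize(); both return the same value on Linux, but sysconf() is the POSIX-specified interface. Note that sysconf() returns a long, while the value is recorded as four bytes via write_or_die(&page_size, 4), which is ample for any page size Linux uses. A tiny illustration:

    /* Sketch: querying the page size the portable way. */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);

            /* read_tracing_data() stores this as a 32-bit field, which easily
             * covers 4 KiB, 64 KiB and the other page sizes Linux supports. */
            printf("page size: %ld bytes\n", page_size);
            return 0;
    }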
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index c5c32be..9b3c20f 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1925,6 +1925,15 @@ void *raw_field_ptr(struct event *event, const char *name, void *data)
if (!field)
return NULL;
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+
+ return data + offset;
+ }
+
return data + field->offset;
}
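
For string fields, the raw record does not hold the bytes at field->offset; it holds a 32-bit "__data_loc" word whose low 16 bits are the offset of the string inside the record (the high half carries the string length). The hunk above masks off the low half and returns a pointer into the record. Below is a stand-alone sketch of that decoding on a hand-built record; the layout constants are made up for illustration (real offsets come from the event's format file), and the sketch uses memcpy() where the real code casts the pointer directly.

    /* Sketch: decoding an ftrace __data_loc string field. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            unsigned char record[64] = {0};
            const char *msg = "hello";
            uint32_t str_offset = 8;          /* where the string bytes live (made up) */
            uint32_t field_offset = 4;        /* where the __data_loc word sits (made up) */
            uint32_t data_loc = ((uint32_t)(strlen(msg) + 1) << 16) | str_offset;
            int offset;

            /* Build the fake record: data_loc word plus the string payload. */
            memcpy(record + field_offset, &data_loc, sizeof(data_loc));
            memcpy(record + str_offset, msg, strlen(msg) + 1);

            /* What raw_field_ptr() now does for FIELD_IS_STRING: read the word,
             * keep the low 16 bits, and point into the record at that offset. */
            memcpy(&offset, record + field_offset, sizeof(offset));
            offset &= 0xffff;

            printf("string field: %s\n", (char *)(record + offset));
            return 0;
    }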
@@ -3277,3 +3286,18 @@ void parse_set_info(int nr_cpus, int long_sz)
cpus = nr_cpus;
long_size = long_sz;
}
+
+int common_pc(struct scripting_context *context)
+{
+ return parse_common_pc(context->event_data);
+}
+
+int common_flags(struct scripting_context *context)
+{
+ return parse_common_flags(context->event_data);
+}
+
+int common_lock_depth(struct scripting_context *context)
+{
+ return parse_common_lock_depth(context->event_data);
+}
diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h
deleted file mode 100644
index e88fb26..0000000
--- a/tools/perf/util/trace-event-perl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef __PERF_TRACE_EVENT_PERL_H
-#define __PERF_TRACE_EVENT_PERL_H
-#ifdef NO_LIBPERL
-typedef int INTERP;
-#define dSP
-#define ENTER
-#define SAVETMPS
-#define PUTBACK
-#define SPAGAIN
-#define FREETMPS
-#define LEAVE
-#define SP
-#define ERRSV
-#define G_SCALAR (0)
-#define G_DISCARD (0)
-#define G_NOARGS (0)
-#define PUSHMARK(a)
-#define SvTRUE(a) (0)
-#define XPUSHs(s)
-#define sv_2mortal(a)
-#define newSVpv(a,b)
-#define newSVuv(a)
-#define newSViv(a)
-#define get_cv(a,b) (0)
-#define call_pv(a,b) (0)
-#define perl_alloc() (0)
-#define perl_construct(a) (0)
-#define perl_parse(a,b,c,d,e) (0)
-#define perl_run(a) (0)
-#define perl_destruct(a) (0)
-#define perl_free(a) (0)
-#define pTHX void
-#define CV void
-#define dXSUB_SYS
-#define pTHX_
-static inline void newXS(const char *a, void *b, const char *c) {}
-static void boot_Perf__Trace__Context(pTHX_ CV *cv) {}
-static void boot_DynaLoader(pTHX_ CV *cv) {}
-#else
-#include <EXTERN.h>
-#include <perl.h>
-void boot_Perf__Trace__Context(pTHX_ CV *cv);
-void boot_DynaLoader(pTHX_ CV *cv);
-typedef PerlInterpreter * INTERP;
-#endif
-
-struct scripting_context {
- void *event_data;
-};
-
-int common_pc(struct scripting_context *context);
-int common_flags(struct scripting_context *context);
-int common_lock_depth(struct scripting_context *context);
-
-#endif /* __PERF_TRACE_EVENT_PERL_H */
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 1744422..7cd1193 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -18,7 +18,7 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define _LARGEFILE64_SOURCE
+#define _FILE_OFFSET_BITS 64
#include <dirent.h>
#include <stdio.h>
@@ -83,7 +83,7 @@ static char *read_string(void)
char *str = NULL;
int size = 0;
int i;
- int r;
+ off_t r;
for (;;) {
r = read(input_fd, buf, BUFSIZ);
@@ -118,7 +118,7 @@ static char *read_string(void)
/* move the file descriptor to the end of the string */
r = lseek(input_fd, -(r - i), SEEK_CUR);
- if (r < 0)
+ if (r == (off_t)-1)
die("lseek");
if (str) {
@@ -282,8 +282,8 @@ static void update_cpu_data_index(int cpu)
static void get_next_page(int cpu)
{
- off64_t save_seek;
- off64_t ret;
+ off_t save_seek;
+ off_t ret;
if (!cpu_data[cpu].page)
return;
@@ -298,17 +298,17 @@ static void get_next_page(int cpu)
update_cpu_data_index(cpu);
/* other parts of the code may expect the pointer to not move */
- save_seek = lseek64(input_fd, 0, SEEK_CUR);
+ save_seek = lseek(input_fd, 0, SEEK_CUR);
- ret = lseek64(input_fd, cpu_data[cpu].offset, SEEK_SET);
- if (ret < 0)
+ ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
+ if (ret == (off_t)-1)
die("failed to lseek");
ret = read(input_fd, cpu_data[cpu].page, page_size);
if (ret < 0)
die("failed to read page");
/* reset the file pointer back */
- lseek64(input_fd, save_seek, SEEK_SET);
+ lseek(input_fd, save_seek, SEEK_SET);
return;
}
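
With _FILE_OFFSET_BITS defined to 64, glibc makes off_t 64-bit and routes plain lseek() onto the 64-bit interface, so the explicit lseek64()/off64_t calls can go away. The error check also changes: the documented failure return of lseek() is (off_t)-1, so that is what the code now compares against instead of "< 0". A small self-contained illustration of both points:

    /* Sketch: large-file offsets through the plain off_t interfaces.
     * The define must come before any header is included. */
    #define _FILE_OFFSET_BITS 64

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            const char *tmp = "/tmp/off_t-sketch.tmp";   /* scratch file, removed below */
            int fd = open(tmp, O_CREAT | O_RDWR, 0644);
            off_t pos;

            if (fd < 0)
                    return 1;

            printf("sizeof(off_t) = %zu\n", sizeof(off_t));   /* 8 with the define above */

            pos = lseek(fd, (off_t)1 << 32, SEEK_SET);        /* seek past 4 GiB (sparse) */
            if (pos == (off_t)-1)                             /* documented error return */
                    perror("lseek");
            else
                    printf("now at offset %lld\n", (long long)pos);

            close(fd);
            unlink(tmp);
            return 0;
    }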
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
new file mode 100644
index 0000000..7ea983a
--- /dev/null
+++ b/tools/perf/util/trace-event-scripting.c
@@ -0,0 +1,167 @@
+/*
+ * trace-event-scripting. Scripting engine common and initialization code.
+ *
+ * Copyright (C) 2009-2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+struct scripting_context *scripting_context;
+
+static int stop_script_unsupported(void)
+{
+ return 0;
+}
+
+static void process_event_unsupported(int cpu __unused,
+ void *data __unused,
+ int size __unused,
+ unsigned long long nsecs __unused,
+ char *comm __unused)
+{
+}
+
+static void print_python_unsupported_msg(void)
+{
+ fprintf(stderr, "Python scripting not supported."
+ " Install libpython and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install python-dev (ubuntu)"
+ "\n # yum install python-devel (Fedora)"
+ "\n etc.\n");
+}
+
+static int python_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+static int python_generate_script_unsupported(const char *outfile __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops python_scripting_unsupported_ops = {
+ .name = "Python",
+ .start_script = python_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = python_generate_script_unsupported,
+};
+
+static void register_python_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Python", scripting_ops);
+ if (err)
+ die("error registering Python script extension");
+
+ err = script_spec_register("py", scripting_ops);
+ if (err)
+ die("error registering py script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPYTHON
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_unsupported_ops);
+}
+#else
+struct scripting_ops python_scripting_ops;
+
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_ops);
+}
+#endif
+
+static void print_perl_unsupported_msg(void)
+{
+ fprintf(stderr, "Perl scripting not supported."
+ " Install libperl and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install libperl-dev (ubuntu)"
+ "\n # yum install 'perl(ExtUtils::Embed)' (Fedora)"
+ "\n etc.\n");
+}
+
+static int perl_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Perl", scripting_ops);
+ if (err)
+ die("error registering Perl script extension");
+
+ err = script_spec_register("pl", scripting_ops);
+ if (err)
+ die("error registering pl script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+struct scripting_ops perl_scripting_ops;
+
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_ops);
+}
+#endif
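
The new trace-event-scripting.c gives perf a single registration path whether or not the Perl/Python interpreters were built in: setup_*_scripting() registers either the real ops table or an "unsupported" table whose callbacks just explain how to enable the feature. The sketch below illustrates the same dispatch-through-an-ops-table idea in isolation; the tiny registry and lookup() here are simplified stand-ins, not perf's script_spec list.

    /* Sketch of the "real ops or unsupported ops" pattern. */
    #include <stdio.h>
    #include <string.h>

    struct scripting_ops {
            const char *name;
            int (*start_script)(const char *script);
    };

    static int python_start_unsupported(const char *script)
    {
            (void)script;
            fprintf(stderr, "Python scripting not supported, rebuild with libpython\n");
            return -1;
    }

    static struct scripting_ops python_unsupported_ops = {
            .name         = "Python",
            .start_script = python_start_unsupported,
    };

    /* Tiny extension -> ops registry (stand-in for script_spec_register()). */
    static struct { const char *spec; struct scripting_ops *ops; } registry[8];
    static int nr_specs;

    static int register_spec(const char *spec, struct scripting_ops *ops)
    {
            if (nr_specs >= 8)
                    return -1;
            registry[nr_specs].spec = spec;
            registry[nr_specs].ops  = ops;
            nr_specs++;
            return 0;
    }

    static struct scripting_ops *lookup(const char *spec)
    {
            for (int i = 0; i < nr_specs; i++)
                    if (!strcmp(registry[i].spec, spec))
                            return registry[i].ops;
            return NULL;
    }

    int main(void)
    {
            struct scripting_ops *ops;

            register_spec("py", &python_unsupported_ops);

            ops = lookup("py");
            if (ops)
                    ops->start_script("syscall-counts.py");  /* prints the hint, returns -1 */
            return 0;
    }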
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 6ad4056..c3269b9 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -279,7 +279,15 @@ struct scripting_ops {
int script_spec_register(const char *spec, struct scripting_ops *ops);
-extern struct scripting_ops perl_scripting_ops;
void setup_perl_scripting(void);
+void setup_python_scripting(void);
+
+struct scripting_context {
+ void *event_data;
+};
+
+int common_pc(struct scripting_context *context);
+int common_flags(struct scripting_context *context);
+int common_lock_depth(struct scripting_context *context);
#endif /* __PERF_TRACE_EVENTS_H */
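
scripting_context and the common_pc()/common_flags()/common_lock_depth() accessors move out of the Perl-only header into trace-event.h, so both the Perl and the new Python engines hand the current event's raw data to script-visible helpers through the same context object. A minimal stand-alone sketch of the idea: before invoking a script handler, point the shared context at the raw record, and let the accessor parse what it needs from it. The fake_parse_common_pc() below is a hypothetical stand-in for parse_common_pc(), so the example compiles on its own.

    /* Sketch of the shared scripting_context idea. */
    #include <stdio.h>
    #include <stdlib.h>

    struct scripting_context {
            void *event_data;
    };

    static struct scripting_context *scripting_context;

    /* Stand-in for parse_common_pc(): pretend the preempt count is byte 3. */
    static int fake_parse_common_pc(void *data)
    {
            return ((unsigned char *)data)[3];
    }

    static int common_pc(struct scripting_context *context)
    {
            return fake_parse_common_pc(context->event_data);
    }

    int main(void)
    {
            unsigned char record[8] = { 0, 0, 0, 2, 0, 0, 0, 0 };  /* made-up event data */

            scripting_context = malloc(sizeof(*scripting_context));
            if (!scripting_context)
                    return 1;

            /* The engine would do this once per event, before calling the handler. */
            scripting_context->event_data = record;

            printf("common_preempt_count = %d\n", common_pc(scripting_context));

            free(scripting_context);
            return 0;
    }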
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
new file mode 100644
index 0000000..f9b890f
--- /dev/null
+++ b/tools/perf/util/util.c
@@ -0,0 +1,94 @@
+#include "util.h"
+#include <sys/mman.h>
+
+int mkdir_p(char *path, mode_t mode)
+{
+ struct stat st;
+ int err;
+ char *d = path;
+
+ if (*d != '/')
+ return -1;
+
+ if (stat(path, &st) == 0)
+ return 0;
+
+ while (*++d == '/');
+
+ while ((d = strchr(d, '/'))) {
+ *d = '\0';
+ err = stat(path, &st) && mkdir(path, mode);
+ *d++ = '/';
+ if (err)
+ return -1;
+ while (*d == '/')
+ ++d;
+ }
+ return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
+}
+
+static int slow_copyfile(const char *from, const char *to)
+{
+ int err = -1;
+ char *line = NULL;
+ size_t n;
+ FILE *from_fp = fopen(from, "r"), *to_fp;
+
+ if (from_fp == NULL)
+ goto out;
+
+ to_fp = fopen(to, "w");
+ if (to_fp == NULL)
+ goto out_fclose_from;
+
+ while (getline(&line, &n, from_fp) > 0)
+ if (fputs(line, to_fp) == EOF)
+ goto out_fclose_to;
+ err = 0;
+out_fclose_to:
+ fclose(to_fp);
+ free(line);
+out_fclose_from:
+ fclose(from_fp);
+out:
+ return err;
+}
+
+int copyfile(const char *from, const char *to)
+{
+ int fromfd, tofd;
+ struct stat st;
+ void *addr;
+ int err = -1;
+
+ if (stat(from, &st))
+ goto out;
+
+ if (st.st_size == 0) /* /proc? do it slowly... */
+ return slow_copyfile(from, to);
+
+ fromfd = open(from, O_RDONLY);
+ if (fromfd < 0)
+ goto out;
+
+ tofd = creat(to, 0755);
+ if (tofd < 0)
+ goto out_close_from;
+
+ addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
+ if (addr == MAP_FAILED)
+ goto out_close_to;
+
+ if (write(tofd, addr, st.st_size) == st.st_size)
+ err = 0;
+
+ munmap(addr, st.st_size);
+out_close_to:
+ close(tofd);
+ if (err)
+ unlink(to);
+out_close_from:
+ close(fromfd);
+out:
+ return err;
+}
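
util.c gains two small helpers: mkdir_p() creates every missing component of an absolute path (stat() each prefix, mkdir() the ones that do not exist), and copyfile() mmap()s the source and writes it out in one go, falling back to line-by-line copying for files that stat as zero-sized, such as /proc entries. A short usage sketch follows; the paths in it are made up, it only repeats the two prototypes added to util.h, and it would be linked together with tools/perf/util/util.c rather than built on its own.

    /* Usage sketch for the two new helpers; link with tools/perf/util/util.c. */
    #include <stdio.h>
    #include <sys/types.h>

    int mkdir_p(char *path, mode_t mode);
    int copyfile(const char *from, const char *to);

    int main(void)
    {
            /* mkdir_p() modifies the buffer while walking it, so no string literal. */
            char dir[] = "/tmp/perf-util-sketch/.build-id/ab";   /* made-up cache path */

            if (mkdir_p(dir, 0755))
                    fprintf(stderr, "could not create %s\n", dir);

            /* /proc files stat as size 0, so this exercises slow_copyfile(). */
            if (copyfile("/proc/version", "/tmp/perf-util-sketch/version-copy"))
                    fprintf(stderr, "copy failed\n");

            return 0;
    }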
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c673d88..0f5b2a6 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -403,4 +403,7 @@ void git_qsort(void *base, size_t nmemb, size_t size,
#endif
#endif
+int mkdir_p(char *path, mode_t mode);
+int copyfile(const char *from, const char *to);
+
#endif
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index 1c15e39..cfa55d6 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -169,6 +169,7 @@ static void perf_read_values__display_pretty(FILE *fp,
counterwidth[j], values->value[i][j]);
fprintf(fp, "\n");
}
+ free(counterwidth);
}
static void perf_read_values__display_raw(FILE *fp,