path: root/tools/perf/builtin-sched.c
author	Ingo Molnar <mingo@elte.hu>	2009-09-11 12:12:54 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-13 10:22:50 +0200
commit	b5fae128e41021889777f8ead810cbd2a8b249fc (patch)
tree	4fb7885dc9d9232c6c8fb4f45b95dfedcdbac175 /tools/perf/builtin-sched.c
parent	b1ffe8f3e0c96f5527a89e24410d6b0e59b3554a (diff)
perf sched: Clean up PID sorting logic
Use a sort list for thread atoms insertion as well - instead of hardcoded for PID.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
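The core of the change is a generic comparator chain: a sort_fn_t typedef and a list of struct sort_dimension entries that thread_lat_cmp() walks until one comparator decides. Below is a minimal, self-contained sketch of that pattern; it mirrors the names in the patch (sort_fn_t, sort_dimension, pid_cmp, thread_lat_cmp), but the reduced task_atoms/thread structs and the plain array standing in for the kernel's list_head/list_for_each_entry() are illustrative only, not the perf code itself.

/*
 * Illustrative sketch of the comparator-chain pattern from the patch.
 * A plain array replaces the kernel list, and the structs are reduced
 * to the fields the comparison needs.
 */
#include <stdio.h>

struct thread { int pid; };
struct task_atoms { struct thread *thread; };

typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);

struct sort_dimension {
	const char *name;
	sort_fn_t   cmp;
};

static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;
	return 0;
}

/* Walk the configured dimensions; the first one that differs decides. */
static int thread_lat_cmp(struct sort_dimension **dims, int n,
			  struct task_atoms *l, struct task_atoms *r)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = dims[i]->cmp(l, r);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	struct thread ta = { .pid = 10 }, tb = { .pid = 20 };
	struct task_atoms a = { .thread = &ta }, b = { .thread = &tb };
	struct sort_dimension pid_sort = { .name = "pid", .cmp = pid_cmp };
	struct sort_dimension *dims[] = { &pid_sort };

	printf("cmp(a, b) = %d\n", thread_lat_cmp(dims, 1, &a, &b));
	return 0;
}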
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--	tools/perf/builtin-sched.c	88
1 file changed, 47 insertions(+), 41 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cc2dbd5..b72544f 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -144,7 +144,7 @@ struct task_atoms {
u64 total_runtime;
};
-typedef int (*sort_thread_lat)(struct task_atoms *, struct task_atoms *);
+typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);
static struct rb_root atom_root, sorted_atom_root;
@@ -869,41 +869,22 @@ static struct trace_sched_handler replay_ops = {
.fork_event = replay_fork_event,
};
-static struct task_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct task_atoms *atoms;
-
- atoms = container_of(node, struct task_atoms, node);
- if (thread->pid > atoms->thread->pid)
- node = node->rb_left;
- else if (thread->pid < atoms->thread->pid)
- node = node->rb_right;
- else {
- return atoms;
- }
- }
- return NULL;
-}
-
struct sort_dimension {
const char *name;
- sort_thread_lat cmp;
+ sort_fn_t cmp;
struct list_head list;
};
static LIST_HEAD(cmp_pid);
static int
-thread_lat_cmp(struct list_head *list, struct task_atoms *l,
- struct task_atoms *r)
+thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
{
struct sort_dimension *sort;
int ret = 0;
+ BUG_ON(list_empty(list));
+
list_for_each_entry(sort, list, list) {
ret = sort->cmp(l, r);
if (ret)
@@ -913,6 +894,32 @@ thread_lat_cmp(struct list_head *list, struct task_atoms *l,
return ret;
}
+static struct task_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+ struct list_head *sort_list)
+{
+ struct rb_node *node = root->rb_node;
+ struct task_atoms key = { .thread = thread };
+
+ while (node) {
+ struct task_atoms *atoms;
+ int cmp;
+
+ atoms = container_of(node, struct task_atoms, node);
+
+ cmp = thread_lat_cmp(sort_list, &key, atoms);
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else {
+ BUG_ON(thread != atoms->thread);
+ return atoms;
+ }
+ }
+ return NULL;
+}
+
static void
__thread_latency_insert(struct rb_root *root, struct task_atoms *data,
struct list_head *sort_list)
@@ -1049,18 +1056,18 @@ latency_switch_event(struct trace_switch_event *switch_event,
sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
- in_atoms = thread_atoms_search(&atom_root, sched_in);
+ in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
if (!in_atoms) {
thread_atoms_insert(sched_in);
- in_atoms = thread_atoms_search(&atom_root, sched_in);
+ in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
if (!in_atoms)
die("in-atom: Internal tree error");
}
- out_atoms = thread_atoms_search(&atom_root, sched_out);
+ out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_atoms) {
thread_atoms_insert(sched_out);
- out_atoms = thread_atoms_search(&atom_root, sched_out);
+ out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_atoms)
die("out-atom: Internal tree error");
}
@@ -1085,7 +1092,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
return;
wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
- atoms = thread_atoms_search(&atom_root, wakee);
+ atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) {
thread_atoms_insert(wakee);
return;
@@ -1136,7 +1143,6 @@ static void output_lat_thread(struct task_atoms *atom_list)
static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{
-
if (l->thread->pid < r->thread->pid)
return -1;
if (l->thread->pid > r->thread->pid)
@@ -1146,8 +1152,8 @@ static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
}
static struct sort_dimension pid_sort_dimension = {
- .name = "pid",
- .cmp = pid_cmp,
+ .name = "pid",
+ .cmp = pid_cmp,
};
static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1172,8 +1178,8 @@ static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
}
static struct sort_dimension avg_sort_dimension = {
- .name = "avg",
- .cmp = avg_cmp,
+ .name = "avg",
+ .cmp = avg_cmp,
};
static int max_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1187,8 +1193,8 @@ static int max_cmp(struct task_atoms *l, struct task_atoms *r)
}
static struct sort_dimension max_sort_dimension = {
- .name = "max",
- .cmp = max_cmp,
+ .name = "max",
+ .cmp = max_cmp,
};
static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1202,8 +1208,8 @@ static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
}
static struct sort_dimension switch_sort_dimension = {
- .name = "switch",
- .cmp = switch_cmp,
+ .name = "switch",
+ .cmp = switch_cmp,
};
static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1217,8 +1223,8 @@ static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
}
static struct sort_dimension runtime_sort_dimension = {
- .name = "runtime",
- .cmp = runtime_cmp,
+ .name = "runtime",
+ .cmp = runtime_cmp,
};
static struct sort_dimension *available_sorts[] = {
@@ -1666,8 +1672,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc)
usage_with_options(latency_usage, latency_options);
- setup_sorting();
}
+ setup_sorting();
__cmd_lat();
} else if (!strncmp(argv[0], "rep", 3)) {
trace_handler = &replay_ops;