author     Mike Travis <travis@sgi.com>    2008-11-25 02:35:02 +1030
committer  Ingo Molnar <mingo@elte.hu>     2008-11-24 17:49:27 +0100
commit     ea6f18ed5a1531caf678374f30a0990c9e6742f3 (patch)
tree       721c45d123ffd4f1f3bfbb93f9f7675b1588c610
parent     943f3d030003e1fa5f77647328e805441213bf49 (diff)
sched: reduce stack size requirements in kernel/sched.c
Impact: cleanup

  * use node_to_cpumask_ptr in place of node_to_cpumask to reduce
    stack requirements in sched.c

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--    kernel/sched.c    13
1 files changed, 7 insertions, 6 deletions
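For context on why this reduces stack usage: node_to_cpumask() returns a whole cpumask_t by value, so each call site needs a full-size mask on the stack, while node_to_cpumask_ptr gives the caller a pointer to an already-existing per-node mask. The following is a minimal userspace sketch of that difference only; it is not the kernel's implementation, and the names FAKE_NR_CPUS, fake_node_to_cpumask(), and fake_node_to_cpumask_ptr() are illustrative stand-ins.

/*
 * Illustrative userspace sketch only -- not kernel code.
 * Assumes a large CPU count so the mask is a big on-stack object.
 */
#include <stdio.h>
#include <string.h>

#define FAKE_NR_CPUS  4096
#define FAKE_NR_NODES 4
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
	unsigned long bits[FAKE_NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* One mask per node in static storage (stands in for a per-node mask table). */
static cpumask_t node_masks[FAKE_NR_NODES];

/* Old style: returns the mask by value, so the caller holds a full copy on its stack. */
static cpumask_t fake_node_to_cpumask(int node)
{
	return node_masks[node];
}

/* New style: hands back a pointer to the shared mask; only a pointer lives on the stack. */
static const cpumask_t *fake_node_to_cpumask_ptr(int node)
{
	return &node_masks[node];
}

static int count_bits(const cpumask_t *m)
{
	int i, n = 0;

	for (i = 0; i < FAKE_NR_CPUS; i++)
		if (m->bits[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			n++;
	return n;
}

int main(void)
{
	/* Pretend every CPU belongs to node 0. */
	memset(node_masks[0].bits, 0xff, sizeof(node_masks[0].bits));

	cpumask_t copy = fake_node_to_cpumask(0);           /* sizeof(cpumask_t) bytes on the stack */
	const cpumask_t *ptr = fake_node_to_cpumask_ptr(0); /* just a pointer on the stack */

	printf("sizeof(cpumask_t) = %zu bytes, sizeof(ptr) = %zu bytes\n",
	       sizeof(copy), sizeof(ptr));
	printf("cpus in node 0: %d (copy) vs %d (ptr)\n",
	       count_bits(&copy), count_bits(ptr));
	return 0;
}

With a 64-bit build and 4096 CPUs the by-value copy costs 512 bytes of stack per call site, versus 8 bytes for the pointer, which is the saving the patch below applies to three places in kernel/sched.c.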
diff --git a/kernel/sched.c b/kernel/sched.c
index bb82765..dd22cec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6110,8 +6110,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 	do {
 		/* On same node? */
-		mask = node_to_cpumask(cpu_to_node(dead_cpu));
-		cpus_and(mask, mask, p->cpus_allowed);
+		node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
+
+		cpus_and(mask, *pnodemask, p->cpus_allowed);
 		dest_cpu = any_online_cpu(mask);
 
 		/* On any allowed CPU? */
@@ -7098,9 +7099,9 @@ static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
 				 struct sched_group **sg, cpumask_t *nodemask)
 {
 	int group;
+	node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
 
-	*nodemask = node_to_cpumask(cpu_to_node(cpu));
-	cpus_and(*nodemask, *nodemask, *cpu_map);
+	cpus_and(*nodemask, *pnodemask, *cpu_map);
 	group = first_cpu(*nodemask);
 
 	if (sg)
@@ -7150,9 +7151,9 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 
 	for (i = 0; i < nr_node_ids; i++) {
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
+		node_to_cpumask_ptr(pnodemask, i);
 
-		*nodemask = node_to_cpumask(i);
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpus_and(*nodemask, *pnodemask, *cpu_map);
 		if (cpus_empty(*nodemask))
 			continue;