author     Rusty Russell <rusty@rustcorp.com.au>    2008-11-24 11:05:11 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-11-24 11:51:52 -0500
commit     e76bd8d9850c2296a7e8e24c9dce9b5e6b55fe2f (patch)
tree       c4f5178e224ece271317c60da569489b5a8704ee /kernel/sched.c
parent     f17c860760927c2a8e41a021eab3317e4415e962 (diff)
sched: avoid stack var in move_task_off_dead_cpu
Impact: stack usage reduction
With some care, we can avoid needing a temporary cpumask (we can't
really allocate here, since we can't fail).
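(For scale, and purely as an illustration outside this patch: cpumask_t is a fixed-size bitmap of NR_CPUS bits, so with a 4096-CPU config a single local mask costs 512 bytes of stack. The userspace sketch below mimics that layout just to show the arithmetic; the type definition is a stand-in, not the kernel's.)

	#include <stdio.h>

	/* Stand-in for the kernel's cpumask_t: one bit per possible CPU,
	 * rounded up to whole unsigned longs. */
	#define NR_CPUS 4096
	#define BITS_PER_LONG (8 * sizeof(unsigned long))
	typedef struct {
		unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
	} cpumask_t;

	int main(void)
	{
		/* Prints 512 on an LP64 machine: the per-variable stack cost
		 * that motivates removing "cpumask_t mask;" locals. */
		printf("sizeof(cpumask_t) = %zu bytes\n", sizeof(cpumask_t));
		return 0;
	}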
This version calls cpuset_cpus_allowed_locked() with the task_rq_lock
held. I'm fairly sure this works, but there might be a deadlock
hiding.
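For reference, the fallback path in question is this fragment of the new code (shown out of context here; see the diff below). Per the comment the patch drops, cpuset_cpus_allowed_locked() is the non-blocking variant of cpuset_cpus_allowed() and must be called within cpuset_lock()/cpuset_unlock(); what is new is that it now also runs under the runqueue lock:

	rq = task_rq_lock(p, &flags);                    /* rq->lock taken, IRQs off */
	cpuset_cpus_allowed_locked(p, &p->cpus_allowed); /* non-blocking; needs cpuset_lock()/cpuset_unlock() around it */
	dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
	task_rq_unlock(rq, &flags);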
And of course, we can't get rid of the last cpumask on stack until we
can use cpumask_of_node instead of node_to_cpumask.
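In other words, this patch still copies the node mask onto the stack (the FIXME in the diff); the eventual cleanup would presumably look like the sketch below once cpumask_of_node() is available — an assumption about a future change, not part of this commit:

	/* Hypothetical follow-up once cpumask_of_node() is available:
	 * borrow the node's constant mask instead of copying it into
	 * the "_nodemask" stack variable this patch still needs. */
	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));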
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 78
1 files changed, 36 insertions, 42 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 6deff24349b6..f7dee2029e4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6112,52 +6112,46 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	unsigned long flags;
-	cpumask_t mask;
 	struct rq *rq;
 	int dest_cpu;
+	/* FIXME: Use cpumask_of_node here. */
+	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
+	const struct cpumask *nodemask = &_nodemask;
+
+again:
+	/* Look for allowed, online CPU in same node. */
+	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			goto move;
+
+	/* Any allowed, online CPU? */
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	if (dest_cpu < nr_cpu_ids)
+		goto move;
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu >= nr_cpu_ids) {
+		rq = task_rq_lock(p, &flags);
+		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+		task_rq_unlock(rq, &flags);
 
-	do {
-		/* On same node? */
-		node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
-
-		cpus_and(mask, *pnodemask, p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &mask);
-
-		/* On any allowed CPU? */
-		if (dest_cpu >= nr_cpu_ids)
-			dest_cpu = cpumask_any_and(cpu_online_mask,
-						   &p->cpus_allowed);
-
-		/* No more Mr. Nice Guy. */
-		if (dest_cpu >= nr_cpu_ids) {
-			cpumask_t cpus_allowed;
-
-			cpuset_cpus_allowed_locked(p, &cpus_allowed);
-			/*
-			 * Try to stay on the same cpuset, where the
-			 * current cpuset may be a subset of all cpus.
-			 * The cpuset_cpus_allowed_locked() variant of
-			 * cpuset_cpus_allowed() will not block. It must be
-			 * called within calls to cpuset_lock/cpuset_unlock.
-			 */
-			rq = task_rq_lock(p, &flags);
-			p->cpus_allowed = cpus_allowed;
-			dest_cpu = cpumask_any_and(cpu_online_mask,
-						   &p->cpus_allowed);
-			task_rq_unlock(rq, &flags);
-
-			/*
-			 * Don't tell them about moving exiting tasks or
-			 * kernel threads (both mm NULL), since they never
-			 * leave kernel.
-			 */
-			if (p->mm && printk_ratelimit()) {
-				printk(KERN_INFO "process %d (%s) no "
-				       "longer affine to cpu%d\n",
-				       task_pid_nr(p), p->comm, dead_cpu);
-			}
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       task_pid_nr(p), p->comm, dead_cpu);
 		}
-	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
+	}
+
+move:
+	/* It can have affinity changed while we were choosing. */
+	if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
+		goto again;
 }
 
 /*