author		Ingo Molnar <mingo@elte.hu>	2007-12-05 09:46:09 -0500
committer	Ingo Molnar <mingo@elte.hu>	2007-12-05 09:46:09 -0500
commit		41a2d6cfa3f77ec469e7e5f06b4d7ffd031f9c0e (patch)
tree		462949400fbb320194b9f586c48315c306be53cf /kernel
parent		ce6bd420f43b28038a2c6e8fbb86ad24014727b6 (diff)
sched: style cleanups
style cleanup of various changes that were done recently.
no code changed:
text data bss dec hex filename
23680 2542 28 26250 668a sched.o.before
23680 2542 28 26250 668a sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	132
1 files changed, 68 insertions, 64 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b062856b946c..67d9d1799d86 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p)
209 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | 209 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), |
210 | struct task_group, css); | 210 | struct task_group, css); |
211 | #else | 211 | #else |
212 | tg = &init_task_group; | 212 | tg = &init_task_group; |
213 | #endif | 213 | #endif |
214 | | | |
215 | return tg; | 214 | return tg; |
216 | } | 215 | } |
217 | 216 | ||
@@ -249,15 +248,16 @@ struct cfs_rq {
249 | #ifdef CONFIG_FAIR_GROUP_SCHED | 248 | #ifdef CONFIG_FAIR_GROUP_SCHED |
250 | struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ | 249 | struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ |
251 | 250 | ||
252 | /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in | 251 | /* |
| | 252 | * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in |
253 | * a hierarchy). Non-leaf lrqs hold other higher schedulable entities | 253 | * a hierarchy). Non-leaf lrqs hold other higher schedulable entities |
254 | * (like users, containers etc.) | 254 | * (like users, containers etc.) |
255 | * | 255 | * |
256 | * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This | 256 | * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This |
257 | * list is used during load balance. | 257 | * list is used during load balance. |
258 | */ | 258 | */ |
259 | struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */ | 259 | struct list_head leaf_cfs_rq_list; |
260 | struct task_group *tg; /* group that "owns" this runqueue */ | 260 | struct task_group *tg; /* group that "owns" this runqueue */ |
261 | #endif | 261 | #endif |
262 | }; | 262 | }; |
263 | 263 | ||
@@ -300,7 +300,7 @@ struct rq {
300 | /* list of leaf cfs_rq on this cpu: */ | 300 | /* list of leaf cfs_rq on this cpu: */ |
301 | struct list_head leaf_cfs_rq_list; | 301 | struct list_head leaf_cfs_rq_list; |
302 | #endif | 302 | #endif |
303 | struct rt_rq rt; | 303 | struct rt_rq rt; |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * This is part of a global counter where only the total sum | 306 | * This is part of a global counter where only the total sum |
@@ -457,8 +457,8 @@ enum {
457 | SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, | 457 | SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, |
458 | SCHED_FEAT_WAKEUP_PREEMPT = 2, | 458 | SCHED_FEAT_WAKEUP_PREEMPT = 2, |
459 | SCHED_FEAT_START_DEBIT = 4, | 459 | SCHED_FEAT_START_DEBIT = 4, |
460 | SCHED_FEAT_TREE_AVG = 8, | 460 | SCHED_FEAT_TREE_AVG = 8, |
461 | SCHED_FEAT_APPROX_AVG = 16, | 461 | SCHED_FEAT_APPROX_AVG = 16, |
462 | }; | 462 | }; |
463 | 463 | ||
464 | const_debug unsigned int sysctl_sched_features = | 464 | const_debug unsigned int sysctl_sched_features = |
@@ -591,7 +591,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
591 | 591 | ||
592 | /* | 592 | /* |
593 | * task_rq_lock - lock the runqueue a given task resides on and disable | 593 | * task_rq_lock - lock the runqueue a given task resides on and disable |
594 | * interrupts. Note the ordering: we can safely lookup the task_rq without | 594 | * interrupts. Note the ordering: we can safely lookup the task_rq without |
595 | * explicitly disabling preemption. | 595 | * explicitly disabling preemption. |
596 | */ | 596 | */ |
597 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | 597 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
@@ -779,7 +779,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
779 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | 779 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
780 | * of tasks with abnormal "nice" values across CPUs the contribution that | 780 | * of tasks with abnormal "nice" values across CPUs the contribution that |
781 | * each task makes to its run queue's load is weighted according to its | 781 | * each task makes to its run queue's load is weighted according to its |
782 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a | 782 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
783 | * scaled version of the new time slice allocation that they receive on time | 783 | * scaled version of the new time slice allocation that they receive on time |
784 | * slice expiry etc. | 784 | * slice expiry etc. |
785 | */ | 785 | */ |
@@ -1854,7 +1854,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
1854 | * and do any other architecture-specific cleanup actions. | 1854 | * and do any other architecture-specific cleanup actions. |
1855 | * | 1855 | * |
1856 | * Note that we may have delayed dropping an mm in context_switch(). If | 1856 | * Note that we may have delayed dropping an mm in context_switch(). If |
1857 | * so, we finish that here outside of the runqueue lock. (Doing it | 1857 | * so, we finish that here outside of the runqueue lock. (Doing it |
1858 | * with the lock held can cause deadlocks; see schedule() for | 1858 | * with the lock held can cause deadlocks; see schedule() for |
1859 | * details.) | 1859 | * details.) |
1860 | */ | 1860 | */ |
@@ -2136,7 +2136,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
2136 | /* | 2136 | /* |
2137 | * If dest_cpu is allowed for this process, migrate the task to it. | 2137 | * If dest_cpu is allowed for this process, migrate the task to it. |
2138 | * This is accomplished by forcing the cpu_allowed mask to only | 2138 | * This is accomplished by forcing the cpu_allowed mask to only |
2139 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 2139 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
2140 | * the cpu_allowed mask is restored. | 2140 | * the cpu_allowed mask is restored. |
2141 | */ | 2141 | */ |
2142 | static void sched_migrate_task(struct task_struct *p, int dest_cpu) | 2142 | static void sched_migrate_task(struct task_struct *p, int dest_cpu) |
@@ -2581,7 +2581,7 @@ group_next:
2581 | * tasks around. Thus we look for the minimum possible imbalance. | 2581 | * tasks around. Thus we look for the minimum possible imbalance. |
2582 | * Negative imbalances (*we* are more loaded than anyone else) will | 2582 | * Negative imbalances (*we* are more loaded than anyone else) will |
2583 | * be counted as no imbalance for these purposes -- we can't fix that | 2583 | * be counted as no imbalance for these purposes -- we can't fix that |
2584 | * by pulling tasks to us. Be careful of negative numbers as they'll | 2584 | * by pulling tasks to us. Be careful of negative numbers as they'll |
2585 | * appear as very large values with unsigned longs. | 2585 | * appear as very large values with unsigned longs. |
2586 | */ | 2586 | */ |
2587 | if (max_load <= busiest_load_per_task) | 2587 | if (max_load <= busiest_load_per_task) |
@@ -3016,7 +3016,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3016 | 3016 | ||
3017 | /* | 3017 | /* |
3018 | * This condition is "impossible", if it occurs | 3018 | * This condition is "impossible", if it occurs |
3019 | * we need to fix it. Originally reported by | 3019 | * we need to fix it. Originally reported by |
3020 | * Bjorn Helgaas on a 128-cpu setup. | 3020 | * Bjorn Helgaas on a 128-cpu setup. |
3021 | */ | 3021 | */ |
3022 | BUG_ON(busiest_rq == target_rq); | 3022 | BUG_ON(busiest_rq == target_rq); |
@@ -3048,7 +3048,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3048 | #ifdef CONFIG_NO_HZ | 3048 | #ifdef CONFIG_NO_HZ |
3049 | static struct { | 3049 | static struct { |
3050 | atomic_t load_balancer; | 3050 | atomic_t load_balancer; |
3051 | cpumask_t cpu_mask; | 3051 | cpumask_t cpu_mask; |
3052 | } nohz ____cacheline_aligned = { | 3052 | } nohz ____cacheline_aligned = { |
3053 | .load_balancer = ATOMIC_INIT(-1), | 3053 | .load_balancer = ATOMIC_INIT(-1), |
3054 | .cpu_mask = CPU_MASK_NONE, | 3054 | .cpu_mask = CPU_MASK_NONE, |
@@ -3552,7 +3552,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
3552 | static inline void schedule_debug(struct task_struct *prev) | 3552 | static inline void schedule_debug(struct task_struct *prev) |
3553 | { | 3553 | { |
3554 | /* | 3554 | /* |
3555 | * Test if we are atomic. Since do_exit() needs to call into | 3555 | * Test if we are atomic. Since do_exit() needs to call into |
3556 | * schedule() atomically, we ignore that path for now. | 3556 | * schedule() atomically, we ignore that path for now. |
3557 | * Otherwise, whine if we are scheduling when we should not be. | 3557 | * Otherwise, whine if we are scheduling when we should not be. |
3558 | */ | 3558 | */ |
@@ -3674,7 +3674,7 @@ EXPORT_SYMBOL(schedule);
3674 | #ifdef CONFIG_PREEMPT | 3674 | #ifdef CONFIG_PREEMPT |
3675 | /* | 3675 | /* |
3676 | * this is the entry point to schedule() from in-kernel preemption | 3676 | * this is the entry point to schedule() from in-kernel preemption |
3677 | * off of preempt_enable. Kernel preemptions off return from interrupt | 3677 | * off of preempt_enable. Kernel preemptions off return from interrupt |
3678 | * occur there and call schedule directly. | 3678 | * occur there and call schedule directly. |
3679 | */ | 3679 | */ |
3680 | asmlinkage void __sched preempt_schedule(void) | 3680 | asmlinkage void __sched preempt_schedule(void) |
@@ -3686,7 +3686,7 @@ asmlinkage void __sched preempt_schedule(void)
3686 | #endif | 3686 | #endif |
3687 | /* | 3687 | /* |
3688 | * If there is a non-zero preempt_count or interrupts are disabled, | 3688 | * If there is a non-zero preempt_count or interrupts are disabled, |
3689 | * we do not want to preempt the current task. Just return.. | 3689 | * we do not want to preempt the current task. Just return.. |
3690 | */ | 3690 | */ |
3691 | if (likely(ti->preempt_count || irqs_disabled())) | 3691 | if (likely(ti->preempt_count || irqs_disabled())) |
3692 | return; | 3692 | return; |
@@ -3772,12 +3772,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
3772 | EXPORT_SYMBOL(default_wake_function); | 3772 | EXPORT_SYMBOL(default_wake_function); |
3773 | 3773 | ||
3774 | /* | 3774 | /* |
3775 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just | 3775 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just |
3776 | * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve | 3776 | * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve |
3777 | * number) then we wake all the non-exclusive tasks and one exclusive task. | 3777 | * number) then we wake all the non-exclusive tasks and one exclusive task. |
3778 | * | 3778 | * |
3779 | * There are circumstances in which we can try to wake a task which has already | 3779 | * There are circumstances in which we can try to wake a task which has already |
3780 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns | 3780 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns |
3781 | * zero in this (rare) case, and we handle it by continuing to scan the queue. | 3781 | * zero in this (rare) case, and we handle it by continuing to scan the queue. |
3782 | */ | 3782 | */ |
3783 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | 3783 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4390 | * @policy: new policy. | 4390 | * @policy: new policy. |
4391 | * @param: structure containing the new RT priority. | 4391 | * @param: structure containing the new RT priority. |
4392 | */ | 4392 | */ |
4393 | asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, | 4393 | asmlinkage long |
4394 | struct sched_param __user *param) | 4394 | sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
4395 | { | 4395 | { |
4396 | /* negative values for policy are not valid */ | 4396 | /* negative values for policy are not valid */ |
4397 | if (policy < 0) | 4397 | if (policy < 0) |
@@ -4491,7 +4491,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4491 | 4491 | ||
4492 | /* | 4492 | /* |
4493 | * It is not safe to call set_cpus_allowed with the | 4493 | * It is not safe to call set_cpus_allowed with the |
4494 | * tasklist_lock held. We will bump the task_struct's | 4494 | * tasklist_lock held. We will bump the task_struct's |
4495 | * usage count and then drop tasklist_lock. | 4495 | * usage count and then drop tasklist_lock. |
4496 | */ | 4496 | */ |
4497 | get_task_struct(p); | 4497 | get_task_struct(p); |
@@ -4687,7 +4687,7 @@ EXPORT_SYMBOL(cond_resched);
4687 | * cond_resched_lock() - if a reschedule is pending, drop the given lock, | 4687 | * cond_resched_lock() - if a reschedule is pending, drop the given lock, |
4688 | * call schedule, and on return reacquire the lock. | 4688 | * call schedule, and on return reacquire the lock. |
4689 | * | 4689 | * |
4690 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level | 4690 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
4691 | * operations here to prevent schedule() from being called twice (once via | 4691 | * operations here to prevent schedule() from being called twice (once via |
4692 | * spin_unlock(), once by hand). | 4692 | * spin_unlock(), once by hand). |
4693 | */ | 4693 | */ |
@@ -4741,7 +4741,7 @@ void __sched yield(void)
4741 | EXPORT_SYMBOL(yield); | 4741 | EXPORT_SYMBOL(yield); |
4742 | 4742 | ||
4743 | /* | 4743 | /* |
4744 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so | 4744 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
4745 | * that process accounting knows that this is a task in IO wait state. | 4745 | * that process accounting knows that this is a task in IO wait state. |
4746 | * | 4746 | * |
4747 | * But don't do that if it is a deliberate, throttling IO wait (this task | 4747 | * But don't do that if it is a deliberate, throttling IO wait (this task |
@@ -5050,7 +5050,7 @@ static inline void sched_init_granularity(void)
5050 | * is removed from the allowed bitmask. | 5050 | * is removed from the allowed bitmask. |
5051 | * | 5051 | * |
5052 | * NOTE: the caller must have a valid reference to the task, the | 5052 | * NOTE: the caller must have a valid reference to the task, the |
5053 | * task must not exit() & deallocate itself prematurely. The | 5053 | * task must not exit() & deallocate itself prematurely. The |
5054 | * call is not atomic; no spinlocks may be held. | 5054 | * call is not atomic; no spinlocks may be held. |
5055 | */ | 5055 | */ |
5056 | int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | 5056 | int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) |
@@ -5087,7 +5087,7 @@ out:
5087 | EXPORT_SYMBOL_GPL(set_cpus_allowed); | 5087 | EXPORT_SYMBOL_GPL(set_cpus_allowed); |
5088 | 5088 | ||
5089 | /* | 5089 | /* |
5090 | * Move (not current) task off this cpu, onto dest cpu. We're doing | 5090 | * Move (not current) task off this cpu, onto dest cpu. We're doing |
5091 | * this because either it can't run here any more (set_cpus_allowed() | 5091 | * this because either it can't run here any more (set_cpus_allowed() |
5092 | * away from this CPU, or CPU going down), or because we're | 5092 | * away from this CPU, or CPU going down), or because we're |
5093 | * attempting to rebalance this task on exec (sched_exec). | 5093 | * attempting to rebalance this task on exec (sched_exec). |
@@ -5232,7 +5232,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5232 | * Try to stay on the same cpuset, where the | 5232 | * Try to stay on the same cpuset, where the |
5233 | * current cpuset may be a subset of all cpus. | 5233 | * current cpuset may be a subset of all cpus. |
5234 | * The cpuset_cpus_allowed_locked() variant of | 5234 | * The cpuset_cpus_allowed_locked() variant of |
5235 | * cpuset_cpus_allowed() will not block. It must be | 5235 | * cpuset_cpus_allowed() will not block. It must be |
5236 | * called within calls to cpuset_lock/cpuset_unlock. | 5236 | * called within calls to cpuset_lock/cpuset_unlock. |
5237 | */ | 5237 | */ |
5238 | rq = task_rq_lock(p, &flags); | 5238 | rq = task_rq_lock(p, &flags); |
@@ -5245,10 +5245,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5245 | * kernel threads (both mm NULL), since they never | 5245 | * kernel threads (both mm NULL), since they never |
5246 | * leave kernel. | 5246 | * leave kernel. |
5247 | */ | 5247 | */ |
5248 | if (p->mm && printk_ratelimit()) | 5248 | if (p->mm && printk_ratelimit()) { |
5249 | printk(KERN_INFO "process %d (%s) no " | 5249 | printk(KERN_INFO "process %d (%s) no " |
5250 | "longer affine to cpu%d\n", | 5250 | "longer affine to cpu%d\n", |
5251 | task_pid_nr(p), p->comm, dead_cpu); | 5251 | task_pid_nr(p), p->comm, dead_cpu); |
| | 5252 | } |
5252 | } | 5253 | } |
5253 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 5254 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); |
5254 | } | 5255 | } |
@@ -5350,7 +5351,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5350 | 5351 | ||
5351 | /* | 5352 | /* |
5352 | * Drop lock around migration; if someone else moves it, | 5353 | * Drop lock around migration; if someone else moves it, |
5353 | * that's OK. No task can be added to this CPU, so iteration is | 5354 | * that's OK. No task can be added to this CPU, so iteration is |
5354 | * fine. | 5355 | * fine. |
5355 | */ | 5356 | */ |
5356 | spin_unlock_irq(&rq->lock); | 5357 | spin_unlock_irq(&rq->lock); |
@@ -5414,7 +5415,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
5414 | /* | 5415 | /* |
5415 | * In the intermediate directories, both the child directory and | 5416 | * In the intermediate directories, both the child directory and |
5416 | * procname are dynamically allocated and could fail but the mode | 5417 | * procname are dynamically allocated and could fail but the mode |
5417 | * will always be set. In the lowest directory the names are | 5418 | * will always be set. In the lowest directory the names are |
5418 | * static strings and all have proc handlers. | 5419 | * static strings and all have proc handlers. |
5419 | */ | 5420 | */ |
5420 | for (entry = *tablep; entry->mode; entry++) { | 5421 | for (entry = *tablep; entry->mode; entry++) { |
@@ -5585,7 +5586,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5585 | case CPU_UP_CANCELED_FROZEN: | 5586 | case CPU_UP_CANCELED_FROZEN: |
5586 | if (!cpu_rq(cpu)->migration_thread) | 5587 | if (!cpu_rq(cpu)->migration_thread) |
5587 | break; | 5588 | break; |
5588 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 5589 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
5589 | kthread_bind(cpu_rq(cpu)->migration_thread, | 5590 | kthread_bind(cpu_rq(cpu)->migration_thread, |
5590 | any_online_cpu(cpu_online_map)); | 5591 | any_online_cpu(cpu_online_map)); |
5591 | kthread_stop(cpu_rq(cpu)->migration_thread); | 5592 | kthread_stop(cpu_rq(cpu)->migration_thread); |
@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5612 | migrate_nr_uninterruptible(rq); | 5613 | migrate_nr_uninterruptible(rq); |
5613 | BUG_ON(rq->nr_running != 0); | 5614 | BUG_ON(rq->nr_running != 0); |
5614 | 5615 | ||
5615 | /* No need to migrate the tasks: it was best-effort if | 5616 | /* |
5616 | * they didn't take sched_hotcpu_mutex. Just wake up | 5617 | * No need to migrate the tasks: it was best-effort if |
5617 | * the requestors. */ | 5618 | * they didn't take sched_hotcpu_mutex. Just wake up |
| | 5619 | * the requestors. |
| | 5620 | */ |
5618 | spin_lock_irq(&rq->lock); | 5621 | spin_lock_irq(&rq->lock); |
5619 | while (!list_empty(&rq->migration_queue)) { | 5622 | while (!list_empty(&rq->migration_queue)) { |
5620 | struct migration_req *req; | 5623 | struct migration_req *req; |
@@ -5922,7 +5925,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
5922 | * @node: node whose sched_domain we're building | 5925 | * @node: node whose sched_domain we're building |
5923 | * @used_nodes: nodes already in the sched_domain | 5926 | * @used_nodes: nodes already in the sched_domain |
5924 | * | 5927 | * |
5925 | * Find the next node to include in a given scheduling domain. Simply | 5928 | * Find the next node to include in a given scheduling domain. Simply |
5926 | * finds the closest node not already in the @used_nodes map. | 5929 | * finds the closest node not already in the @used_nodes map. |
5927 | * | 5930 | * |
5928 | * Should use nodemask_t. | 5931 | * Should use nodemask_t. |
@@ -5962,7 +5965,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
5962 | * @node: node whose cpumask we're constructing | 5965 | * @node: node whose cpumask we're constructing |
5963 | * @size: number of nodes to include in this span | 5966 | * @size: number of nodes to include in this span |
5964 | * | 5967 | * |
5965 | * Given a node, construct a good cpumask for its sched_domain to span. It | 5968 | * Given a node, construct a good cpumask for its sched_domain to span. It |
5966 | * should be one that prevents unnecessary balancing, but also spreads tasks | 5969 | * should be one that prevents unnecessary balancing, but also spreads tasks |
5967 | * out optimally. | 5970 | * out optimally. |
5968 | */ | 5971 | */ |
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5999 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 6002 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); |
6000 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 6003 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); |
6001 | 6004 | ||
6002 | static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, | 6005 | static int |
6003 | struct sched_group **sg) | 6006 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) |
6004 | { | 6007 | { |
6005 | if (sg) | 6008 | if (sg) |
6006 | *sg = &per_cpu(sched_group_cpus, cpu); | 6009 | *sg = &per_cpu(sched_group_cpus, cpu); |
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
6017 | #endif | 6020 | #endif |
6018 | 6021 | ||
6019 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 6022 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
6020 | static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, | 6023 | static int |
6021 | struct sched_group **sg) | 6024 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) |
6022 | { | 6025 | { |
6023 | int group; | 6026 | int group; |
6024 | cpumask_t mask = per_cpu(cpu_sibling_map, cpu); | 6027 | cpumask_t mask = per_cpu(cpu_sibling_map, cpu); |
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
6029 | return group; | 6032 | return group; |
6030 | } | 6033 | } |
6031 | #elif defined(CONFIG_SCHED_MC) | 6034 | #elif defined(CONFIG_SCHED_MC) |
6032 | static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, | 6035 | static int |
6033 | struct sched_group **sg) | 6036 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) |
6034 | { | 6037 | { |
6035 | if (sg) | 6038 | if (sg) |
6036 | *sg = &per_cpu(sched_group_core, cpu); | 6039 | *sg = &per_cpu(sched_group_core, cpu); |
@@ -6041,8 +6044,8 @@ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
6041 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 6044 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); |
6042 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 6045 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); |
6043 | 6046 | ||
6044 | static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, | 6047 | static int |
6045 | struct sched_group **sg) | 6048 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) |
6046 | { | 6049 | { |
6047 | int group; | 6050 | int group; |
6048 | #ifdef CONFIG_SCHED_MC | 6051 | #ifdef CONFIG_SCHED_MC |
@@ -6222,7 +6225,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6222 | * Allocate the per-node list of sched groups | 6225 | * Allocate the per-node list of sched groups |
6223 | */ | 6226 | */ |
6224 | sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), | 6227 | sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), |
6225 | GFP_KERNEL); | 6228 | GFP_KERNEL); |
6226 | if (!sched_group_nodes) { | 6229 | if (!sched_group_nodes) { |
6227 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 6230 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
6228 | return -ENOMEM; | 6231 | return -ENOMEM; |
@@ -6469,7 +6472,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
6469 | static cpumask_t fallback_doms; | 6472 | static cpumask_t fallback_doms; |
6470 | 6473 | ||
6471 | /* | 6474 | /* |
6472 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 6475 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
6473 | * For now this just excludes isolated cpus, but could be used to | 6476 | * For now this just excludes isolated cpus, but could be used to |
6474 | * exclude other special cases in the future. | 6477 | * exclude other special cases in the future. |
6475 | */ | 6478 | */ |
@@ -6511,19 +6514,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
6511 | 6514 | ||
6512 | /* | 6515 | /* |
6513 | * Partition sched domains as specified by the 'ndoms_new' | 6516 | * Partition sched domains as specified by the 'ndoms_new' |
6514 | * cpumasks in the array doms_new[] of cpumasks. This compares | 6517 | * cpumasks in the array doms_new[] of cpumasks. This compares |
6515 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 6518 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
6516 | * It destroys each deleted domain and builds each new domain. | 6519 | * It destroys each deleted domain and builds each new domain. |
6517 | * | 6520 | * |
6518 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 6521 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. |
6519 | * The masks don't intersect (don't overlap.) We should setup one | 6522 | * The masks don't intersect (don't overlap.) We should setup one |
6520 | * sched domain for each mask. CPUs not in any of the cpumasks will | 6523 | * sched domain for each mask. CPUs not in any of the cpumasks will |
6521 | * not be load balanced. If the same cpumask appears both in the | 6524 | * not be load balanced. If the same cpumask appears both in the |
6522 | * current 'doms_cur' domains and in the new 'doms_new', we can leave | 6525 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
6523 | * it as it is. | 6526 | * it as it is. |
6524 | * | 6527 | * |
6525 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 6528 | * The passed in 'doms_new' should be kmalloc'd. This routine takes |
6526 | * ownership of it and will kfree it when done with it. If the caller | 6529 | * ownership of it and will kfree it when done with it. If the caller |
6527 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 6530 | * failed the kmalloc call, then it can pass in doms_new == NULL, |
6528 | * and partition_sched_domains() will fallback to the single partition | 6531 | * and partition_sched_domains() will fallback to the single partition |
6529 | * 'fallback_doms'. | 6532 | * 'fallback_doms'. |
@@ -6653,7 +6656,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6653 | #endif | 6656 | #endif |
6654 | 6657 | ||
6655 | /* | 6658 | /* |
6656 | * Force a reinitialization of the sched domains hierarchy. The domains | 6659 | * Force a reinitialization of the sched domains hierarchy. The domains |
6657 | * and groups cannot be updated in place without racing with the balancing | 6660 | * and groups cannot be updated in place without racing with the balancing |
6658 | * code, so we temporarily attach all running cpus to the NULL domain | 6661 | * code, so we temporarily attach all running cpus to the NULL domain |
6659 | * which will prevent rebalancing while the sched domains are recalculated. | 6662 | * which will prevent rebalancing while the sched domains are recalculated. |
@@ -6943,8 +6946,8 @@ struct task_struct *curr_task(int cpu)
6943 | * @p: the task pointer to set. | 6946 | * @p: the task pointer to set. |
6944 | * | 6947 | * |
6945 | * Description: This function must only be used when non-maskable interrupts | 6948 | * Description: This function must only be used when non-maskable interrupts |
6946 | * are serviced on a separate stack. It allows the architecture to switch the | 6949 | * are serviced on a separate stack. It allows the architecture to switch the |
6947 | * notion of the current task on a cpu in a non-blocking manner. This function | 6950 | * notion of the current task on a cpu in a non-blocking manner. This function |
6948 | * must be called with all CPU's synchronized, and interrupts disabled, the | 6951 | * must be called with all CPU's synchronized, and interrupts disabled, the |
6949 | * and caller must save the original value of the current task (see | 6952 | * and caller must save the original value of the current task (see |
6950 | * curr_task() above) and restore that value before reenabling interrupts and | 6953 | * curr_task() above) and restore that value before reenabling interrupts and |
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
7193 | return &tg->css; | 7196 | return &tg->css; |
7194 | } | 7197 | } |
7195 | 7198 | ||
7196 | static void cpu_cgroup_destroy(struct cgroup_subsys *ss, | 7199 | static void |
7197 | struct cgroup *cgrp) | 7200 | cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
7198 | { | 7201 | { |
7199 | struct task_group *tg = cgroup_tg(cgrp); | 7202 | struct task_group *tg = cgroup_tg(cgrp); |
7200 | 7203 | ||
7201 | sched_destroy_group(tg); | 7204 | sched_destroy_group(tg); |
7202 | } | 7205 | } |
7203 | 7206 | ||
7204 | static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, | 7207 | static int |
7205 | struct cgroup *cgrp, struct task_struct *tsk) | 7208 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
| | 7209 | struct task_struct *tsk) |
7206 | { | 7210 | { |
7207 | /* We don't support RT-tasks being in separate groups */ | 7211 | /* We don't support RT-tasks being in separate groups */ |
7208 | if (tsk->sched_class != &fair_sched_class) | 7212 | if (tsk->sched_class != &fair_sched_class) |
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
7308 | } | 7312 | } |
7309 | 7313 | ||
7310 | /* destroy an existing cpu accounting group */ | 7314 | /* destroy an existing cpu accounting group */ |
7311 | static void cpuacct_destroy(struct cgroup_subsys *ss, | 7315 | static void |
7312 | struct cgroup *cont) | 7316 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont) |
7313 | { | 7317 | { |
7314 | struct cpuacct *ca = cgroup_ca(cont); | 7318 | struct cpuacct *ca = cgroup_ca(cont); |
7315 | 7319 | ||