author | Ingo Molnar <mingo@elte.hu> | 2005-09-10 03:26:11 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-09-10 13:06:22 -0400 |
commit | 95cdf3b799a481969a48d69a1a52916ad5da6694 (patch) | |
tree | 38aab4b832aaebe2e36879deaeec36e7a939590d /kernel | |
parent | da5a5522709a030da91932d4d4c2b179a481a8c0 (diff) |
[PATCH] sched cleanups
whitespace cleanups.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
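The cleanups fall into two patterns: binding the '*' in a pointer declaration to the parameter name rather than to the type, and wrapping function declarations that would run past 80 columns. A minimal sketch of both patterns (illustration only; the typedefs and the _old/_new names are stand-ins, not taken from the patch):

```c
/* Stand-in types so this fragment is self-contained; the real
 * declarations live in kernel/sched.c. */
typedef struct task_struct { int pid; } task_t;
struct sched_group { int dummy; };

/* 1) The '*' binds to the parameter name, not to the type. */
void wait_task_inactive_old(task_t * p);	/* style before the patch */
void wait_task_inactive_new(task_t *p);		/* style after the patch  */

/* 2) Declarations that would run past 80 columns are wrapped, for
 *    example by moving the return type onto its own line. */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu);
```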
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 44 |
1 files changed, 25 insertions, 19 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index bac23fb418f6..24eed372d280 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -875,7 +875,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
875 | * smp_call_function() if an IPI is sent by the same process we are | 875 | * smp_call_function() if an IPI is sent by the same process we are |
876 | * waiting to become inactive. | 876 | * waiting to become inactive. |
877 | */ | 877 | */ |
878 | void wait_task_inactive(task_t * p) | 878 | void wait_task_inactive(task_t *p) |
879 | { | 879 | { |
880 | unsigned long flags; | 880 | unsigned long flags; |
881 | runqueue_t *rq; | 881 | runqueue_t *rq; |
@@ -1007,8 +1007,8 @@ nextgroup:
1007 | /* | 1007 | /* |
1008 | * find_idlest_queue - find the idlest runqueue among the cpus in group. | 1008 | * find_idlest_queue - find the idlest runqueue among the cpus in group. |
1009 | */ | 1009 | */ |
1010 | static int find_idlest_cpu(struct sched_group *group, | 1010 | static int |
1011 | struct task_struct *p, int this_cpu) | 1011 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
1012 | { | 1012 | { |
1013 | cpumask_t tmp; | 1013 | cpumask_t tmp; |
1014 | unsigned long load, min_load = ULONG_MAX; | 1014 | unsigned long load, min_load = ULONG_MAX; |
@@ -1136,7 +1136,7 @@ static inline int wake_idle(int cpu, task_t *p)
1136 | * | 1136 | * |
1137 | * returns failure only if the task is already active. | 1137 | * returns failure only if the task is already active. |
1138 | */ | 1138 | */ |
1139 | static int try_to_wake_up(task_t * p, unsigned int state, int sync) | 1139 | static int try_to_wake_up(task_t *p, unsigned int state, int sync) |
1140 | { | 1140 | { |
1141 | int cpu, this_cpu, success = 0; | 1141 | int cpu, this_cpu, success = 0; |
1142 | unsigned long flags; | 1142 | unsigned long flags; |
@@ -1283,7 +1283,7 @@ out:
1283 | return success; | 1283 | return success; |
1284 | } | 1284 | } |
1285 | | 1285 | |
1286 | int fastcall wake_up_process(task_t * p) | 1286 | int fastcall wake_up_process(task_t *p) |
1287 | { | 1287 | { |
1288 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | | 1288 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | |
1289 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); | 1289 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); |
@@ -1362,7 +1362,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
1362 | * that must be done for every newly created context, then puts the task | 1362 | * that must be done for every newly created context, then puts the task |
1363 | * on the runqueue and wakes it. | 1363 | * on the runqueue and wakes it. |
1364 | */ | 1364 | */ |
1365 | void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) | 1365 | void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) |
1366 | { | 1366 | { |
1367 | unsigned long flags; | 1367 | unsigned long flags; |
1368 | int this_cpu, cpu; | 1368 | int this_cpu, cpu; |
@@ -1445,7 +1445,7 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
1445 | * artificially, because any timeslice recovered here | 1445 | * artificially, because any timeslice recovered here |
1446 | * was given away by the parent in the first place.) | 1446 | * was given away by the parent in the first place.) |
1447 | */ | 1447 | */ |
1448 | void fastcall sched_exit(task_t * p) | 1448 | void fastcall sched_exit(task_t *p) |
1449 | { | 1449 | { |
1450 | unsigned long flags; | 1450 | unsigned long flags; |
1451 | runqueue_t *rq; | 1451 | runqueue_t *rq; |
@@ -1766,7 +1766,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1766 | */ | 1766 | */ |
1767 | static inline | 1767 | static inline |
1768 | int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, | 1768 | int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, |
1769 | struct sched_domain *sd, enum idle_type idle, int *all_pinned) | 1769 | struct sched_domain *sd, enum idle_type idle, |
| | 1770 | int *all_pinned) |
1770 | { | 1771 | { |
1771 | /* | 1772 | /* |
1772 | * We do not migrate tasks that are: | 1773 | * We do not migrate tasks that are: |
@@ -3058,7 +3059,8 @@ need_resched:
3058 | | 3059 | |
3059 | #endif /* CONFIG_PREEMPT */ | 3060 | #endif /* CONFIG_PREEMPT */ |
3060 | | 3061 | |
3061 | int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) | 3062 | int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, |
| | 3063 | void *key) |
3062 | { | 3064 | { |
3063 | task_t *p = curr->private; | 3065 | task_t *p = curr->private; |
3064 | return try_to_wake_up(p, mode, sync); | 3066 | return try_to_wake_up(p, mode, sync); |
@@ -3100,7 +3102,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3100 | * @key: is directly passed to the wakeup function | 3102 | * @key: is directly passed to the wakeup function |
3101 | */ | 3103 | */ |
3102 | void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, | 3104 | void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, |
3103 | int nr_exclusive, void *key) | 3105 | int nr_exclusive, void *key) |
3104 | { | 3106 | { |
3105 | unsigned long flags; | 3107 | unsigned long flags; |
3106 | | 3108 | |
@@ -3132,7 +3134,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
3132 | * | 3134 | * |
3133 | * On UP it can prevent extra preemption. | 3135 | * On UP it can prevent extra preemption. |
3134 | */ | 3136 | */ |
3135 | void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | 3137 | void fastcall |
| | 3138 | __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) |
3136 | { | 3139 | { |
3137 | unsigned long flags; | 3140 | unsigned long flags; |
3138 | int sync = 1; | 3141 | int sync = 1; |
@@ -3323,7 +3326,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3323 | | 3326 | |
3324 | EXPORT_SYMBOL(interruptible_sleep_on); | 3327 | EXPORT_SYMBOL(interruptible_sleep_on); |
3325 | | 3328 | |
3326 | long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | 3329 | long fastcall __sched |
| | 3330 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) |
3327 | { | 3331 | { |
3328 | SLEEP_ON_VAR | 3332 | SLEEP_ON_VAR |
3329 | | 3333 | |
@@ -3542,7 +3546,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
3542 | * @policy: new policy. | 3546 | * @policy: new policy. |
3543 | * @param: structure containing the new RT priority. | 3547 | * @param: structure containing the new RT priority. |
3544 | */ | 3548 | */ |
3545 | int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) | 3549 | int sched_setscheduler(struct task_struct *p, int policy, |
| | 3550 | struct sched_param *param) |
3546 | { | 3551 | { |
3547 | int retval; | 3552 | int retval; |
3548 | int oldprio, oldpolicy = -1; | 3553 | int oldprio, oldpolicy = -1; |
@@ -3562,7 +3567,7 @@ recheck:
3562 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. | 3567 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. |
3563 | */ | 3568 | */ |
3564 | if (param->sched_priority < 0 || | 3569 | if (param->sched_priority < 0 || |
3565 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || | 3570 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
3566 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) | 3571 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) |
3567 | return -EINVAL; | 3572 | return -EINVAL; |
3568 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) | 3573 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) |
@@ -3625,7 +3630,8 @@ recheck:
3625 | } | 3630 | } |
3626 | EXPORT_SYMBOL_GPL(sched_setscheduler); | 3631 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
3627 | | 3632 | |
3628 | static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | 3633 | static int |
| | 3634 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
3629 | { | 3635 | { |
3630 | int retval; | 3636 | int retval; |
3631 | struct sched_param lparam; | 3637 | struct sched_param lparam; |
@@ -3956,7 +3962,7 @@ EXPORT_SYMBOL(cond_resched);
3956 | * operations here to prevent schedule() from being called twice (once via | 3962 | * operations here to prevent schedule() from being called twice (once via |
3957 | * spin_unlock(), once by hand). | 3963 | * spin_unlock(), once by hand). |
3958 | */ | 3964 | */ |
3959 | int cond_resched_lock(spinlock_t * lock) | 3965 | int cond_resched_lock(spinlock_t *lock) |
3960 | { | 3966 | { |
3961 | int ret = 0; | 3967 | int ret = 0; |
3962 | | 3968 | |
@@ -4139,7 +4145,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
4139 | return list_entry(p->sibling.next,struct task_struct,sibling); | 4145 | return list_entry(p->sibling.next,struct task_struct,sibling); |
4140 | } | 4146 | } |
4141 | | 4147 | |
4142 | static void show_task(task_t * p) | 4148 | static void show_task(task_t *p) |
4143 | { | 4149 | { |
4144 | task_t *relative; | 4150 | task_t *relative; |
4145 | unsigned state; | 4151 | unsigned state; |
@@ -4165,7 +4171,7 @@ static void show_task(task_t * p)
4165 | #endif | 4171 | #endif |
4166 | #ifdef CONFIG_DEBUG_STACK_USAGE | 4172 | #ifdef CONFIG_DEBUG_STACK_USAGE |
4167 | { | 4173 | { |
4168 | unsigned long * n = (unsigned long *) (p->thread_info+1); | 4174 | unsigned long *n = (unsigned long *) (p->thread_info+1); |
4169 | while (!*n) | 4175 | while (!*n) |
4170 | n++; | 4176 | n++; |
4171 | free = (unsigned long) n - (unsigned long)(p->thread_info+1); | 4177 | free = (unsigned long) n - (unsigned long)(p->thread_info+1); |
@@ -4374,7 +4380,7 @@ out:
4374 | * thread migration by bumping thread off CPU then 'pushing' onto | 4380 | * thread migration by bumping thread off CPU then 'pushing' onto |
4375 | * another runqueue. | 4381 | * another runqueue. |
4376 | */ | 4382 | */ |
4377 | static int migration_thread(void * data) | 4383 | static int migration_thread(void *data) |
4378 | { | 4384 | { |
4379 | runqueue_t *rq; | 4385 | runqueue_t *rq; |
4380 | int cpu = (long)data; | 4386 | int cpu = (long)data; |