Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	192
1 files changed, 101 insertions, 91 deletions
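
This patch converts the remaining scheduler-internal users of the old task_t typedef over to plain struct task_struct, with no functional change. For orientation, the typedef being retired was only an alias; a rough sketch of the pre-patch definition (the authoritative version lived in include/linux/sched.h at the time) is:

	/* Sketch of the pre-patch alias this series removes; see
	 * include/linux/sched.h of that era for the real definition. */
	struct task_struct;
	typedef struct task_struct task_t;

So every "task_t *p" in the hunks below is spelled out as "struct task_struct *p", and declarations are re-wrapped where the longer type name pushes a line past 80 columns.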
diff --git a/kernel/sched.c b/kernel/sched.c
index b0326141f841..021b31219516 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -179,7 +179,7 @@ static unsigned int static_prio_timeslice(int static_prio)
 	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
-static inline unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(struct task_struct *p)
 {
 	return static_prio_timeslice(p->static_prio);
 }
@@ -227,7 +227,7 @@ struct runqueue {
 
 	unsigned long expired_timestamp;
 	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
+	struct task_struct *curr, *idle;
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int best_expired_prio;
@@ -240,7 +240,7 @@ struct runqueue {
 	int active_balance;
 	int push_cpu;
 
-	task_t *migration_thread;
+	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
 
@@ -291,16 +291,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 	return rq->curr == p;
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
@@ -317,7 +317,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 #ifdef CONFIG_SMP
 	return p->oncpu;
@@ -326,7 +326,7 @@ static inline int task_running(runqueue_t *rq, task_t *p)
 #endif
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -343,7 +343,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -364,7 +364,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
  * __task_rq_lock - lock the runqueue a given task resides on.
  * Must be called interrupts disabled.
  */
-static inline runqueue_t *__task_rq_lock(task_t *p)
+static inline runqueue_t *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
@@ -384,7 +384,7 @@ repeat_lock_task:
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
@@ -541,7 +541,7 @@ static inline runqueue_t *this_rq_lock(void)
  * long it was from the *first* time it was queued to the time that it
  * finally hit a cpu.
  */
-static inline void sched_info_dequeued(task_t *t)
+static inline void sched_info_dequeued(struct task_struct *t)
 {
 	t->sched_info.last_queued = 0;
 }
@@ -551,7 +551,7 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(task_t *t)
+static void sched_info_arrive(struct task_struct *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t)
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
  */
-static inline void sched_info_queued(task_t *t)
+static inline void sched_info_queued(struct task_struct *t)
 {
 	if (!t->sched_info.last_queued)
 		t->sched_info.last_queued = jiffies;
@@ -595,7 +595,7 @@ static inline void sched_info_queued(task_t *t)
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
  */
-static inline void sched_info_depart(task_t *t)
+static inline void sched_info_depart(struct task_struct *t)
 {
 	struct runqueue *rq = task_rq(t);
 	unsigned long diff = jiffies - t->sched_info.last_arrival;
@@ -611,7 +611,8 @@ static inline void sched_info_depart(task_t *t)
  * their time slice. (This may also be called when switching to or from
  * the idle task.) We are only called when prev != next.
  */
-static inline void sched_info_switch(task_t *prev, task_t *next)
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct runqueue *rq = task_rq(prev);
 
@@ -683,7 +684,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
  * Both properties are important to certain workloads.
  */
 
-static inline int __normal_prio(task_t *p)
+static inline int __normal_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
@@ -719,7 +720,7 @@ static inline int __normal_prio(task_t *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
 	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static void set_load_weight(task_t *p)
+static void set_load_weight(struct task_struct *p)
 {
 	if (has_rt_policy(p)) {
 #ifdef CONFIG_SMP
@@ -737,23 +738,25 @@ static void set_load_weight(task_t *p)
 		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
 }
 
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load += p->load_weight;
 }
 
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
 
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running++;
 	inc_raw_weighted_load(rq, p);
 }
 
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
 	dec_raw_weighted_load(rq, p);
@@ -766,7 +769,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq)
  * setprio syscalls, and whenever the interactivity
  * estimator recalculates.
  */
-static inline int normal_prio(task_t *p)
+static inline int normal_prio(struct task_struct *p)
 {
 	int prio;
 
@@ -784,7 +787,7 @@ static inline int normal_prio(task_t *p)
  * interactivity modifiers. Will be RT if the task got
  * RT-boosted. If not then it returns p->normal_prio.
  */
-static int effective_prio(task_t *p)
+static int effective_prio(struct task_struct *p)
 {
 	p->normal_prio = normal_prio(p);
 	/*
@@ -800,7 +803,7 @@ static int effective_prio(task_t *p)
 /*
  * __activate_task - move a task to the runqueue.
  */
-static void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(struct task_struct *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
@@ -813,7 +816,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
 /*
  * __activate_idle_task - move idle task to the _front_ of runqueue.
  */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)
 {
 	enqueue_task_head(p, rq->active);
 	inc_nr_running(p, rq);
@@ -823,7 +826,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
  * Recalculate p->normal_prio and p->prio after having slept,
  * updating the sleep-average too:
  */
-static int recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
 	/* Caller must always ensure 'now >= p->timestamp' */
 	unsigned long sleep_time = now - p->timestamp;
@@ -895,7 +898,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
  * Update all the scheduling statistics stuff. (sleep average
  * calculation, priority modifiers, etc.)
  */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
 {
 	unsigned long long now;
 
@@ -962,7 +965,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 #endif
 
-static void resched_task(task_t *p)
+static void resched_task(struct task_struct *p)
 {
 	int cpu;
 
@@ -983,7 +986,7 @@ static void resched_task(task_t *p)
 	smp_send_reschedule(cpu);
 }
 #else
-static inline void resched_task(task_t *p)
+static inline void resched_task(struct task_struct *p)
 {
 	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
@@ -994,7 +997,7 @@ static inline void resched_task(task_t *p)
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
  */
-inline int task_curr(const task_t *p)
+inline int task_curr(const struct task_struct *p)
 {
 	return cpu_curr(task_cpu(p)) == p;
 }
@@ -1009,7 +1012,7 @@ unsigned long weighted_cpuload(const int cpu)
 typedef struct {
 	struct list_head list;
 
-	task_t *task;
+	struct task_struct *task;
 	int dest_cpu;
 
 	struct completion done;
@@ -1019,7 +1022,8 @@ typedef struct {
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int
+migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
 {
 	runqueue_t *rq = task_rq(p);
 
@@ -1049,7 +1053,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t *p)
+void wait_task_inactive(struct task_struct *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -1083,7 +1087,7 @@ repeat:
  * to another CPU then no harm is done and the purpose has been
  * achieved as well.
  */
-void kick_process(task_t *p)
+void kick_process(struct task_struct *p)
 {
 	int cpu;
 
@@ -1286,7 +1290,7 @@ nextlevel:
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(int cpu, struct task_struct *p)
 {
 	cpumask_t tmp;
 	struct sched_domain *sd;
@@ -1309,7 +1313,7 @@ static int wake_idle(int cpu, task_t *p)
 	return cpu;
 }
 #else
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(int cpu, struct task_struct *p)
 {
 	return cpu;
 }
@@ -1329,7 +1333,7 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
 	int cpu, this_cpu, success = 0;
 	unsigned long flags;
@@ -1487,14 +1491,14 @@ out:
 	return success;
 }
 
-int fastcall wake_up_process(task_t *p)
+int fastcall wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
 			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -1503,7 +1507,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state)
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  */
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
 
@@ -1571,7 +1575,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	int this_cpu, cpu;
@@ -1655,7 +1659,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t *p)
+void fastcall sched_exit(struct task_struct *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -1689,7 +1693,7 @@ void fastcall sched_exit(task_t *p)
  * prepare_task_switch sets up locking and calls architecture specific
  * hooks.
  */
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)
 {
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1710,7 +1714,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -1748,7 +1752,7 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(task_t *prev)
+asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	runqueue_t *rq = this_rq();
@@ -1765,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev)
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+static inline struct task_struct *
+context_switch(runqueue_t *rq, struct task_struct *prev,
+	       struct task_struct *next)
 {
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
@@ -1937,7 +1942,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
  * the cpu_allowed mask is restored.
  */
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 {
 	migration_req_t req;
 	runqueue_t *rq;
@@ -1952,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu)
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
 		struct task_struct *mt = rq->migration_thread;
+
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
 		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
+
 		return;
 	}
 out:
@@ -1980,9 +1987,9 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
-	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+static void pull_task(runqueue_t *src_rq, prio_array_t *src_array,
+		      struct task_struct *p, runqueue_t *this_rq,
+		      prio_array_t *this_array, int this_cpu)
 {
 	dequeue_task(p, src_array);
 	dec_nr_running(p, src_rq);
@@ -2003,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
 {
@@ -2052,8 +2059,8 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 	    best_prio_seen, skip_for_load;
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
+	struct task_struct *tmp;
 	long rem_load_move;
-	task_t *tmp;
 
 	if (max_nr_move == 0 || max_load_move == 0)
 		goto out;
@@ -2105,7 +2112,7 @@ skip_bitmap:
 	head = array->queue + idx;
 	curr = head->prev;
 skip_queue:
-	tmp = list_entry(curr, task_t, run_list);
+	tmp = list_entry(curr, struct task_struct, run_list);
 
 	curr = curr->prev;
 
@@ -2819,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Bank in p->sched_time the ns elapsed since the last tick or switch.
  */
 static inline void
-update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)
+update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now)
 {
 	p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
 }
@@ -2828,7 +2835,7 @@ update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)
  * Return current->sched_time plus any more ns on the sched_clock
  * that have not yet been banked.
  */
-unsigned long long current_sched_time(const task_t *p)
+unsigned long long current_sched_time(const struct task_struct *p)
 {
 	unsigned long long ns;
 	unsigned long flags;
@@ -2945,9 +2952,9 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 void scheduler_tick(void)
 {
 	unsigned long long now = sched_clock();
+	struct task_struct *p = current;
 	int cpu = smp_processor_id();
 	runqueue_t *rq = this_rq();
-	task_t *p = current;
 
 	update_cpu_clock(p, rq, now);
 
@@ -3079,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu)
  * utilize, if another task runs on a sibling. This models the
  * slowdown effect of other tasks running on siblings:
  */
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+static inline unsigned long
+smt_slice(struct task_struct *p, struct sched_domain *sd)
 {
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
@@ -3090,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
  * acquire their lock. As we only trylock the normal locking order does not
  * need to be obeyed.
  */
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+static int
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int ret = 0, i;
@@ -3110,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
 		return 0;
 
 	for_each_cpu_mask(i, sd->span) {
+		struct task_struct *smt_curr;
 		runqueue_t *smt_rq;
-		task_t *smt_curr;
 
 		if (i == this_cpu)
 			continue;
@@ -3157,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu)
 {
 }
 static inline int
-dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
 {
 	return 0;
 }
@@ -3211,11 +3220,11 @@ static inline int interactive_sleep(enum sleep_type sleep_type)
  */
 asmlinkage void __sched schedule(void)
 {
+	struct task_struct *prev, *next;
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
 	int cpu, idx, new_prio;
-	task_t *prev, *next;
 	prio_array_t *array;
 	long *switch_count;
 	runqueue_t *rq;
@@ -3308,7 +3317,7 @@ need_resched_nonpreemptible:
 
 	idx = sched_find_first_bit(array->bitmap);
 	queue = array->queue + idx;
-	next = list_entry(queue->next, task_t, run_list);
+	next = list_entry(queue->next, struct task_struct, run_list);
 
 	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
 		unsigned long long delta = now - next->timestamp;
@@ -3776,7 +3785,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
  *
  * Used by the rt_mutex code to implement priority inheritance logic.
  */
-void rt_mutex_setprio(task_t *p, int prio)
+void rt_mutex_setprio(struct task_struct *p, int prio)
 {
 	unsigned long flags;
 	prio_array_t *array;
@@ -3817,7 +3826,7 @@ void rt_mutex_setprio(task_t *p, int prio)
 
 #endif
 
-void set_user_nice(task_t *p, long nice)
+void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta;
 	unsigned long flags;
@@ -3873,7 +3882,7 @@ EXPORT_SYMBOL(set_user_nice);
  * @p: task
  * @nice: nice value
  */
-int can_nice(const task_t *p, const int nice)
+int can_nice(const struct task_struct *p, const int nice)
 {
 	/* convert nice value [19,-20] to rlimit style value [1,40] */
 	int nice_rlim = 20 - nice;
@@ -3932,7 +3941,7 @@ asmlinkage long sys_nice(int increment)
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
-int task_prio(const task_t *p)
+int task_prio(const struct task_struct *p)
 {
 	return p->prio - MAX_RT_PRIO;
 }
@@ -3941,7 +3950,7 @@ int task_prio(const task_t *p)
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
  */
-int task_nice(const task_t *p)
+int task_nice(const struct task_struct *p)
 {
 	return TASK_NICE(p);
 }
@@ -3960,7 +3969,7 @@ int idle_cpu(int cpu)
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
  */
-task_t *idle_task(int cpu)
+struct task_struct *idle_task(int cpu)
 {
 	return cpu_rq(cpu)->idle;
 }
@@ -3969,7 +3978,7 @@ task_t *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -4103,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
-	int retval;
 	struct sched_param lparam;
 	struct task_struct *p;
+	int retval;
 
 	if (!param || pid < 0)
 		return -EINVAL;
@@ -4121,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 	read_unlock_irq(&tasklist_lock);
 	retval = sched_setscheduler(p, policy, &lparam);
 	put_task_struct(p);
+
 	return retval;
 }
 
@@ -4156,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  */
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
+	struct task_struct *p;
 	int retval = -EINVAL;
-	task_t *p;
 
 	if (pid < 0)
 		goto out_nounlock;
@@ -4184,8 +4194,8 @@ out_nounlock:
 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
 	struct sched_param lp;
+	struct task_struct *p;
 	int retval = -EINVAL;
-	task_t *p;
 
 	if (!param || pid < 0)
 		goto out_nounlock;
@@ -4218,9 +4228,9 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 {
-	task_t *p;
-	int retval;
 	cpumask_t cpus_allowed;
+	struct task_struct *p;
+	int retval;
 
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
@@ -4306,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
 {
+	struct task_struct *p;
 	int retval;
-	task_t *p;
 
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
@@ -4592,9 +4602,9 @@ asmlinkage long sys_sched_get_priority_min(int policy)
 asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
+	struct task_struct *p;
 	int retval = -EINVAL;
 	struct timespec t;
-	task_t *p;
 
 	if (pid < 0)
 		goto out_nounlock;
@@ -4641,12 +4651,13 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 	return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t *p)
+static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+
+static void show_task(struct task_struct *p)
 {
-	task_t *relative;
-	unsigned state;
+	struct task_struct *relative;
 	unsigned long free = 0;
-	static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+	unsigned state;
 
 	printk("%-13.13s ", p->comm);
 	state = p->state ? __ffs(p->state) + 1 : 0;
@@ -4697,7 +4708,7 @@ static void show_task(task_t *p)
 
 void show_state(void)
 {
-	task_t *g, *p;
+	struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
 	printk("\n"
@@ -4730,7 +4741,7 @@ void show_state(void)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void __devinit init_idle(task_t *idle, int cpu)
+void __devinit init_idle(struct task_struct *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -4793,7 +4804,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	unsigned long flags;
 	migration_req_t req;
@@ -5061,7 +5072,7 @@ void idle_task_exit(void)
 	mmdrop(mm);
 }
 
-static void migrate_dead(unsigned int dead_cpu, task_t *p)
+static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
 	struct runqueue *rq = cpu_rq(dead_cpu);
 
@@ -5096,9 +5107,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 			struct list_head *list = &rq->arrays[arr].queue[i];
 
 			while (!list_empty(list))
-				migrate_dead(dead_cpu,
-					     list_entry(list->next, task_t,
-							run_list));
+				migrate_dead(dead_cpu, list_entry(list->next,
+					     struct task_struct, run_list));
 		}
 	}
 }
@@ -6801,7 +6811,7 @@ void normalize_rt_tasks(void)
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  */
-task_t *curr_task(int cpu)
+struct task_struct *curr_task(int cpu)
 {
 	return cpu_curr(cpu);
 }
@@ -6821,7 +6831,7 @@ task_t *curr_task(int cpu)
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  */
-void set_curr_task(int cpu, task_t *p)
+void set_curr_task(int cpu, struct task_struct *p)
 {
 	cpu_curr(cpu) = p;
 }