-rw-r--r--   include/linux/sched.h   |   5
-rw-r--r--   kernel/sched.c          | 250
2 files changed, 128 insertions, 127 deletions
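The change is mechanical throughout: the scheduler-private typedefs (runqueue_t, prio_array_t, migration_req_t) are replaced with plain struct tags, and include/linux/sched.h only gains an opaque forward declaration of struct prio_array because task_struct merely stores a pointer to it. A minimal stand-alone sketch of that pattern (illustrative names, not the kernel's own definitions):

/* Before: a typedef hides the struct tag, so every user of the header
 * has to see the typedef:
 *
 *	typedef struct prio_array prio_array_t;
 *	prio_array_t *array;
 */

/* After: an opaque forward declaration is enough for a pointer member. */
struct prio_array;

struct task_like {			/* illustrative stand-in for task_struct */
	int prio;
	struct prio_array *array;	/* opaque pointer, no typedef needed */
};

/* Only the .c file that actually touches the fields defines the type. */
struct prio_array {
	unsigned int nr_active;
	unsigned long bitmap[4];
};

int main(void)
{
	struct task_like t = { .prio = 120, .array = 0 };
	return t.prio == 120 ? 0 : 1;
}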
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c2797f04d931..1c876e27ff93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
-typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -715,6 +714,8 @@ enum sleep_type {
 	SLEEP_INTERRUPTED,
 };
 
+struct prio_array;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -732,7 +733,7 @@ struct task_struct {
 	int load_weight;	/* for niceness load balancing purposes */
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+	struct prio_array *array;
 
 	unsigned short ioprio;
 	unsigned int btrace_seq;
diff --git a/kernel/sched.c b/kernel/sched.c
index 021b31219516..4ee400f9d56b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -188,8 +188,6 @@ static inline unsigned int task_timeslice(struct task_struct *p)
  * These are the runqueue data structures:
  */
 
-typedef struct runqueue runqueue_t;
-
 struct prio_array {
 	unsigned int nr_active;
 	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
@@ -203,7 +201,7 @@ struct prio_array {
  * (such as the load balancing or the thread migration code), lock
  * acquire operations must be ordered by ascending &runqueue.
  */
-struct runqueue {
+struct rq {
 	spinlock_t lock;
 
 	/*
@@ -229,7 +227,7 @@ struct runqueue {
 	unsigned long long timestamp_last_tick;
 	struct task_struct *curr, *idle;
 	struct mm_struct *prev_mm;
-	prio_array_t *active, *expired, arrays[2];
+	struct prio_array *active, *expired, arrays[2];
 	int best_expired_prio;
 	atomic_t nr_iowait;
 
@@ -266,7 +264,7 @@ struct runqueue {
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct runqueue, runqueues);
+static DEFINE_PER_CPU(struct rq, runqueues);
 
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -291,16 +289,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, struct task_struct *p)
+static inline int task_running(struct rq *rq, struct task_struct *p)
 {
 	return rq->curr == p;
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
@@ -317,7 +315,7 @@ static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, struct task_struct *p)
+static inline int task_running(struct rq *rq, struct task_struct *p)
 {
 #ifdef CONFIG_SMP
 	return p->oncpu;
@@ -326,7 +324,7 @@ static inline int task_running(runqueue_t *rq, struct task_struct *p)
 #endif
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -343,7 +341,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -364,10 +362,10 @@ static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
  * __task_rq_lock - lock the runqueue a given task resides on.
  * Must be called interrupts disabled.
  */
-static inline runqueue_t *__task_rq_lock(struct task_struct *p)
+static inline struct rq *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
-	struct runqueue *rq;
+	struct rq *rq;
 
 repeat_lock_task:
 	rq = task_rq(p);
@@ -384,10 +382,10 @@ repeat_lock_task:
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)
+static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
-	struct runqueue *rq;
+	struct rq *rq;
 
 repeat_lock_task:
 	local_irq_save(*flags);
@@ -400,13 +398,13 @@ repeat_lock_task:
 	return rq;
 }
 
-static inline void __task_rq_unlock(runqueue_t *rq)
+static inline void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
 }
 
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 	__releases(rq->lock)
 {
 	spin_unlock_irqrestore(&rq->lock, *flags);
@@ -426,7 +424,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
 	seq_printf(seq, "timestamp %lu\n", jiffies);
 	for_each_online_cpu(cpu) {
-		runqueue_t *rq = cpu_rq(cpu);
+		struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
 		struct sched_domain *sd;
 		int dcnt = 0;
@@ -513,10 +511,10 @@ struct file_operations proc_schedstat_operations = {
 /*
  * rq_lock - lock a given runqueue and disable interrupts.
  */
-static inline runqueue_t *this_rq_lock(void)
+static inline struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
-	runqueue_t *rq;
+	struct rq *rq;
 
 	local_irq_disable();
 	rq = this_rq();
@@ -554,7 +552,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
 static void sched_info_arrive(struct task_struct *t)
 {
 	unsigned long now = jiffies, diff = 0;
-	struct runqueue *rq = task_rq(t);
+	struct rq *rq = task_rq(t);
 
 	if (t->sched_info.last_queued)
 		diff = now - t->sched_info.last_queued;
@@ -597,7 +595,7 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	struct runqueue *rq = task_rq(t);
+	struct rq *rq = task_rq(t);
 	unsigned long diff = jiffies - t->sched_info.last_arrival;
 
 	t->sched_info.cpu_time += diff;
@@ -614,7 +612,7 @@ static inline void sched_info_depart(struct task_struct *t)
 static inline void
 sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
-	struct runqueue *rq = task_rq(prev);
+	struct rq *rq = task_rq(prev);
 
 	/*
 	 * prev now departs the cpu. It's not interesting to record
@@ -635,7 +633,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+static void dequeue_task(struct task_struct *p, struct prio_array *array)
 {
 	array->nr_active--;
 	list_del(&p->run_list);
@@ -643,7 +641,7 @@ static void dequeue_task(struct task_struct *p, prio_array_t *array)
 		__clear_bit(p->prio, array->bitmap);
 }
 
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+static void enqueue_task(struct task_struct *p, struct prio_array *array)
 {
 	sched_info_queued(p);
 	list_add_tail(&p->run_list, array->queue + p->prio);
@@ -656,12 +654,13 @@ static void enqueue_task(struct task_struct *p, prio_array_t *array)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
+static void requeue_task(struct task_struct *p, struct prio_array *array)
 {
 	list_move_tail(&p->run_list, array->queue + p->prio);
 }
 
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
+static inline void
+enqueue_task_head(struct task_struct *p, struct prio_array *array)
 {
 	list_add(&p->run_list, array->queue + p->prio);
 	__set_bit(p->prio, array->bitmap);
@@ -739,24 +738,24 @@ static void set_load_weight(struct task_struct *p)
 }
 
 static inline void
-inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
+inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load += p->load_weight;
 }
 
 static inline void
-dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
+dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
 
-static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)
+static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running++;
 	inc_raw_weighted_load(rq, p);
 }
 
-static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)
+static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running--;
 	dec_raw_weighted_load(rq, p);
@@ -803,9 +802,9 @@ static int effective_prio(struct task_struct *p)
 /*
  * __activate_task - move a task to the runqueue.
  */
-static void __activate_task(struct task_struct *p, runqueue_t *rq)
+static void __activate_task(struct task_struct *p, struct rq *rq)
 {
-	prio_array_t *target = rq->active;
+	struct prio_array *target = rq->active;
 
 	if (batch_task(p))
 		target = rq->expired;
@@ -816,7 +815,7 @@ static void __activate_task(struct task_struct *p, runqueue_t *rq)
 /*
  * __activate_idle_task - move idle task to the _front_ of runqueue.
  */
-static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	enqueue_task_head(p, rq->active);
 	inc_nr_running(p, rq);
@@ -898,7 +897,7 @@ static int recalc_task_prio(struct task_struct *p, unsigned long long now)
  * Update all the scheduling statistics stuff. (sleep average
  * calculation, priority modifiers, etc.)
  */
-static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, struct rq *rq, int local)
 {
 	unsigned long long now;
 
@@ -906,7 +905,7 @@ static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
 #ifdef CONFIG_SMP
 	if (!local) {
 		/* Compensate for drifting sched_clock */
-		runqueue_t *this_rq = this_rq();
+		struct rq *this_rq = this_rq();
 		now = (now - this_rq->timestamp_last_tick)
 			+ rq->timestamp_last_tick;
 	}
@@ -945,7 +944,7 @@ static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+static void deactivate_task(struct task_struct *p, struct rq *rq)
 {
 	dec_nr_running(p, rq);
 	dequeue_task(p, p->array);
@@ -1009,23 +1008,23 @@ unsigned long weighted_cpuload(const int cpu)
 }
 
 #ifdef CONFIG_SMP
-typedef struct {
+struct migration_req {
 	struct list_head list;
 
 	struct task_struct *task;
 	int dest_cpu;
 
 	struct completion done;
-} migration_req_t;
+};
 
 /*
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
 static int
-migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
+migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 {
-	runqueue_t *rq = task_rq(p);
+	struct rq *rq = task_rq(p);
 
 	/*
 	 * If the task is not on a runqueue (and not running), then
@@ -1056,7 +1055,7 @@ migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
 void wait_task_inactive(struct task_struct *p)
 {
 	unsigned long flags;
-	runqueue_t *rq;
+	struct rq *rq;
 	int preempted;
 
 repeat:
@@ -1107,7 +1106,7 @@ void kick_process(struct task_struct *p)
  */
 static inline unsigned long source_load(int cpu, int type)
 {
-	runqueue_t *rq = cpu_rq(cpu);
+	struct rq *rq = cpu_rq(cpu);
 
 	if (type == 0)
 		return rq->raw_weighted_load;
@@ -1121,7 +1120,7 @@ static inline unsigned long source_load(int cpu, int type)
  */
 static inline unsigned long target_load(int cpu, int type)
 {
-	runqueue_t *rq = cpu_rq(cpu);
+	struct rq *rq = cpu_rq(cpu);
 
 	if (type == 0)
 		return rq->raw_weighted_load;
@@ -1134,7 +1133,7 @@ static inline unsigned long target_load(int cpu, int type)
  */
 static inline unsigned long cpu_avg_load_per_task(int cpu)
 {
-	runqueue_t *rq = cpu_rq(cpu);
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long n = rq->nr_running;
 
 	return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
@@ -1338,10 +1337,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	int cpu, this_cpu, success = 0;
 	unsigned long flags;
 	long old_state;
-	runqueue_t *rq;
+	struct rq *rq;
 #ifdef CONFIG_SMP
-	unsigned long load, this_load;
 	struct sched_domain *sd, *this_sd = NULL;
+	unsigned long load, this_load;
 	int new_cpu;
 #endif
 
@@ -1577,9 +1576,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
  */
 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
+	struct rq *rq, *this_rq;
 	unsigned long flags;
 	int this_cpu, cpu;
-	runqueue_t *rq, *this_rq;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
@@ -1662,7 +1661,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 void fastcall sched_exit(struct task_struct *p)
 {
 	unsigned long flags;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	/*
 	 * If the child was a (relative-) CPU hog then decrease
@@ -1693,7 +1692,7 @@ void fastcall sched_exit(struct task_struct *p)
  * prepare_task_switch sets up locking and calls architecture specific
  * hooks.
  */
-static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)
+static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
 {
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1714,7 +1713,7 @@ static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)
+static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -1755,7 +1754,8 @@ static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	runqueue_t *rq = this_rq();
+	struct rq *rq = this_rq();
+
 	finish_task_switch(rq, prev);
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
@@ -1770,7 +1770,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
  * thread's register state.
  */
 static inline struct task_struct *
-context_switch(runqueue_t *rq, struct task_struct *prev,
+context_switch(struct rq *rq, struct task_struct *prev,
 	       struct task_struct *next)
 {
 	struct mm_struct *mm = next->mm;
@@ -1883,7 +1883,7 @@ task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
-static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	__acquires(rq1->lock)
 	__acquires(rq2->lock)
 {
@@ -1907,7 +1907,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
  * Note this does not restore interrupts like task_rq_unlock,
  * you need to do so manually after calling.
  */
-static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
@@ -1921,7 +1921,7 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  */
-static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
+static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(this_rq->lock)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
@@ -1944,9 +1944,9 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
  */
 static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 {
-	migration_req_t req;
-	runqueue_t *rq;
+	struct migration_req req;
 	unsigned long flags;
+	struct rq *rq;
 
 	rq = task_rq_lock(p, &flags);
 	if (!cpu_isset(dest_cpu, p->cpus_allowed)
@@ -1987,9 +1987,9 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static void pull_task(runqueue_t *src_rq, prio_array_t *src_array,
-		      struct task_struct *p, runqueue_t *this_rq,
-		      prio_array_t *this_array, int this_cpu)
+static void pull_task(struct rq *src_rq, struct prio_array *src_array,
+		      struct task_struct *p, struct rq *this_rq,
+		      struct prio_array *this_array, int this_cpu)
 {
 	dequeue_task(p, src_array);
 	dec_nr_running(p, src_rq);
@@ -2010,7 +2010,7 @@ static void pull_task(runqueue_t *src_rq, prio_array_t *src_array,
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,
+int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
 {
@@ -2050,14 +2050,14 @@ int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,
  *
  * Called with both runqueues locked.
  */
-static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
+static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_nr_move, unsigned long max_load_move,
 		      struct sched_domain *sd, enum idle_type idle,
 		      int *all_pinned)
 {
 	int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
 	    best_prio_seen, skip_for_load;
-	prio_array_t *array, *dst_array;
+	struct prio_array *array, *dst_array;
 	struct list_head *head, *curr;
 	struct task_struct *tmp;
 	long rem_load_move;
@@ -2212,7 +2212,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		sum_weighted_load = sum_nr_running = avg_load = 0;
 
 		for_each_cpu_mask(i, group->cpumask) {
-			runqueue_t *rq = cpu_rq(i);
+			struct rq *rq = cpu_rq(i);
 
 			if (*sd_idle && !idle_cpu(i))
 				*sd_idle = 0;
@@ -2428,11 +2428,11 @@ ret:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *
+static struct rq *
 find_busiest_queue(struct sched_group *group, enum idle_type idle,
 		   unsigned long imbalance)
 {
-	runqueue_t *busiest = NULL, *rq;
+	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
 	int i;
 
@@ -2468,13 +2468,13 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
  *
  * Called with this_rq unlocked.
  */
-static int load_balance(int this_cpu, runqueue_t *this_rq,
+static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum idle_type idle)
 {
 	int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
-	runqueue_t *busiest;
+	struct rq *busiest;
 
 	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
 	    !sched_smt_power_savings)
@@ -2596,10 +2596,10 @@ out_one_pinned:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, runqueue_t *this_rq, struct sched_domain *sd)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
 	struct sched_group *group;
-	runqueue_t *busiest = NULL;
+	struct rq *busiest = NULL;
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
@@ -2657,7 +2657,7 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2678,11 +2678,11 @@ static void idle_balance(int this_cpu, runqueue_t *this_rq)
  *
  * Called with busiest_rq locked.
  */
-static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
+static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 {
-	struct sched_domain *sd;
-	runqueue_t *target_rq;
 	int target_cpu = busiest_rq->push_cpu;
+	struct sched_domain *sd;
+	struct rq *target_rq;
 
 	/* Is there any task to move? */
 	if (busiest_rq->nr_running <= 1)
@@ -2736,7 +2736,7 @@ static inline unsigned long cpu_offset(int cpu)
 }
 
 static void
-rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
+rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 {
 	unsigned long this_load, interval, j = cpu_offset(this_cpu);
 	struct sched_domain *sd;
@@ -2790,15 +2790,15 @@ rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle)
 /*
  * on UP we do not need to balance between CPUs:
  */
-static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
+static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
 {
 }
-static inline void idle_balance(int cpu, runqueue_t *rq)
+static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
 #endif
 
-static inline int wake_priority_sleeper(runqueue_t *rq)
+static inline int wake_priority_sleeper(struct rq *rq)
 {
 	int ret = 0;
 
@@ -2826,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Bank in p->sched_time the ns elapsed since the last tick or switch.
  */
 static inline void
-update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now)
+update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
 {
 	p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
 }
@@ -2858,7 +2858,7 @@ unsigned long long current_sched_time(const struct task_struct *p)
  * increasing number of running tasks. We also ignore the interactivity
  * if a better static_prio task has expired:
  */
-static inline int expired_starving(runqueue_t *rq)
+static inline int expired_starving(struct rq *rq)
 {
 	if (rq->curr->static_prio > rq->best_expired_prio)
 		return 1;
@@ -2900,7 +2900,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 			 cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	runqueue_t *rq = this_rq();
+	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
 	p->stime = cputime_add(p->stime, cputime);
@@ -2930,7 +2930,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 	cputime64_t tmp = cputime_to_cputime64(steal);
-	runqueue_t *rq = this_rq();
+	struct rq *rq = this_rq();
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
@@ -2954,7 +2954,7 @@ void scheduler_tick(void)
 	unsigned long long now = sched_clock();
 	struct task_struct *p = current;
 	int cpu = smp_processor_id();
-	runqueue_t *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 
 	update_cpu_clock(p, rq, now);
 
@@ -3043,7 +3043,7 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(runqueue_t *rq)
+static inline void wakeup_busy_runqueue(struct rq *rq)
 {
 	/* If an SMT runqueue is sleeping due to priority reasons wake it up */
 	if (rq->curr == rq->idle && rq->nr_running)
@@ -3069,7 +3069,7 @@ static void wake_sleeping_dependent(int this_cpu)
 		return;
 
 	for_each_cpu_mask(i, sd->span) {
-		runqueue_t *smt_rq = cpu_rq(i);
+		struct rq *smt_rq = cpu_rq(i);
 
 		if (i == this_cpu)
 			continue;
@@ -3099,7 +3099,7 @@ smt_slice(struct task_struct *p, struct sched_domain *sd)
  * need to be obeyed.
  */
 static int
-dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
+dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int ret = 0, i;
@@ -3120,7 +3120,7 @@ dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
 
 	for_each_cpu_mask(i, sd->span) {
 		struct task_struct *smt_curr;
-		runqueue_t *smt_rq;
+		struct rq *smt_rq;
 
 		if (i == this_cpu)
 			continue;
@@ -3166,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu)
 {
 }
 static inline int
-dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
+dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
 {
 	return 0;
 }
@@ -3221,13 +3221,13 @@ static inline int interactive_sleep(enum sleep_type sleep_type)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
+	struct prio_array *array;
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
 	int cpu, idx, new_prio;
-	prio_array_t *array;
 	long *switch_count;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	/*
 	 * Test if we are atomic. Since do_exit() needs to call into
@@ -3787,9 +3787,9 @@ EXPORT_SYMBOL(sleep_on_timeout);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
+	struct prio_array *array;
 	unsigned long flags;
-	prio_array_t *array;
-	runqueue_t *rq;
+	struct rq *rq;
 	int oldprio;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3828,10 +3828,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 void set_user_nice(struct task_struct *p, long nice)
 {
+	struct prio_array *array;
 	int old_prio, delta;
 	unsigned long flags;
-	prio_array_t *array;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -4012,9 +4012,9 @@ int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
 {
 	int retval, oldprio, oldpolicy = -1;
-	prio_array_t *array;
+	struct prio_array *array;
 	unsigned long flags;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -4376,9 +4376,8 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  */
 asmlinkage long sys_sched_yield(void)
 {
-	runqueue_t *rq = this_rq_lock();
-	prio_array_t *array = current->array;
-	prio_array_t *target = rq->expired;
+	struct rq *rq = this_rq_lock();
+	struct prio_array *array = current->array, *target = rq->expired;
 
 	schedstat_inc(rq, yld_cnt);
 	/*
@@ -4525,7 +4524,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
+	struct rq *rq = &__raw_get_cpu_var(runqueues);
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -4535,7 +4534,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
+	struct rq *rq = &__raw_get_cpu_var(runqueues);
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
@@ -4743,7 +4742,7 @@ void show_state(void)
  */
 void __devinit init_idle(struct task_struct *idle, int cpu)
 {
-	runqueue_t *rq = cpu_rq(cpu);
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
 	idle->timestamp = sched_clock();
@@ -4782,7 +4781,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 /*
  * This is how migration works:
  *
- * 1) we queue a migration_req_t structure in the source CPU's
+ * 1) we queue a struct migration_req structure in the source CPU's
  *    runqueue and wake up that CPU's migration thread.
  * 2) we down() the locked semaphore => thread blocks.
  * 3) migration thread wakes up (implicitly it forces the migrated
@@ -4806,9 +4805,9 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  */
 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
+	struct migration_req req;
 	unsigned long flags;
-	migration_req_t req;
-	runqueue_t *rq;
+	struct rq *rq;
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
@@ -4850,7 +4849,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
  */
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
-	runqueue_t *rq_dest, *rq_src;
+	struct rq *rq_dest, *rq_src;
 	int ret = 0;
 
 	if (unlikely(cpu_is_offline(dest_cpu)))
@@ -4896,15 +4895,15 @@ out:
 static int migration_thread(void *data)
 {
 	int cpu = (long)data;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	rq = cpu_rq(cpu);
 	BUG_ON(rq->migration_thread != current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		struct migration_req *req;
 		struct list_head *head;
-		migration_req_t *req;
 
 		try_to_freeze();
 
@@ -4928,7 +4927,7 @@ static int migration_thread(void *data)
 			set_current_state(TASK_INTERRUPTIBLE);
 			continue;
 		}
-		req = list_entry(head->next, migration_req_t, list);
+		req = list_entry(head->next, struct migration_req, list);
 		list_del_init(head->next);
 
 		spin_unlock(&rq->lock);
@@ -4955,10 +4954,10 @@ wait_to_die:
 /* Figure out where task on dead CPU should go, use force if neccessary. */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-	runqueue_t *rq;
 	unsigned long flags;
-	int dest_cpu;
 	cpumask_t mask;
+	struct rq *rq;
+	int dest_cpu;
 
 restart:
 	/* On same node? */
@@ -4998,9 +4997,9 @@ restart:
  * their home CPUs. So we just add the counter to another CPU's counter,
  * to keep the global sum constant after CPU-down:
  */
-static void migrate_nr_uninterruptible(runqueue_t *rq_src)
+static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+	struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -5036,7 +5035,7 @@ static void migrate_live_tasks(int src_cpu)
 void sched_idle_next(void)
 {
 	int this_cpu = smp_processor_id();
-	runqueue_t *rq = cpu_rq(this_cpu);
+	struct rq *rq = cpu_rq(this_cpu);
 	struct task_struct *p = rq->idle;
 	unsigned long flags;
 
@@ -5074,7 +5073,7 @@ void idle_task_exit(void)
 
 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
-	struct runqueue *rq = cpu_rq(dead_cpu);
+	struct rq *rq = cpu_rq(dead_cpu);
 
 	/* Must be exiting, otherwise would be on tasklist. */
 	BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
@@ -5099,7 +5098,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 /* release_task() removes task from tasklist, so we won't find dead tasks. */
 static void migrate_dead_tasks(unsigned int dead_cpu)
 {
-	struct runqueue *rq = cpu_rq(dead_cpu);
+	struct rq *rq = cpu_rq(dead_cpu);
 	unsigned int arr, i;
 
 	for (arr = 0; arr < 2; arr++) {
@@ -5123,8 +5122,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	struct task_struct *p;
 	int cpu = (long)hcpu;
-	struct runqueue *rq;
 	unsigned long flags;
+	struct rq *rq;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
@@ -5176,9 +5175,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		 * the requestors. */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
-			migration_req_t *req;
+			struct migration_req *req;
+
 			req = list_entry(rq->migration_queue.next,
-					 migration_req_t, list);
+					 struct migration_req, list);
 			list_del_init(&req->list);
 			complete(&req->done);
 		}
@@ -5361,7 +5361,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  */
 static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 {
-	runqueue_t *rq = cpu_rq(cpu);
+	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
@@ -6690,8 +6690,8 @@ void __init sched_init(void)
 	int i, j, k;
 
 	for_each_possible_cpu(i) {
-		prio_array_t *array;
-		runqueue_t *rq;
+		struct prio_array *array;
+		struct rq *rq;
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
@@ -6764,10 +6764,10 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 void normalize_rt_tasks(void)
 {
+	struct prio_array *array;
 	struct task_struct *p;
-	prio_array_t *array;
 	unsigned long flags;
-	runqueue_t *rq;
+	struct rq *rq;
 
 	read_lock_irq(&tasklist_lock);
 	for_each_process(p) {