Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 1210
1 file changed, 862 insertions, 348 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f06d059edef5..2629c1711fd6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -168,15 +168,21 @@ | |||
168 | */ | 168 | */ |
169 | 169 | ||
170 | #define SCALE_PRIO(x, prio) \ | 170 | #define SCALE_PRIO(x, prio) \ |
171 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) | 171 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) |
172 | 172 | ||
173 | static unsigned int task_timeslice(task_t *p) | 173 | static unsigned int static_prio_timeslice(int static_prio) |
174 | { | 174 | { |
175 | if (p->static_prio < NICE_TO_PRIO(0)) | 175 | if (static_prio < NICE_TO_PRIO(0)) |
176 | return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); | 176 | return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio); |
177 | else | 177 | else |
178 | return SCALE_PRIO(DEF_TIMESLICE, p->static_prio); | 178 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); |
179 | } | 179 | } |
180 | |||
181 | static inline unsigned int task_timeslice(task_t *p) | ||
182 | { | ||
183 | return static_prio_timeslice(p->static_prio); | ||
184 | } | ||
185 | |||
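For reference, a minimal user-space sketch of how the new static_prio_timeslice() scales with nice level. The constants are assumptions taken from 2.6-era headers (HZ=1000 so jiffies equal milliseconds, DEF_TIMESLICE=100, MIN_TIMESLICE=5, MAX_PRIO=140, MAX_USER_PRIO=40, NICE_TO_PRIO(n)=120+n); it is not part of the patch:

#include <stdio.h>

#define MIN_TIMESLICE      5                 /* assumed: 5ms at HZ=1000 */
#define DEF_TIMESLICE      100               /* assumed: 100ms at HZ=1000 */
#define MAX_PRIO           140               /* assumed */
#define MAX_USER_PRIO      40                /* assumed */
#define NICE_TO_PRIO(nice) (120 + (nice))    /* assumed */

#define MAX(a, b) ((a) > (b) ? (a) : (b))    /* stand-in for the kernel's max() */
#define SCALE_PRIO(x, prio) \
	MAX((x) * (MAX_PRIO - (prio)) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

int main(void)
{
	/* prints "800 100 5": nice -20, nice 0 and nice +19 timeslices in ms */
	printf("%u %u %u\n",
	       static_prio_timeslice(NICE_TO_PRIO(-20)),
	       static_prio_timeslice(NICE_TO_PRIO(0)),
	       static_prio_timeslice(NICE_TO_PRIO(19)));
	return 0;
}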
180 | #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ | 186 | #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ |
181 | < (long long) (sd)->cache_hot_time) | 187 | < (long long) (sd)->cache_hot_time) |
182 | 188 | ||
@@ -184,13 +190,11 @@ static unsigned int task_timeslice(task_t *p) | |||
184 | * These are the runqueue data structures: | 190 | * These are the runqueue data structures: |
185 | */ | 191 | */ |
186 | 192 | ||
187 | #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long)) | ||
188 | |||
189 | typedef struct runqueue runqueue_t; | 193 | typedef struct runqueue runqueue_t; |
190 | 194 | ||
191 | struct prio_array { | 195 | struct prio_array { |
192 | unsigned int nr_active; | 196 | unsigned int nr_active; |
193 | unsigned long bitmap[BITMAP_SIZE]; | 197 | DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ |
194 | struct list_head queue[MAX_PRIO]; | 198 | struct list_head queue[MAX_PRIO]; |
195 | }; | 199 | }; |
196 | 200 | ||
@@ -209,6 +213,7 @@ struct runqueue { | |||
209 | * remote CPUs use both these fields when doing load calculation. | 213 | * remote CPUs use both these fields when doing load calculation. |
210 | */ | 214 | */ |
211 | unsigned long nr_running; | 215 | unsigned long nr_running; |
216 | unsigned long raw_weighted_load; | ||
212 | #ifdef CONFIG_SMP | 217 | #ifdef CONFIG_SMP |
213 | unsigned long cpu_load[3]; | 218 | unsigned long cpu_load[3]; |
214 | #endif | 219 | #endif |
@@ -239,7 +244,6 @@ struct runqueue { | |||
239 | 244 | ||
240 | task_t *migration_thread; | 245 | task_t *migration_thread; |
241 | struct list_head migration_queue; | 246 | struct list_head migration_queue; |
242 | int cpu; | ||
243 | #endif | 247 | #endif |
244 | 248 | ||
245 | #ifdef CONFIG_SCHEDSTATS | 249 | #ifdef CONFIG_SCHEDSTATS |
@@ -351,11 +355,30 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) | |||
351 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ | 355 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
352 | 356 | ||
353 | /* | 357 | /* |
358 | * __task_rq_lock - lock the runqueue a given task resides on. | ||
359 | * Must be called with interrupts disabled. | ||
360 | */ | ||
361 | static inline runqueue_t *__task_rq_lock(task_t *p) | ||
362 | __acquires(rq->lock) | ||
363 | { | ||
364 | struct runqueue *rq; | ||
365 | |||
366 | repeat_lock_task: | ||
367 | rq = task_rq(p); | ||
368 | spin_lock(&rq->lock); | ||
369 | if (unlikely(rq != task_rq(p))) { | ||
370 | spin_unlock(&rq->lock); | ||
371 | goto repeat_lock_task; | ||
372 | } | ||
373 | return rq; | ||
374 | } | ||
375 | |||
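The retry loop exists because p can be migrated to another CPU between the task_rq(p) lookup and acquiring the lock, in which case the wrong runqueue was locked and the lookup must be repeated under no lock. A hypothetical caller (illustrative sketch only, paired with __task_rq_unlock() introduced further down) would look like:

	/* hypothetical example, not in the patch */
	static void frob_task_rq(task_t *p)
	{
		runqueue_t *rq;

		/* interrupts must already be disabled here */
		rq = __task_rq_lock(p);
		/* ... p cannot change runqueues while rq->lock is held ... */
		__task_rq_unlock(rq);
	}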
376 | /* | ||
354 | * task_rq_lock - lock the runqueue a given task resides on and disable | 377 | * task_rq_lock - lock the runqueue a given task resides on and disable |
355 | * interrupts. Note the ordering: we can safely lookup the task_rq without | 378 | * interrupts. Note the ordering: we can safely lookup the task_rq without |
356 | * explicitly disabling preemption. | 379 | * explicitly disabling preemption. |
357 | */ | 380 | */ |
358 | static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) | 381 | static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) |
359 | __acquires(rq->lock) | 382 | __acquires(rq->lock) |
360 | { | 383 | { |
361 | struct runqueue *rq; | 384 | struct runqueue *rq; |
@@ -371,6 +394,12 @@ repeat_lock_task: | |||
371 | return rq; | 394 | return rq; |
372 | } | 395 | } |
373 | 396 | ||
397 | static inline void __task_rq_unlock(runqueue_t *rq) | ||
398 | __releases(rq->lock) | ||
399 | { | ||
400 | spin_unlock(&rq->lock); | ||
401 | } | ||
402 | |||
374 | static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) | 403 | static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) |
375 | __releases(rq->lock) | 404 | __releases(rq->lock) |
376 | { | 405 | { |
@@ -634,7 +663,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) | |||
634 | } | 663 | } |
635 | 664 | ||
636 | /* | 665 | /* |
637 | * effective_prio - return the priority that is based on the static | 666 | * __normal_prio - return the priority that is based on the static |
638 | * priority but is modified by bonuses/penalties. | 667 | * priority but is modified by bonuses/penalties. |
639 | * | 668 | * |
640 | * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] | 669 | * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] |
@@ -647,13 +676,11 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) | |||
647 | * | 676 | * |
648 | * Both properties are important to certain workloads. | 677 | * Both properties are important to certain workloads. |
649 | */ | 678 | */ |
650 | static int effective_prio(task_t *p) | 679 | |
680 | static inline int __normal_prio(task_t *p) | ||
651 | { | 681 | { |
652 | int bonus, prio; | 682 | int bonus, prio; |
653 | 683 | ||
654 | if (rt_task(p)) | ||
655 | return p->prio; | ||
656 | |||
657 | bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; | 684 | bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; |
658 | 685 | ||
659 | prio = p->static_prio - bonus; | 686 | prio = p->static_prio - bonus; |
@@ -665,6 +692,106 @@ static int effective_prio(task_t *p) | |||
665 | } | 692 | } |
666 | 693 | ||
667 | /* | 694 | /* |
695 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | ||
696 | * of tasks with abnormal "nice" values across CPUs the contribution that | ||
697 | * each task makes to its run queue's load is weighted according to its | ||
698 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a | ||
699 | * scaled version of the new time slice allocation that they receive on time | ||
700 | * slice expiry etc. | ||
701 | */ | ||
702 | |||
703 | /* | ||
704 | * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE | ||
705 | * If static_prio_timeslice() is ever changed to break this assumption then | ||
706 | * this code will need modification | ||
707 | */ | ||
708 | #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE | ||
709 | #define LOAD_WEIGHT(lp) \ | ||
710 | (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO) | ||
711 | #define PRIO_TO_LOAD_WEIGHT(prio) \ | ||
712 | LOAD_WEIGHT(static_prio_timeslice(prio)) | ||
713 | #define RTPRIO_TO_LOAD_WEIGHT(rp) \ | ||
714 | (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) | ||
715 | |||
716 | static void set_load_weight(task_t *p) | ||
717 | { | ||
718 | if (has_rt_policy(p)) { | ||
719 | #ifdef CONFIG_SMP | ||
720 | if (p == task_rq(p)->migration_thread) | ||
721 | /* | ||
722 | * The migration thread does the actual balancing. | ||
723 | * Giving its load any weight will skew balancing | ||
724 | * adversely. | ||
725 | */ | ||
726 | p->load_weight = 0; | ||
727 | else | ||
728 | #endif | ||
729 | p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority); | ||
730 | } else | ||
731 | p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); | ||
732 | } | ||
733 | |||
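As a worked example of the resulting weights, assuming SCHED_LOAD_SCALE == 128 and static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE == 100 (2.6-era values, HZ=1000): a nice-0 task weighs exactly SCHED_LOAD_SCALE, a nice -20 task eight times that, and a nice +19 task almost nothing. For real-time tasks, RTPRIO_TO_LOAD_WEIGHT() adds LOAD_WEIGHT(rt_priority) on top of PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO), which works out to LOAD_WEIGHT(800) == 1024 under the same assumptions, so any RT task (other than the migration thread, special-cased to zero above) outweighs any SCHED_NORMAL task. Minimal sketch, not part of the patch:

#include <stdio.h>

#define SCHED_LOAD_SCALE      128UL   /* assumed value from 2.6-era sched.h */
#define TIME_SLICE_NICE_ZERO  100     /* assumed DEF_TIMESLICE at HZ=1000 */
#define LOAD_WEIGHT(lp) (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)

int main(void)
{
	/* nice 0 (100ms slice) -> 128, nice -20 (800ms) -> 1024,
	 * nice +19 (5ms) -> 6, SCHED_FIFO rt_priority 50 -> 1024 + 64 = 1088 */
	printf("%lu %lu %lu %lu\n", LOAD_WEIGHT(100), LOAD_WEIGHT(800),
	       LOAD_WEIGHT(5), LOAD_WEIGHT(800) + LOAD_WEIGHT(50));
	return 0;
}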
734 | static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) | ||
735 | { | ||
736 | rq->raw_weighted_load += p->load_weight; | ||
737 | } | ||
738 | |||
739 | static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) | ||
740 | { | ||
741 | rq->raw_weighted_load -= p->load_weight; | ||
742 | } | ||
743 | |||
744 | static inline void inc_nr_running(task_t *p, runqueue_t *rq) | ||
745 | { | ||
746 | rq->nr_running++; | ||
747 | inc_raw_weighted_load(rq, p); | ||
748 | } | ||
749 | |||
750 | static inline void dec_nr_running(task_t *p, runqueue_t *rq) | ||
751 | { | ||
752 | rq->nr_running--; | ||
753 | dec_raw_weighted_load(rq, p); | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | * Calculate the expected normal priority: i.e. priority | ||
758 | * without taking RT-inheritance into account. Might be | ||
759 | * boosted by interactivity modifiers. Changes upon fork, | ||
760 | * setprio syscalls, and whenever the interactivity | ||
761 | * estimator recalculates. | ||
762 | */ | ||
763 | static inline int normal_prio(task_t *p) | ||
764 | { | ||
765 | int prio; | ||
766 | |||
767 | if (has_rt_policy(p)) | ||
768 | prio = MAX_RT_PRIO-1 - p->rt_priority; | ||
769 | else | ||
770 | prio = __normal_prio(p); | ||
771 | return prio; | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * Calculate the current priority, i.e. the priority | ||
776 | * taken into account by the scheduler. This value might | ||
777 | * be boosted by RT tasks, or might be boosted by | ||
778 | * interactivity modifiers. Will be RT if the task got | ||
779 | * RT-boosted. If not then it returns p->normal_prio. | ||
780 | */ | ||
781 | static int effective_prio(task_t *p) | ||
782 | { | ||
783 | p->normal_prio = normal_prio(p); | ||
784 | /* | ||
785 | * If we are RT tasks or we were boosted to RT priority, | ||
786 | * keep the priority unchanged. Otherwise, update priority | ||
787 | * to the normal priority: | ||
788 | */ | ||
789 | if (!rt_prio(p->prio)) | ||
790 | return p->normal_prio; | ||
791 | return p->prio; | ||
792 | } | ||
793 | |||
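Assuming MAX_RT_PRIO == 100, MAX_PRIO == 140 and MAX_BONUS == 10 (2.6-era values), the two helpers partition the prio space: real-time tasks occupy 0..99, with normal_prio() mapping rt_priority r to 99 - r (a higher rt_priority yields a lower, i.e. more urgent, prio value), while SCHED_NORMAL/SCHED_BATCH tasks land in the non-RT range via __normal_prio(), i.e. static_prio shifted by at most MAX_BONUS/2 == 5 in either direction and clamped into that range. A minimal sketch of the RT mapping (illustrative, not part of the patch):

#include <stdio.h>

#define MAX_RT_PRIO 100   /* assumed */

/* mirrors normal_prio() for a task with an RT policy */
static int rt_normal_prio(int rt_priority)
{
	return MAX_RT_PRIO - 1 - rt_priority;
}

int main(void)
{
	/* rt_priority 1 -> prio 98, rt_priority 99 -> prio 0 (most urgent);
	 * a nice-0 task (static_prio 120) ends up in 115..125 depending on
	 * its interactivity bonus */
	printf("%d %d\n", rt_normal_prio(1), rt_normal_prio(99));
	return 0;
}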
794 | /* | ||
668 | * __activate_task - move a task to the runqueue. | 795 | * __activate_task - move a task to the runqueue. |
669 | */ | 796 | */ |
670 | static void __activate_task(task_t *p, runqueue_t *rq) | 797 | static void __activate_task(task_t *p, runqueue_t *rq) |
@@ -674,7 +801,7 @@ static void __activate_task(task_t *p, runqueue_t *rq) | |||
674 | if (batch_task(p)) | 801 | if (batch_task(p)) |
675 | target = rq->expired; | 802 | target = rq->expired; |
676 | enqueue_task(p, target); | 803 | enqueue_task(p, target); |
677 | rq->nr_running++; | 804 | inc_nr_running(p, rq); |
678 | } | 805 | } |
679 | 806 | ||
680 | /* | 807 | /* |
@@ -683,39 +810,45 @@ static void __activate_task(task_t *p, runqueue_t *rq) | |||
683 | static inline void __activate_idle_task(task_t *p, runqueue_t *rq) | 810 | static inline void __activate_idle_task(task_t *p, runqueue_t *rq) |
684 | { | 811 | { |
685 | enqueue_task_head(p, rq->active); | 812 | enqueue_task_head(p, rq->active); |
686 | rq->nr_running++; | 813 | inc_nr_running(p, rq); |
687 | } | 814 | } |
688 | 815 | ||
816 | /* | ||
817 | * Recalculate p->normal_prio and p->prio after having slept, | ||
818 | * updating the sleep-average too: | ||
819 | */ | ||
689 | static int recalc_task_prio(task_t *p, unsigned long long now) | 820 | static int recalc_task_prio(task_t *p, unsigned long long now) |
690 | { | 821 | { |
691 | /* Caller must always ensure 'now >= p->timestamp' */ | 822 | /* Caller must always ensure 'now >= p->timestamp' */ |
692 | unsigned long long __sleep_time = now - p->timestamp; | 823 | unsigned long sleep_time = now - p->timestamp; |
693 | unsigned long sleep_time; | ||
694 | 824 | ||
695 | if (batch_task(p)) | 825 | if (batch_task(p)) |
696 | sleep_time = 0; | 826 | sleep_time = 0; |
697 | else { | ||
698 | if (__sleep_time > NS_MAX_SLEEP_AVG) | ||
699 | sleep_time = NS_MAX_SLEEP_AVG; | ||
700 | else | ||
701 | sleep_time = (unsigned long)__sleep_time; | ||
702 | } | ||
703 | 827 | ||
704 | if (likely(sleep_time > 0)) { | 828 | if (likely(sleep_time > 0)) { |
705 | /* | 829 | /* |
706 | * User tasks that sleep a long time are categorised as | 830 | * This ceiling is set to the lowest priority that would allow |
707 | * idle. They will only have their sleep_avg increased to a | 831 | * a task to be reinserted into the active array on timeslice |
708 | * level that makes them just interactive priority to stay | 832 | * completion. |
709 | * active yet prevent them suddenly becoming cpu hogs and | ||
710 | * starving other processes. | ||
711 | */ | 833 | */ |
712 | if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) { | 834 | unsigned long ceiling = INTERACTIVE_SLEEP(p); |
713 | unsigned long ceiling; | ||
714 | 835 | ||
715 | ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG - | 836 | if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) { |
716 | DEF_TIMESLICE); | 837 | /* |
717 | if (p->sleep_avg < ceiling) | 838 | * Prevents user tasks from achieving best priority |
718 | p->sleep_avg = ceiling; | 839 | * with one single large enough sleep. |
840 | */ | ||
841 | p->sleep_avg = ceiling; | ||
842 | /* | ||
843 | * Using INTERACTIVE_SLEEP() as a ceiling places a | ||
844 | * nice(0) task 1ms sleep away from promotion, and | ||
845 | * gives it 700ms to round-robin with no chance of | ||
846 | * being demoted. This is more than generous, so | ||
847 | * mark this sleep as non-interactive to prevent the | ||
848 | * on-runqueue bonus logic from intervening should | ||
849 | * this task not receive cpu immediately. | ||
850 | */ | ||
851 | p->sleep_type = SLEEP_NONINTERACTIVE; | ||
719 | } else { | 852 | } else { |
720 | /* | 853 | /* |
721 | * Tasks waking from uninterruptible sleep are | 854 | * Tasks waking from uninterruptible sleep are |
@@ -723,12 +856,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now) | |||
723 | * are likely to be waiting on I/O | 856 | * are likely to be waiting on I/O |
724 | */ | 857 | */ |
725 | if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { | 858 | if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { |
726 | if (p->sleep_avg >= INTERACTIVE_SLEEP(p)) | 859 | if (p->sleep_avg >= ceiling) |
727 | sleep_time = 0; | 860 | sleep_time = 0; |
728 | else if (p->sleep_avg + sleep_time >= | 861 | else if (p->sleep_avg + sleep_time >= |
729 | INTERACTIVE_SLEEP(p)) { | 862 | ceiling) { |
730 | p->sleep_avg = INTERACTIVE_SLEEP(p); | 863 | p->sleep_avg = ceiling; |
731 | sleep_time = 0; | 864 | sleep_time = 0; |
732 | } | 865 | } |
733 | } | 866 | } |
734 | 867 | ||
@@ -742,9 +875,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now) | |||
742 | */ | 875 | */ |
743 | p->sleep_avg += sleep_time; | 876 | p->sleep_avg += sleep_time; |
744 | 877 | ||
745 | if (p->sleep_avg > NS_MAX_SLEEP_AVG) | ||
746 | p->sleep_avg = NS_MAX_SLEEP_AVG; | ||
747 | } | 878 | } |
879 | if (p->sleep_avg > NS_MAX_SLEEP_AVG) | ||
880 | p->sleep_avg = NS_MAX_SLEEP_AVG; | ||
748 | } | 881 | } |
749 | 882 | ||
750 | return effective_prio(p); | 883 | return effective_prio(p); |
@@ -805,7 +938,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local) | |||
805 | */ | 938 | */ |
806 | static void deactivate_task(struct task_struct *p, runqueue_t *rq) | 939 | static void deactivate_task(struct task_struct *p, runqueue_t *rq) |
807 | { | 940 | { |
808 | rq->nr_running--; | 941 | dec_nr_running(p, rq); |
809 | dequeue_task(p, p->array); | 942 | dequeue_task(p, p->array); |
810 | p->array = NULL; | 943 | p->array = NULL; |
811 | } | 944 | } |
@@ -818,6 +951,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq) | |||
818 | * the target CPU. | 951 | * the target CPU. |
819 | */ | 952 | */ |
820 | #ifdef CONFIG_SMP | 953 | #ifdef CONFIG_SMP |
954 | |||
955 | #ifndef tsk_is_polling | ||
956 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
957 | #endif | ||
958 | |||
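The tsk_is_polling() fallback simply tests TIF_POLLING_NRFLAG, so on architectures that do not provide their own definition the behaviour of resched_task() below is unchanged: the cross-CPU reschedule IPI is still skipped whenever the target CPU is already polling need_resched in its idle loop and will notice the flag by itself.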
821 | static void resched_task(task_t *p) | 959 | static void resched_task(task_t *p) |
822 | { | 960 | { |
823 | int cpu; | 961 | int cpu; |
@@ -833,9 +971,9 @@ static void resched_task(task_t *p) | |||
833 | if (cpu == smp_processor_id()) | 971 | if (cpu == smp_processor_id()) |
834 | return; | 972 | return; |
835 | 973 | ||
836 | /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */ | 974 | /* NEED_RESCHED must be visible before we test polling */ |
837 | smp_mb(); | 975 | smp_mb(); |
838 | if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG)) | 976 | if (!tsk_is_polling(p)) |
839 | smp_send_reschedule(cpu); | 977 | smp_send_reschedule(cpu); |
840 | } | 978 | } |
841 | #else | 979 | #else |
@@ -855,6 +993,12 @@ inline int task_curr(const task_t *p) | |||
855 | return cpu_curr(task_cpu(p)) == p; | 993 | return cpu_curr(task_cpu(p)) == p; |
856 | } | 994 | } |
857 | 995 | ||
996 | /* Used instead of source_load when we know the type == 0 */ | ||
997 | unsigned long weighted_cpuload(const int cpu) | ||
998 | { | ||
999 | return cpu_rq(cpu)->raw_weighted_load; | ||
1000 | } | ||
1001 | |||
858 | #ifdef CONFIG_SMP | 1002 | #ifdef CONFIG_SMP |
859 | typedef struct { | 1003 | typedef struct { |
860 | struct list_head list; | 1004 | struct list_head list; |
@@ -944,7 +1088,8 @@ void kick_process(task_t *p) | |||
944 | } | 1088 | } |
945 | 1089 | ||
946 | /* | 1090 | /* |
947 | * Return a low guess at the load of a migration-source cpu. | 1091 | * Return a low guess at the load of a migration-source cpu weighted |
1092 | * according to the scheduling class and "nice" value. | ||
948 | * | 1093 | * |
949 | * We want to under-estimate the load of migration sources, to | 1094 | * We want to under-estimate the load of migration sources, to |
950 | * balance conservatively. | 1095 | * balance conservatively. |
@@ -952,24 +1097,36 @@ void kick_process(task_t *p) | |||
952 | static inline unsigned long source_load(int cpu, int type) | 1097 | static inline unsigned long source_load(int cpu, int type) |
953 | { | 1098 | { |
954 | runqueue_t *rq = cpu_rq(cpu); | 1099 | runqueue_t *rq = cpu_rq(cpu); |
955 | unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; | 1100 | |
956 | if (type == 0) | 1101 | if (type == 0) |
957 | return load_now; | 1102 | return rq->raw_weighted_load; |
958 | 1103 | ||
959 | return min(rq->cpu_load[type-1], load_now); | 1104 | return min(rq->cpu_load[type-1], rq->raw_weighted_load); |
960 | } | 1105 | } |
961 | 1106 | ||
962 | /* | 1107 | /* |
963 | * Return a high guess at the load of a migration-target cpu | 1108 | * Return a high guess at the load of a migration-target cpu weighted |
1109 | * according to the scheduling class and "nice" value. | ||
964 | */ | 1110 | */ |
965 | static inline unsigned long target_load(int cpu, int type) | 1111 | static inline unsigned long target_load(int cpu, int type) |
966 | { | 1112 | { |
967 | runqueue_t *rq = cpu_rq(cpu); | 1113 | runqueue_t *rq = cpu_rq(cpu); |
968 | unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; | 1114 | |
969 | if (type == 0) | 1115 | if (type == 0) |
970 | return load_now; | 1116 | return rq->raw_weighted_load; |
1117 | |||
1118 | return max(rq->cpu_load[type-1], rq->raw_weighted_load); | ||
1119 | } | ||
1120 | |||
1121 | /* | ||
1122 | * Return the average load per task on the cpu's run queue | ||
1123 | */ | ||
1124 | static inline unsigned long cpu_avg_load_per_task(int cpu) | ||
1125 | { | ||
1126 | runqueue_t *rq = cpu_rq(cpu); | ||
1127 | unsigned long n = rq->nr_running; | ||
971 | 1128 | ||
972 | return max(rq->cpu_load[type-1], load_now); | 1129 | return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; |
973 | } | 1130 | } |
974 | 1131 | ||
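With SCHED_LOAD_SCALE assumed to be 128, three runnable nice-0 tasks give raw_weighted_load == 384 and hence an average of 128 per task, while an idle runqueue reports SCHED_LOAD_SCALE rather than zero, presumably so callers (such as the tl_per_task test in try_to_wake_up() further down) still get a sensible nominal per-task load. Minimal sketch, not part of the patch:

static unsigned long example_avg_load_per_task(unsigned long raw_weighted_load,
					       unsigned long nr_running)
{
	/* 128UL stands in for SCHED_LOAD_SCALE (assumed value) */
	return nr_running ? raw_weighted_load / nr_running : 128UL;
}
/* example_avg_load_per_task(384, 3) == 128, example_avg_load_per_task(0, 0) == 128 */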
975 | /* | 1132 | /* |
@@ -1042,7 +1199,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | |||
1042 | cpus_and(tmp, group->cpumask, p->cpus_allowed); | 1199 | cpus_and(tmp, group->cpumask, p->cpus_allowed); |
1043 | 1200 | ||
1044 | for_each_cpu_mask(i, tmp) { | 1201 | for_each_cpu_mask(i, tmp) { |
1045 | load = source_load(i, 0); | 1202 | load = weighted_cpuload(i); |
1046 | 1203 | ||
1047 | if (load < min_load || (load == min_load && i == this_cpu)) { | 1204 | if (load < min_load || (load == min_load && i == this_cpu)) { |
1048 | min_load = load; | 1205 | min_load = load; |
@@ -1069,9 +1226,15 @@ static int sched_balance_self(int cpu, int flag) | |||
1069 | struct task_struct *t = current; | 1226 | struct task_struct *t = current; |
1070 | struct sched_domain *tmp, *sd = NULL; | 1227 | struct sched_domain *tmp, *sd = NULL; |
1071 | 1228 | ||
1072 | for_each_domain(cpu, tmp) | 1229 | for_each_domain(cpu, tmp) { |
1230 | /* | ||
1231 | * If power savings logic is enabled for a domain, stop there. | ||
1232 | */ | ||
1233 | if (tmp->flags & SD_POWERSAVINGS_BALANCE) | ||
1234 | break; | ||
1073 | if (tmp->flags & flag) | 1235 | if (tmp->flags & flag) |
1074 | sd = tmp; | 1236 | sd = tmp; |
1237 | } | ||
1075 | 1238 | ||
1076 | while (sd) { | 1239 | while (sd) { |
1077 | cpumask_t span; | 1240 | cpumask_t span; |
@@ -1221,17 +1384,19 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync) | |||
1221 | 1384 | ||
1222 | if (this_sd->flags & SD_WAKE_AFFINE) { | 1385 | if (this_sd->flags & SD_WAKE_AFFINE) { |
1223 | unsigned long tl = this_load; | 1386 | unsigned long tl = this_load; |
1387 | unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu); | ||
1388 | |||
1224 | /* | 1389 | /* |
1225 | * If sync wakeup then subtract the (maximum possible) | 1390 | * If sync wakeup then subtract the (maximum possible) |
1226 | * effect of the currently running task from the load | 1391 | * effect of the currently running task from the load |
1227 | * of the current CPU: | 1392 | * of the current CPU: |
1228 | */ | 1393 | */ |
1229 | if (sync) | 1394 | if (sync) |
1230 | tl -= SCHED_LOAD_SCALE; | 1395 | tl -= current->load_weight; |
1231 | 1396 | ||
1232 | if ((tl <= load && | 1397 | if ((tl <= load && |
1233 | tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) || | 1398 | tl + target_load(cpu, idx) <= tl_per_task) || |
1234 | 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) { | 1399 | 100*(tl + p->load_weight) <= imbalance*load) { |
1235 | /* | 1400 | /* |
1236 | * This domain has SD_WAKE_AFFINE and | 1401 | * This domain has SD_WAKE_AFFINE and |
1237 | * p is cache cold in this domain, and | 1402 | * p is cache cold in this domain, and |
@@ -1348,6 +1513,12 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
1348 | * event cannot wake it up and insert it on the runqueue either. | 1513 | * event cannot wake it up and insert it on the runqueue either. |
1349 | */ | 1514 | */ |
1350 | p->state = TASK_RUNNING; | 1515 | p->state = TASK_RUNNING; |
1516 | |||
1517 | /* | ||
1518 | * Make sure we do not leak PI boosting priority to the child: | ||
1519 | */ | ||
1520 | p->prio = current->normal_prio; | ||
1521 | |||
1351 | INIT_LIST_HEAD(&p->run_list); | 1522 | INIT_LIST_HEAD(&p->run_list); |
1352 | p->array = NULL; | 1523 | p->array = NULL; |
1353 | #ifdef CONFIG_SCHEDSTATS | 1524 | #ifdef CONFIG_SCHEDSTATS |
@@ -1427,10 +1598,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) | |||
1427 | __activate_task(p, rq); | 1598 | __activate_task(p, rq); |
1428 | else { | 1599 | else { |
1429 | p->prio = current->prio; | 1600 | p->prio = current->prio; |
1601 | p->normal_prio = current->normal_prio; | ||
1430 | list_add_tail(&p->run_list, ¤t->run_list); | 1602 | list_add_tail(&p->run_list, ¤t->run_list); |
1431 | p->array = current->array; | 1603 | p->array = current->array; |
1432 | p->array->nr_active++; | 1604 | p->array->nr_active++; |
1433 | rq->nr_running++; | 1605 | inc_nr_running(p, rq); |
1434 | } | 1606 | } |
1435 | set_need_resched(); | 1607 | set_need_resched(); |
1436 | } else | 1608 | } else |
@@ -1648,7 +1820,8 @@ unsigned long nr_uninterruptible(void) | |||
1648 | 1820 | ||
1649 | unsigned long long nr_context_switches(void) | 1821 | unsigned long long nr_context_switches(void) |
1650 | { | 1822 | { |
1651 | unsigned long long i, sum = 0; | 1823 | int i; |
1824 | unsigned long long sum = 0; | ||
1652 | 1825 | ||
1653 | for_each_possible_cpu(i) | 1826 | for_each_possible_cpu(i) |
1654 | sum += cpu_rq(i)->nr_switches; | 1827 | sum += cpu_rq(i)->nr_switches; |
@@ -1686,9 +1859,6 @@ unsigned long nr_active(void) | |||
1686 | /* | 1859 | /* |
1687 | * double_rq_lock - safely lock two runqueues | 1860 | * double_rq_lock - safely lock two runqueues |
1688 | * | 1861 | * |
1689 | * We must take them in cpu order to match code in | ||
1690 | * dependent_sleeper and wake_dependent_sleeper. | ||
1691 | * | ||
1692 | * Note this does not disable interrupts like task_rq_lock, | 1862 | * Note this does not disable interrupts like task_rq_lock, |
1693 | * you need to do so manually before calling. | 1863 | * you need to do so manually before calling. |
1694 | */ | 1864 | */ |
@@ -1700,7 +1870,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) | |||
1700 | spin_lock(&rq1->lock); | 1870 | spin_lock(&rq1->lock); |
1701 | __acquire(rq2->lock); /* Fake it out ;) */ | 1871 | __acquire(rq2->lock); /* Fake it out ;) */ |
1702 | } else { | 1872 | } else { |
1703 | if (rq1->cpu < rq2->cpu) { | 1873 | if (rq1 < rq2) { |
1704 | spin_lock(&rq1->lock); | 1874 | spin_lock(&rq1->lock); |
1705 | spin_lock(&rq2->lock); | 1875 | spin_lock(&rq2->lock); |
1706 | } else { | 1876 | } else { |
@@ -1736,7 +1906,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) | |||
1736 | __acquires(this_rq->lock) | 1906 | __acquires(this_rq->lock) |
1737 | { | 1907 | { |
1738 | if (unlikely(!spin_trylock(&busiest->lock))) { | 1908 | if (unlikely(!spin_trylock(&busiest->lock))) { |
1739 | if (busiest->cpu < this_rq->cpu) { | 1909 | if (busiest < this_rq) { |
1740 | spin_unlock(&this_rq->lock); | 1910 | spin_unlock(&this_rq->lock); |
1741 | spin_lock(&busiest->lock); | 1911 | spin_lock(&busiest->lock); |
1742 | spin_lock(&this_rq->lock); | 1912 | spin_lock(&this_rq->lock); |
@@ -1799,9 +1969,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, | |||
1799 | runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) | 1969 | runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) |
1800 | { | 1970 | { |
1801 | dequeue_task(p, src_array); | 1971 | dequeue_task(p, src_array); |
1802 | src_rq->nr_running--; | 1972 | dec_nr_running(p, src_rq); |
1803 | set_task_cpu(p, this_cpu); | 1973 | set_task_cpu(p, this_cpu); |
1804 | this_rq->nr_running++; | 1974 | inc_nr_running(p, this_rq); |
1805 | enqueue_task(p, this_array); | 1975 | enqueue_task(p, this_array); |
1806 | p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) | 1976 | p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) |
1807 | + this_rq->timestamp_last_tick; | 1977 | + this_rq->timestamp_last_tick; |
@@ -1848,26 +2018,42 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, | |||
1848 | return 1; | 2018 | return 1; |
1849 | } | 2019 | } |
1850 | 2020 | ||
2021 | #define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio) | ||
1851 | /* | 2022 | /* |
1852 | * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq, | 2023 | * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted |
1853 | * as part of a balancing operation within "domain". Returns the number of | 2024 | * load from busiest to this_rq, as part of a balancing operation within |
1854 | * tasks moved. | 2025 | * "domain". Returns the number of tasks moved. |
1855 | * | 2026 | * |
1856 | * Called with both runqueues locked. | 2027 | * Called with both runqueues locked. |
1857 | */ | 2028 | */ |
1858 | static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, | 2029 | static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, |
1859 | unsigned long max_nr_move, struct sched_domain *sd, | 2030 | unsigned long max_nr_move, unsigned long max_load_move, |
1860 | enum idle_type idle, int *all_pinned) | 2031 | struct sched_domain *sd, enum idle_type idle, |
2032 | int *all_pinned) | ||
1861 | { | 2033 | { |
1862 | prio_array_t *array, *dst_array; | 2034 | prio_array_t *array, *dst_array; |
1863 | struct list_head *head, *curr; | 2035 | struct list_head *head, *curr; |
1864 | int idx, pulled = 0, pinned = 0; | 2036 | int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio; |
2037 | int busiest_best_prio_seen; | ||
2038 | int skip_for_load; /* skip the task based on weighted load issues */ | ||
2039 | long rem_load_move; | ||
1865 | task_t *tmp; | 2040 | task_t *tmp; |
1866 | 2041 | ||
1867 | if (max_nr_move == 0) | 2042 | if (max_nr_move == 0 || max_load_move == 0) |
1868 | goto out; | 2043 | goto out; |
1869 | 2044 | ||
2045 | rem_load_move = max_load_move; | ||
1870 | pinned = 1; | 2046 | pinned = 1; |
2047 | this_best_prio = rq_best_prio(this_rq); | ||
2048 | busiest_best_prio = rq_best_prio(busiest); | ||
2049 | /* | ||
2050 | * Enable handling of the case where there is more than one task | ||
2051 | * with the best priority. If the current running task is one | ||
2052 | * of those with prio==busiest_best_prio we know it won't be moved | ||
2053 | * and therefore it's safe to override the skip (based on load) of | ||
2054 | * any task we find with that prio. | ||
2055 | */ | ||
2056 | busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio; | ||
1871 | 2057 | ||
1872 | /* | 2058 | /* |
1873 | * We first consider expired tasks. Those will likely not be | 2059 | * We first consider expired tasks. Those will likely not be |
@@ -1907,7 +2093,17 @@ skip_queue: | |||
1907 | 2093 | ||
1908 | curr = curr->prev; | 2094 | curr = curr->prev; |
1909 | 2095 | ||
1910 | if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { | 2096 | /* |
2097 | * To help distribute high priority tasks across CPUs we don't | ||
2098 | * skip a task if it will be the highest priority task (i.e. smallest | ||
2099 | * prio value) on its new queue regardless of its load weight | ||
2100 | */ | ||
2101 | skip_for_load = tmp->load_weight > rem_load_move; | ||
2102 | if (skip_for_load && idx < this_best_prio) | ||
2103 | skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio; | ||
2104 | if (skip_for_load || | ||
2105 | !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { | ||
2106 | busiest_best_prio_seen |= idx == busiest_best_prio; | ||
1911 | if (curr != head) | 2107 | if (curr != head) |
1912 | goto skip_queue; | 2108 | goto skip_queue; |
1913 | idx++; | 2109 | idx++; |
@@ -1921,9 +2117,15 @@ skip_queue: | |||
1921 | 2117 | ||
1922 | pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); | 2118 | pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); |
1923 | pulled++; | 2119 | pulled++; |
2120 | rem_load_move -= tmp->load_weight; | ||
1924 | 2121 | ||
1925 | /* We only want to steal up to the prescribed number of tasks. */ | 2122 | /* |
1926 | if (pulled < max_nr_move) { | 2123 | * We only want to steal up to the prescribed number of tasks |
2124 | * and the prescribed amount of weighted load. | ||
2125 | */ | ||
2126 | if (pulled < max_nr_move && rem_load_move > 0) { | ||
2127 | if (idx < this_best_prio) | ||
2128 | this_best_prio = idx; | ||
1927 | if (curr != head) | 2129 | if (curr != head) |
1928 | goto skip_queue; | 2130 | goto skip_queue; |
1929 | idx++; | 2131 | idx++; |
@@ -1944,7 +2146,7 @@ out: | |||
1944 | 2146 | ||
1945 | /* | 2147 | /* |
1946 | * find_busiest_group finds and returns the busiest CPU group within the | 2148 | * find_busiest_group finds and returns the busiest CPU group within the |
1947 | * domain. It calculates and returns the number of tasks which should be | 2149 | * domain. It calculates and returns the amount of weighted load which should be |
1948 | * moved to restore balance via the imbalance parameter. | 2150 | * moved to restore balance via the imbalance parameter. |
1949 | */ | 2151 | */ |
1950 | static struct sched_group * | 2152 | static struct sched_group * |
@@ -1954,9 +2156,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1954 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 2156 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
1955 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 2157 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
1956 | unsigned long max_pull; | 2158 | unsigned long max_pull; |
2159 | unsigned long busiest_load_per_task, busiest_nr_running; | ||
2160 | unsigned long this_load_per_task, this_nr_running; | ||
1957 | int load_idx; | 2161 | int load_idx; |
2162 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2163 | int power_savings_balance = 1; | ||
2164 | unsigned long leader_nr_running = 0, min_load_per_task = 0; | ||
2165 | unsigned long min_nr_running = ULONG_MAX; | ||
2166 | struct sched_group *group_min = NULL, *group_leader = NULL; | ||
2167 | #endif | ||
1958 | 2168 | ||
1959 | max_load = this_load = total_load = total_pwr = 0; | 2169 | max_load = this_load = total_load = total_pwr = 0; |
2170 | busiest_load_per_task = busiest_nr_running = 0; | ||
2171 | this_load_per_task = this_nr_running = 0; | ||
1960 | if (idle == NOT_IDLE) | 2172 | if (idle == NOT_IDLE) |
1961 | load_idx = sd->busy_idx; | 2173 | load_idx = sd->busy_idx; |
1962 | else if (idle == NEWLY_IDLE) | 2174 | else if (idle == NEWLY_IDLE) |
@@ -1965,16 +2177,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1965 | load_idx = sd->idle_idx; | 2177 | load_idx = sd->idle_idx; |
1966 | 2178 | ||
1967 | do { | 2179 | do { |
1968 | unsigned long load; | 2180 | unsigned long load, group_capacity; |
1969 | int local_group; | 2181 | int local_group; |
1970 | int i; | 2182 | int i; |
2183 | unsigned long sum_nr_running, sum_weighted_load; | ||
1971 | 2184 | ||
1972 | local_group = cpu_isset(this_cpu, group->cpumask); | 2185 | local_group = cpu_isset(this_cpu, group->cpumask); |
1973 | 2186 | ||
1974 | /* Tally up the load of all CPUs in the group */ | 2187 | /* Tally up the load of all CPUs in the group */ |
1975 | avg_load = 0; | 2188 | sum_weighted_load = sum_nr_running = avg_load = 0; |
1976 | 2189 | ||
1977 | for_each_cpu_mask(i, group->cpumask) { | 2190 | for_each_cpu_mask(i, group->cpumask) { |
2191 | runqueue_t *rq = cpu_rq(i); | ||
2192 | |||
1978 | if (*sd_idle && !idle_cpu(i)) | 2193 | if (*sd_idle && !idle_cpu(i)) |
1979 | *sd_idle = 0; | 2194 | *sd_idle = 0; |
1980 | 2195 | ||
@@ -1985,6 +2200,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1985 | load = source_load(i, load_idx); | 2200 | load = source_load(i, load_idx); |
1986 | 2201 | ||
1987 | avg_load += load; | 2202 | avg_load += load; |
2203 | sum_nr_running += rq->nr_running; | ||
2204 | sum_weighted_load += rq->raw_weighted_load; | ||
1988 | } | 2205 | } |
1989 | 2206 | ||
1990 | total_load += avg_load; | 2207 | total_load += avg_load; |
@@ -1993,17 +2210,80 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1993 | /* Adjust by relative CPU power of the group */ | 2210 | /* Adjust by relative CPU power of the group */ |
1994 | avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; | 2211 | avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; |
1995 | 2212 | ||
2213 | group_capacity = group->cpu_power / SCHED_LOAD_SCALE; | ||
2214 | |||
1996 | if (local_group) { | 2215 | if (local_group) { |
1997 | this_load = avg_load; | 2216 | this_load = avg_load; |
1998 | this = group; | 2217 | this = group; |
1999 | } else if (avg_load > max_load) { | 2218 | this_nr_running = sum_nr_running; |
2219 | this_load_per_task = sum_weighted_load; | ||
2220 | } else if (avg_load > max_load && | ||
2221 | sum_nr_running > group_capacity) { | ||
2000 | max_load = avg_load; | 2222 | max_load = avg_load; |
2001 | busiest = group; | 2223 | busiest = group; |
2224 | busiest_nr_running = sum_nr_running; | ||
2225 | busiest_load_per_task = sum_weighted_load; | ||
2002 | } | 2226 | } |
2227 | |||
2228 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2229 | /* | ||
2230 | * Busy processors will not participate in power savings | ||
2231 | * balance. | ||
2232 | */ | ||
2233 | if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
2234 | goto group_next; | ||
2235 | |||
2236 | /* | ||
2237 | * If the local group is idle or completely loaded | ||
2238 | * no need to do power savings balance at this domain | ||
2239 | */ | ||
2240 | if (local_group && (this_nr_running >= group_capacity || | ||
2241 | !this_nr_running)) | ||
2242 | power_savings_balance = 0; | ||
2243 | |||
2244 | /* | ||
2245 | * If a group is already running at full capacity or idle, | ||
2246 | * don't include that group in power savings calculations | ||
2247 | */ | ||
2248 | if (!power_savings_balance || sum_nr_running >= group_capacity | ||
2249 | || !sum_nr_running) | ||
2250 | goto group_next; | ||
2251 | |||
2252 | /* | ||
2253 | * Calculate the group which has the least non-idle load. | ||
2254 | * This is the group from where we need to pick up the load | ||
2255 | * for saving power | ||
2256 | */ | ||
2257 | if ((sum_nr_running < min_nr_running) || | ||
2258 | (sum_nr_running == min_nr_running && | ||
2259 | first_cpu(group->cpumask) < | ||
2260 | first_cpu(group_min->cpumask))) { | ||
2261 | group_min = group; | ||
2262 | min_nr_running = sum_nr_running; | ||
2263 | min_load_per_task = sum_weighted_load / | ||
2264 | sum_nr_running; | ||
2265 | } | ||
2266 | |||
2267 | /* | ||
2268 | * Calculate the group which is almost near its | ||
2269 | * capacity but still has some space to pick up some load | ||
2270 | * from other group and save more power | ||
2271 | */ | ||
2272 | if (sum_nr_running <= group_capacity - 1) | ||
2273 | if (sum_nr_running > leader_nr_running || | ||
2274 | (sum_nr_running == leader_nr_running && | ||
2275 | first_cpu(group->cpumask) > | ||
2276 | first_cpu(group_leader->cpumask))) { | ||
2277 | group_leader = group; | ||
2278 | leader_nr_running = sum_nr_running; | ||
2279 | } | ||
2280 | |||
2281 | group_next: | ||
2282 | #endif | ||
2003 | group = group->next; | 2283 | group = group->next; |
2004 | } while (group != sd->groups); | 2284 | } while (group != sd->groups); |
2005 | 2285 | ||
2006 | if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE) | 2286 | if (!busiest || this_load >= max_load || busiest_nr_running == 0) |
2007 | goto out_balanced; | 2287 | goto out_balanced; |
2008 | 2288 | ||
2009 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; | 2289 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; |
@@ -2012,6 +2292,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2012 | 100*max_load <= sd->imbalance_pct*this_load) | 2292 | 100*max_load <= sd->imbalance_pct*this_load) |
2013 | goto out_balanced; | 2293 | goto out_balanced; |
2014 | 2294 | ||
2295 | busiest_load_per_task /= busiest_nr_running; | ||
2015 | /* | 2296 | /* |
2016 | * We're trying to get all the cpus to the average_load, so we don't | 2297 | * We're trying to get all the cpus to the average_load, so we don't |
2017 | * want to push ourselves above the average load, nor do we wish to | 2298 | * want to push ourselves above the average load, nor do we wish to |
@@ -2023,21 +2304,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2023 | * by pulling tasks to us. Be careful of negative numbers as they'll | 2304 | * by pulling tasks to us. Be careful of negative numbers as they'll |
2024 | * appear as very large values with unsigned longs. | 2305 | * appear as very large values with unsigned longs. |
2025 | */ | 2306 | */ |
2307 | if (max_load <= busiest_load_per_task) | ||
2308 | goto out_balanced; | ||
2309 | |||
2310 | /* | ||
2311 | * In the presence of smp nice balancing, certain scenarios can have | ||
2312 | * max load less than avg load(as we skip the groups at or below | ||
2313 | * its cpu_power, while calculating max_load..) | ||
2314 | */ | ||
2315 | if (max_load < avg_load) { | ||
2316 | *imbalance = 0; | ||
2317 | goto small_imbalance; | ||
2318 | } | ||
2026 | 2319 | ||
2027 | /* Don't want to pull so many tasks that a group would go idle */ | 2320 | /* Don't want to pull so many tasks that a group would go idle */ |
2028 | max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE); | 2321 | max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); |
2029 | 2322 | ||
2030 | /* How much load to actually move to equalise the imbalance */ | 2323 | /* How much load to actually move to equalise the imbalance */ |
2031 | *imbalance = min(max_pull * busiest->cpu_power, | 2324 | *imbalance = min(max_pull * busiest->cpu_power, |
2032 | (avg_load - this_load) * this->cpu_power) | 2325 | (avg_load - this_load) * this->cpu_power) |
2033 | / SCHED_LOAD_SCALE; | 2326 | / SCHED_LOAD_SCALE; |
2034 | 2327 | ||
2035 | if (*imbalance < SCHED_LOAD_SCALE) { | 2328 | /* |
2036 | unsigned long pwr_now = 0, pwr_move = 0; | 2329 | * if *imbalance is less than the average load per runnable task |
2330 | * there is no gaurantee that any tasks will be moved so we'll have | ||
2331 | * a think about bumping its value to force at least one task to be | ||
2332 | * moved | ||
2333 | */ | ||
2334 | if (*imbalance < busiest_load_per_task) { | ||
2335 | unsigned long pwr_now, pwr_move; | ||
2037 | unsigned long tmp; | 2336 | unsigned long tmp; |
2337 | unsigned int imbn; | ||
2338 | |||
2339 | small_imbalance: | ||
2340 | pwr_move = pwr_now = 0; | ||
2341 | imbn = 2; | ||
2342 | if (this_nr_running) { | ||
2343 | this_load_per_task /= this_nr_running; | ||
2344 | if (busiest_load_per_task > this_load_per_task) | ||
2345 | imbn = 1; | ||
2346 | } else | ||
2347 | this_load_per_task = SCHED_LOAD_SCALE; | ||
2038 | 2348 | ||
2039 | if (max_load - this_load >= SCHED_LOAD_SCALE*2) { | 2349 | if (max_load - this_load >= busiest_load_per_task * imbn) { |
2040 | *imbalance = 1; | 2350 | *imbalance = busiest_load_per_task; |
2041 | return busiest; | 2351 | return busiest; |
2042 | } | 2352 | } |
2043 | 2353 | ||
@@ -2047,39 +2357,47 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2047 | * moving them. | 2357 | * moving them. |
2048 | */ | 2358 | */ |
2049 | 2359 | ||
2050 | pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load); | 2360 | pwr_now += busiest->cpu_power * |
2051 | pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load); | 2361 | min(busiest_load_per_task, max_load); |
2362 | pwr_now += this->cpu_power * | ||
2363 | min(this_load_per_task, this_load); | ||
2052 | pwr_now /= SCHED_LOAD_SCALE; | 2364 | pwr_now /= SCHED_LOAD_SCALE; |
2053 | 2365 | ||
2054 | /* Amount of load we'd subtract */ | 2366 | /* Amount of load we'd subtract */ |
2055 | tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power; | 2367 | tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power; |
2056 | if (max_load > tmp) | 2368 | if (max_load > tmp) |
2057 | pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE, | 2369 | pwr_move += busiest->cpu_power * |
2058 | max_load - tmp); | 2370 | min(busiest_load_per_task, max_load - tmp); |
2059 | 2371 | ||
2060 | /* Amount of load we'd add */ | 2372 | /* Amount of load we'd add */ |
2061 | if (max_load*busiest->cpu_power < | 2373 | if (max_load*busiest->cpu_power < |
2062 | SCHED_LOAD_SCALE*SCHED_LOAD_SCALE) | 2374 | busiest_load_per_task*SCHED_LOAD_SCALE) |
2063 | tmp = max_load*busiest->cpu_power/this->cpu_power; | 2375 | tmp = max_load*busiest->cpu_power/this->cpu_power; |
2064 | else | 2376 | else |
2065 | tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power; | 2377 | tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power; |
2066 | pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp); | 2378 | pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp); |
2067 | pwr_move /= SCHED_LOAD_SCALE; | 2379 | pwr_move /= SCHED_LOAD_SCALE; |
2068 | 2380 | ||
2069 | /* Move if we gain throughput */ | 2381 | /* Move if we gain throughput */ |
2070 | if (pwr_move <= pwr_now) | 2382 | if (pwr_move <= pwr_now) |
2071 | goto out_balanced; | 2383 | goto out_balanced; |
2072 | 2384 | ||
2073 | *imbalance = 1; | 2385 | *imbalance = busiest_load_per_task; |
2074 | return busiest; | ||
2075 | } | 2386 | } |
2076 | 2387 | ||
2077 | /* Get rid of the scaling factor, rounding down as we divide */ | ||
2078 | *imbalance = *imbalance / SCHED_LOAD_SCALE; | ||
2079 | return busiest; | 2388 | return busiest; |
2080 | 2389 | ||
2081 | out_balanced: | 2390 | out_balanced: |
2391 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2392 | if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
2393 | goto ret; | ||
2082 | 2394 | ||
2395 | if (this == group_leader && group_leader != group_min) { | ||
2396 | *imbalance = min_load_per_task; | ||
2397 | return group_min; | ||
2398 | } | ||
2399 | ret: | ||
2400 | #endif | ||
2083 | *imbalance = 0; | 2401 | *imbalance = 0; |
2084 | return NULL; | 2402 | return NULL; |
2085 | } | 2403 | } |
@@ -2088,18 +2406,21 @@ out_balanced: | |||
2088 | * find_busiest_queue - find the busiest runqueue among the cpus in group. | 2406 | * find_busiest_queue - find the busiest runqueue among the cpus in group. |
2089 | */ | 2407 | */ |
2090 | static runqueue_t *find_busiest_queue(struct sched_group *group, | 2408 | static runqueue_t *find_busiest_queue(struct sched_group *group, |
2091 | enum idle_type idle) | 2409 | enum idle_type idle, unsigned long imbalance) |
2092 | { | 2410 | { |
2093 | unsigned long load, max_load = 0; | 2411 | unsigned long max_load = 0; |
2094 | runqueue_t *busiest = NULL; | 2412 | runqueue_t *busiest = NULL, *rqi; |
2095 | int i; | 2413 | int i; |
2096 | 2414 | ||
2097 | for_each_cpu_mask(i, group->cpumask) { | 2415 | for_each_cpu_mask(i, group->cpumask) { |
2098 | load = source_load(i, 0); | 2416 | rqi = cpu_rq(i); |
2099 | 2417 | ||
2100 | if (load > max_load) { | 2418 | if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance) |
2101 | max_load = load; | 2419 | continue; |
2102 | busiest = cpu_rq(i); | 2420 | |
2421 | if (rqi->raw_weighted_load > max_load) { | ||
2422 | max_load = rqi->raw_weighted_load; | ||
2423 | busiest = rqi; | ||
2103 | } | 2424 | } |
2104 | } | 2425 | } |
2105 | 2426 | ||
@@ -2112,6 +2433,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group, | |||
2112 | */ | 2433 | */ |
2113 | #define MAX_PINNED_INTERVAL 512 | 2434 | #define MAX_PINNED_INTERVAL 512 |
2114 | 2435 | ||
2436 | #define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0) | ||
2115 | /* | 2437 | /* |
2116 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | 2438 | * Check this_cpu to ensure it is balanced within domain. Attempt to move |
2117 | * tasks if there is an imbalance. | 2439 | * tasks if there is an imbalance. |
@@ -2128,7 +2450,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2128 | int active_balance = 0; | 2450 | int active_balance = 0; |
2129 | int sd_idle = 0; | 2451 | int sd_idle = 0; |
2130 | 2452 | ||
2131 | if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER) | 2453 | if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && |
2454 | !sched_smt_power_savings) | ||
2132 | sd_idle = 1; | 2455 | sd_idle = 1; |
2133 | 2456 | ||
2134 | schedstat_inc(sd, lb_cnt[idle]); | 2457 | schedstat_inc(sd, lb_cnt[idle]); |
@@ -2139,7 +2462,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2139 | goto out_balanced; | 2462 | goto out_balanced; |
2140 | } | 2463 | } |
2141 | 2464 | ||
2142 | busiest = find_busiest_queue(group, idle); | 2465 | busiest = find_busiest_queue(group, idle, imbalance); |
2143 | if (!busiest) { | 2466 | if (!busiest) { |
2144 | schedstat_inc(sd, lb_nobusyq[idle]); | 2467 | schedstat_inc(sd, lb_nobusyq[idle]); |
2145 | goto out_balanced; | 2468 | goto out_balanced; |
@@ -2159,6 +2482,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2159 | */ | 2482 | */ |
2160 | double_rq_lock(this_rq, busiest); | 2483 | double_rq_lock(this_rq, busiest); |
2161 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2484 | nr_moved = move_tasks(this_rq, this_cpu, busiest, |
2485 | minus_1_or_zero(busiest->nr_running), | ||
2162 | imbalance, sd, idle, &all_pinned); | 2486 | imbalance, sd, idle, &all_pinned); |
2163 | double_rq_unlock(this_rq, busiest); | 2487 | double_rq_unlock(this_rq, busiest); |
2164 | 2488 | ||
@@ -2216,7 +2540,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2216 | sd->balance_interval *= 2; | 2540 | sd->balance_interval *= 2; |
2217 | } | 2541 | } |
2218 | 2542 | ||
2219 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2543 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
2544 | !sched_smt_power_savings) | ||
2220 | return -1; | 2545 | return -1; |
2221 | return nr_moved; | 2546 | return nr_moved; |
2222 | 2547 | ||
@@ -2231,7 +2556,7 @@ out_one_pinned: | |||
2231 | (sd->balance_interval < sd->max_interval)) | 2556 | (sd->balance_interval < sd->max_interval)) |
2232 | sd->balance_interval *= 2; | 2557 | sd->balance_interval *= 2; |
2233 | 2558 | ||
2234 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2559 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2235 | return -1; | 2560 | return -1; |
2236 | return 0; | 2561 | return 0; |
2237 | } | 2562 | } |
@@ -2252,7 +2577,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2252 | int nr_moved = 0; | 2577 | int nr_moved = 0; |
2253 | int sd_idle = 0; | 2578 | int sd_idle = 0; |
2254 | 2579 | ||
2255 | if (sd->flags & SD_SHARE_CPUPOWER) | 2580 | if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2256 | sd_idle = 1; | 2581 | sd_idle = 1; |
2257 | 2582 | ||
2258 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); | 2583 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); |
@@ -2262,7 +2587,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2262 | goto out_balanced; | 2587 | goto out_balanced; |
2263 | } | 2588 | } |
2264 | 2589 | ||
2265 | busiest = find_busiest_queue(group, NEWLY_IDLE); | 2590 | busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance); |
2266 | if (!busiest) { | 2591 | if (!busiest) { |
2267 | schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); | 2592 | schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); |
2268 | goto out_balanced; | 2593 | goto out_balanced; |
@@ -2277,6 +2602,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2277 | /* Attempt to move tasks */ | 2602 | /* Attempt to move tasks */ |
2278 | double_lock_balance(this_rq, busiest); | 2603 | double_lock_balance(this_rq, busiest); |
2279 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2604 | nr_moved = move_tasks(this_rq, this_cpu, busiest, |
2605 | minus_1_or_zero(busiest->nr_running), | ||
2280 | imbalance, sd, NEWLY_IDLE, NULL); | 2606 | imbalance, sd, NEWLY_IDLE, NULL); |
2281 | spin_unlock(&busiest->lock); | 2607 | spin_unlock(&busiest->lock); |
2282 | } | 2608 | } |
@@ -2292,7 +2618,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2292 | 2618 | ||
2293 | out_balanced: | 2619 | out_balanced: |
2294 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); | 2620 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); |
2295 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2621 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2296 | return -1; | 2622 | return -1; |
2297 | sd->nr_balance_failed = 0; | 2623 | sd->nr_balance_failed = 0; |
2298 | return 0; | 2624 | return 0; |
@@ -2347,17 +2673,19 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) | |||
2347 | double_lock_balance(busiest_rq, target_rq); | 2673 | double_lock_balance(busiest_rq, target_rq); |
2348 | 2674 | ||
2349 | /* Search for an sd spanning us and the target CPU. */ | 2675 | /* Search for an sd spanning us and the target CPU. */ |
2350 | for_each_domain(target_cpu, sd) | 2676 | for_each_domain(target_cpu, sd) { |
2351 | if ((sd->flags & SD_LOAD_BALANCE) && | 2677 | if ((sd->flags & SD_LOAD_BALANCE) && |
2352 | cpu_isset(busiest_cpu, sd->span)) | 2678 | cpu_isset(busiest_cpu, sd->span)) |
2353 | break; | 2679 | break; |
2680 | } | ||
2354 | 2681 | ||
2355 | if (unlikely(sd == NULL)) | 2682 | if (unlikely(sd == NULL)) |
2356 | goto out; | 2683 | goto out; |
2357 | 2684 | ||
2358 | schedstat_inc(sd, alb_cnt); | 2685 | schedstat_inc(sd, alb_cnt); |
2359 | 2686 | ||
2360 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL)) | 2687 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, |
2688 | RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL)) | ||
2361 | schedstat_inc(sd, alb_pushed); | 2689 | schedstat_inc(sd, alb_pushed); |
2362 | else | 2690 | else |
2363 | schedstat_inc(sd, alb_failed); | 2691 | schedstat_inc(sd, alb_failed); |
@@ -2385,7 +2713,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq, | |||
2385 | struct sched_domain *sd; | 2713 | struct sched_domain *sd; |
2386 | int i; | 2714 | int i; |
2387 | 2715 | ||
2388 | this_load = this_rq->nr_running * SCHED_LOAD_SCALE; | 2716 | this_load = this_rq->raw_weighted_load; |
2389 | /* Update our load */ | 2717 | /* Update our load */ |
2390 | for (i = 0; i < 3; i++) { | 2718 | for (i = 0; i < 3; i++) { |
2391 | unsigned long new_load = this_load; | 2719 | unsigned long new_load = this_load; |
@@ -2686,48 +3014,35 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq) | |||
2686 | resched_task(rq->idle); | 3014 | resched_task(rq->idle); |
2687 | } | 3015 | } |
2688 | 3016 | ||
2689 | static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | 3017 | /* |
3018 | * Called with interrupt disabled and this_rq's runqueue locked. | ||
3019 | */ | ||
3020 | static void wake_sleeping_dependent(int this_cpu) | ||
2690 | { | 3021 | { |
2691 | struct sched_domain *tmp, *sd = NULL; | 3022 | struct sched_domain *tmp, *sd = NULL; |
2692 | cpumask_t sibling_map; | ||
2693 | int i; | 3023 | int i; |
2694 | 3024 | ||
2695 | for_each_domain(this_cpu, tmp) | 3025 | for_each_domain(this_cpu, tmp) { |
2696 | if (tmp->flags & SD_SHARE_CPUPOWER) | 3026 | if (tmp->flags & SD_SHARE_CPUPOWER) { |
2697 | sd = tmp; | 3027 | sd = tmp; |
3028 | break; | ||
3029 | } | ||
3030 | } | ||
2698 | 3031 | ||
2699 | if (!sd) | 3032 | if (!sd) |
2700 | return; | 3033 | return; |
2701 | 3034 | ||
2702 | /* | 3035 | for_each_cpu_mask(i, sd->span) { |
2703 | * Unlock the current runqueue because we have to lock in | ||
2704 | * CPU order to avoid deadlocks. Caller knows that we might | ||
2705 | * unlock. We keep IRQs disabled. | ||
2706 | */ | ||
2707 | spin_unlock(&this_rq->lock); | ||
2708 | |||
2709 | sibling_map = sd->span; | ||
2710 | |||
2711 | for_each_cpu_mask(i, sibling_map) | ||
2712 | spin_lock(&cpu_rq(i)->lock); | ||
2713 | /* | ||
2714 | * We clear this CPU from the mask. This both simplifies the | ||
2715 | * inner loop and keps this_rq locked when we exit: | ||
2716 | */ | ||
2717 | cpu_clear(this_cpu, sibling_map); | ||
2718 | |||
2719 | for_each_cpu_mask(i, sibling_map) { | ||
2720 | runqueue_t *smt_rq = cpu_rq(i); | 3036 | runqueue_t *smt_rq = cpu_rq(i); |
2721 | 3037 | ||
3038 | if (i == this_cpu) | ||
3039 | continue; | ||
3040 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3041 | continue; | ||
3042 | |||
2722 | wakeup_busy_runqueue(smt_rq); | 3043 | wakeup_busy_runqueue(smt_rq); |
3044 | spin_unlock(&smt_rq->lock); | ||
2723 | } | 3045 | } |
2724 | |||
2725 | for_each_cpu_mask(i, sibling_map) | ||
2726 | spin_unlock(&cpu_rq(i)->lock); | ||
2727 | /* | ||
2728 | * We exit with this_cpu's rq still held and IRQs | ||
2729 | * still disabled: | ||
2730 | */ | ||
2731 | } | 3046 | } |
2732 | 3047 | ||
2733 | /* | 3048 | /* |
@@ -2740,52 +3055,46 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) | |||
2740 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | 3055 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; |
2741 | } | 3056 | } |
2742 | 3057 | ||
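As a worked example (assuming the common SMT-domain default of per_cpu_gain == 25), a sibling task holding a 100 ms timeslice is credited by smt_slice() with an effective slice of 100 * (100 - 25) / 100 == 75 ms, which is the value the dependent-sleeper test below compares against task_timeslice(p).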
2743 | static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | 3058 | /* |
3059 | * To minimise lock contention and not have to drop this_rq's runlock we only | ||
3060 | * trylock the sibling runqueues and bypass those runqueues if we fail to | ||
3061 | * acquire their lock. As we only trylock the normal locking order does not | ||
3062 | * need to be obeyed. | ||
3063 | */ | ||
3064 | static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) | ||
2744 | { | 3065 | { |
2745 | struct sched_domain *tmp, *sd = NULL; | 3066 | struct sched_domain *tmp, *sd = NULL; |
2746 | cpumask_t sibling_map; | ||
2747 | prio_array_t *array; | ||
2748 | int ret = 0, i; | 3067 | int ret = 0, i; |
2749 | task_t *p; | ||
2750 | 3068 | ||
2751 | for_each_domain(this_cpu, tmp) | 3069 | /* kernel/rt threads do not participate in dependent sleeping */ |
2752 | if (tmp->flags & SD_SHARE_CPUPOWER) | 3070 | if (!p->mm || rt_task(p)) |
3071 | return 0; | ||
3072 | |||
3073 | for_each_domain(this_cpu, tmp) { | ||
3074 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
2753 | sd = tmp; | 3075 | sd = tmp; |
3076 | break; | ||
3077 | } | ||
3078 | } | ||
2754 | 3079 | ||
2755 | if (!sd) | 3080 | if (!sd) |
2756 | return 0; | 3081 | return 0; |
2757 | 3082 | ||
2758 | /* | 3083 | for_each_cpu_mask(i, sd->span) { |
2759 | * The same locking rules and details apply as for | 3084 | runqueue_t *smt_rq; |
2760 | * wake_sleeping_dependent(): | 3085 | task_t *smt_curr; |
2761 | */ | ||
2762 | spin_unlock(&this_rq->lock); | ||
2763 | sibling_map = sd->span; | ||
2764 | for_each_cpu_mask(i, sibling_map) | ||
2765 | spin_lock(&cpu_rq(i)->lock); | ||
2766 | cpu_clear(this_cpu, sibling_map); | ||
2767 | 3086 | ||
2768 | /* | 3087 | if (i == this_cpu) |
2769 | * Establish next task to be run - it might have gone away because | 3088 | continue; |
2770 | * we released the runqueue lock above: | ||
2771 | */ | ||
2772 | if (!this_rq->nr_running) | ||
2773 | goto out_unlock; | ||
2774 | array = this_rq->active; | ||
2775 | if (!array->nr_active) | ||
2776 | array = this_rq->expired; | ||
2777 | BUG_ON(!array->nr_active); | ||
2778 | 3089 | ||
2779 | p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next, | 3090 | smt_rq = cpu_rq(i); |
2780 | task_t, run_list); | 3091 | if (unlikely(!spin_trylock(&smt_rq->lock))) |
3092 | continue; | ||
2781 | 3093 | ||
2782 | for_each_cpu_mask(i, sibling_map) { | 3094 | smt_curr = smt_rq->curr; |
2783 | runqueue_t *smt_rq = cpu_rq(i); | ||
2784 | task_t *smt_curr = smt_rq->curr; | ||
2785 | 3095 | ||
2786 | /* Kernel threads do not participate in dependent sleeping */ | 3096 | if (!smt_curr->mm) |
2787 | if (!p->mm || !smt_curr->mm || rt_task(p)) | 3097 | goto unlock; |
2788 | goto check_smt_task; | ||
2789 | 3098 | ||
2790 | /* | 3099 | /* |
2791 | * If a user task with lower static priority than the | 3100 | * If a user task with lower static priority than the |
@@ -2803,49 +3112,24 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | |||
2803 | if ((jiffies % DEF_TIMESLICE) > | 3112 | if ((jiffies % DEF_TIMESLICE) > |
2804 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | 3113 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) |
2805 | ret = 1; | 3114 | ret = 1; |
2806 | } else | 3115 | } else { |
2807 | if (smt_curr->static_prio < p->static_prio && | 3116 | if (smt_curr->static_prio < p->static_prio && |
2808 | !TASK_PREEMPTS_CURR(p, smt_rq) && | 3117 | !TASK_PREEMPTS_CURR(p, smt_rq) && |
2809 | smt_slice(smt_curr, sd) > task_timeslice(p)) | 3118 | smt_slice(smt_curr, sd) > task_timeslice(p)) |
2810 | ret = 1; | 3119 | ret = 1; |
2811 | |||
2812 | check_smt_task: | ||
2813 | if ((!smt_curr->mm && smt_curr != smt_rq->idle) || | ||
2814 | rt_task(smt_curr)) | ||
2815 | continue; | ||
2816 | if (!p->mm) { | ||
2817 | wakeup_busy_runqueue(smt_rq); | ||
2818 | continue; | ||
2819 | } | ||
2820 | |||
2821 | /* | ||
2822 | * Reschedule a lower priority task on the SMT sibling for | ||
2823 | * it to be put to sleep, or wake it up if it has been put to | ||
2824 | * sleep for priority reasons to see if it should run now. | ||
2825 | */ | ||
2826 | if (rt_task(p)) { | ||
2827 | if ((jiffies % DEF_TIMESLICE) > | ||
2828 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | ||
2829 | resched_task(smt_curr); | ||
2830 | } else { | ||
2831 | if (TASK_PREEMPTS_CURR(p, smt_rq) && | ||
2832 | smt_slice(p, sd) > task_timeslice(smt_curr)) | ||
2833 | resched_task(smt_curr); | ||
2834 | else | ||
2835 | wakeup_busy_runqueue(smt_rq); | ||
2836 | } | 3120 | } |
3121 | unlock: | ||
3122 | spin_unlock(&smt_rq->lock); | ||
2837 | } | 3123 | } |
2838 | out_unlock: | ||
2839 | for_each_cpu_mask(i, sibling_map) | ||
2840 | spin_unlock(&cpu_rq(i)->lock); | ||
2841 | return ret; | 3124 | return ret; |
2842 | } | 3125 | } |
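Both SMT-nice paths now share the same sibling walk: this_rq stays locked the whole time, and a sibling whose runqueue lock cannot be taken immediately is simply skipped until the next pass. Condensed from the two functions above (a sketch, not a verbatim copy):

for_each_cpu_mask(i, sd->span) {
        runqueue_t *smt_rq = cpu_rq(i);

        if (i == this_cpu)
                continue;
        if (unlikely(!spin_trylock(&smt_rq->lock)))
                continue;       /* contended: skip this sibling for now */

        /* ... wake up or compare against smt_rq->curr ... */

        spin_unlock(&smt_rq->lock);
}

Because no sibling lock is ever waited for while another runqueue lock is held, the old unlock/relock-in-CPU-order sequence, and the rechecks it forced on schedule(), can go away.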
2843 | #else | 3126 | #else |
2844 | static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | 3127 | static inline void wake_sleeping_dependent(int this_cpu) |
2845 | { | 3128 | { |
2846 | } | 3129 | } |
2847 | 3130 | ||
2848 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | 3131 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq, |
3132 | task_t *p) | ||
2849 | { | 3133 | { |
2850 | return 0; | 3134 | return 0; |
2851 | } | 3135 | } |
@@ -2967,32 +3251,13 @@ need_resched_nonpreemptible: | |||
2967 | 3251 | ||
2968 | cpu = smp_processor_id(); | 3252 | cpu = smp_processor_id(); |
2969 | if (unlikely(!rq->nr_running)) { | 3253 | if (unlikely(!rq->nr_running)) { |
2970 | go_idle: | ||
2971 | idle_balance(cpu, rq); | 3254 | idle_balance(cpu, rq); |
2972 | if (!rq->nr_running) { | 3255 | if (!rq->nr_running) { |
2973 | next = rq->idle; | 3256 | next = rq->idle; |
2974 | rq->expired_timestamp = 0; | 3257 | rq->expired_timestamp = 0; |
2975 | wake_sleeping_dependent(cpu, rq); | 3258 | wake_sleeping_dependent(cpu); |
2976 | /* | ||
2977 | * wake_sleeping_dependent() might have released | ||
2978 | * the runqueue, so break out if we got new | ||
2979 | * tasks meanwhile: | ||
2980 | */ | ||
2981 | if (!rq->nr_running) | ||
2982 | goto switch_tasks; | ||
2983 | } | ||
2984 | } else { | ||
2985 | if (dependent_sleeper(cpu, rq)) { | ||
2986 | next = rq->idle; | ||
2987 | goto switch_tasks; | 3259 | goto switch_tasks; |
2988 | } | 3260 | } |
2989 | /* | ||
2990 | * dependent_sleeper() releases and reacquires the runqueue | ||
2991 | * lock, hence go into the idle loop if the rq went | ||
2992 | * empty meanwhile: | ||
2993 | */ | ||
2994 | if (unlikely(!rq->nr_running)) | ||
2995 | goto go_idle; | ||
2996 | } | 3261 | } |
2997 | 3262 | ||
2998 | array = rq->active; | 3263 | array = rq->active; |
@@ -3030,6 +3295,8 @@ go_idle: | |||
3030 | } | 3295 | } |
3031 | } | 3296 | } |
3032 | next->sleep_type = SLEEP_NORMAL; | 3297 | next->sleep_type = SLEEP_NORMAL; |
3298 | if (dependent_sleeper(cpu, rq, next)) | ||
3299 | next = rq->idle; | ||
3033 | switch_tasks: | 3300 | switch_tasks: |
3034 | if (next == rq->idle) | 3301 | if (next == rq->idle) |
3035 | schedstat_inc(rq, sched_goidle); | 3302 | schedstat_inc(rq, sched_goidle); |
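The net effect on schedule() is that the SMT hooks no longer drop rq->lock: wake_sleeping_dependent() runs only on the way to idle, and dependent_sleeper() is consulted exactly once, after next has been chosen, to decide whether that task should yield the shared core to its sibling. In outline (condensed from the two hunks above):

if (unlikely(!rq->nr_running)) {
        idle_balance(cpu, rq);
        if (!rq->nr_running) {
                next = rq->idle;
                wake_sleeping_dependent(cpu);
                goto switch_tasks;
        }
}
/* ... pick the highest-priority queued task into 'next' ... */
if (dependent_sleeper(cpu, rq, next))
        next = rq->idle;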
@@ -3473,12 +3740,65 @@ long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) | |||
3473 | 3740 | ||
3474 | EXPORT_SYMBOL(sleep_on_timeout); | 3741 | EXPORT_SYMBOL(sleep_on_timeout); |
3475 | 3742 | ||
3743 | #ifdef CONFIG_RT_MUTEXES | ||
3744 | |||
3745 | /* | ||
3746 | * rt_mutex_setprio - set the current priority of a task | ||
3747 | * @p: task | ||
3748 | * @prio: prio value (kernel-internal form) | ||
3749 | * | ||
3750 | * This function changes the 'effective' priority of a task. It does | ||
3751 | * not touch ->normal_prio like __setscheduler(). | ||
3752 | * | ||
3753 | * Used by the rt_mutex code to implement priority inheritance logic. | ||
3754 | */ | ||
3755 | void rt_mutex_setprio(task_t *p, int prio) | ||
3756 | { | ||
3757 | unsigned long flags; | ||
3758 | prio_array_t *array; | ||
3759 | runqueue_t *rq; | ||
3760 | int oldprio; | ||
3761 | |||
3762 | BUG_ON(prio < 0 || prio > MAX_PRIO); | ||
3763 | |||
3764 | rq = task_rq_lock(p, &flags); | ||
3765 | |||
3766 | oldprio = p->prio; | ||
3767 | array = p->array; | ||
3768 | if (array) | ||
3769 | dequeue_task(p, array); | ||
3770 | p->prio = prio; | ||
3771 | |||
3772 | if (array) { | ||
3773 | /* | ||
3774 | * If changing to an RT priority then queue it | ||
3775 | * in the active array! | ||
3776 | */ | ||
3777 | if (rt_task(p)) | ||
3778 | array = rq->active; | ||
3779 | enqueue_task(p, array); | ||
3780 | /* | ||
3781 | * Reschedule if we are currently running on this runqueue and | ||
3782 | * our priority decreased, or if we are not currently running on | ||
3783 | * this runqueue and our priority is higher than the current's | ||
3784 | */ | ||
3785 | if (task_running(rq, p)) { | ||
3786 | if (p->prio > oldprio) | ||
3787 | resched_task(rq->curr); | ||
3788 | } else if (TASK_PREEMPTS_CURR(p, rq)) | ||
3789 | resched_task(rq->curr); | ||
3790 | } | ||
3791 | task_rq_unlock(rq, &flags); | ||
3792 | } | ||
3793 | |||
3794 | #endif | ||
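A hedged illustration of how the rt_mutex code is expected to use this entry point (the real callers live in the rt-mutex implementation, not in this file; the helper names below exist only for the example): the owner of a contended lock is boosted to the priority of its top waiter, and rt_mutex_getprio() later says what to restore once the boost is no longer justified.

/* Example only: make sure 'owner' cannot be preempted by 'waiter'. */
static void pi_boost(task_t *owner, task_t *waiter)
{
        if (waiter->prio < owner->prio)
                rt_mutex_setprio(owner, waiter->prio);
}

/* Example only: drop back to whatever the remaining waiters (or the
 * task's own policy) dictate. */
static void pi_deboost(task_t *owner)
{
        rt_mutex_setprio(owner, rt_mutex_getprio(owner));
}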
3795 | |||
3476 | void set_user_nice(task_t *p, long nice) | 3796 | void set_user_nice(task_t *p, long nice) |
3477 | { | 3797 | { |
3478 | unsigned long flags; | 3798 | unsigned long flags; |
3479 | prio_array_t *array; | 3799 | prio_array_t *array; |
3480 | runqueue_t *rq; | 3800 | runqueue_t *rq; |
3481 | int old_prio, new_prio, delta; | 3801 | int old_prio, delta; |
3482 | 3802 | ||
3483 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) | 3803 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
3484 | return; | 3804 | return; |
@@ -3493,22 +3813,25 @@ void set_user_nice(task_t *p, long nice) | |||
3493 | * it wont have any effect on scheduling until the task is | 3813 | * it wont have any effect on scheduling until the task is |
3494 | * not SCHED_NORMAL/SCHED_BATCH: | 3814 | * not SCHED_NORMAL/SCHED_BATCH: |
3495 | */ | 3815 | */ |
3496 | if (rt_task(p)) { | 3816 | if (has_rt_policy(p)) { |
3497 | p->static_prio = NICE_TO_PRIO(nice); | 3817 | p->static_prio = NICE_TO_PRIO(nice); |
3498 | goto out_unlock; | 3818 | goto out_unlock; |
3499 | } | 3819 | } |
3500 | array = p->array; | 3820 | array = p->array; |
3501 | if (array) | 3821 | if (array) { |
3502 | dequeue_task(p, array); | 3822 | dequeue_task(p, array); |
3823 | dec_raw_weighted_load(rq, p); | ||
3824 | } | ||
3503 | 3825 | ||
3504 | old_prio = p->prio; | ||
3505 | new_prio = NICE_TO_PRIO(nice); | ||
3506 | delta = new_prio - old_prio; | ||
3507 | p->static_prio = NICE_TO_PRIO(nice); | 3826 | p->static_prio = NICE_TO_PRIO(nice); |
3508 | p->prio += delta; | 3827 | set_load_weight(p); |
3828 | old_prio = p->prio; | ||
3829 | p->prio = effective_prio(p); | ||
3830 | delta = p->prio - old_prio; | ||
3509 | 3831 | ||
3510 | if (array) { | 3832 | if (array) { |
3511 | enqueue_task(p, array); | 3833 | enqueue_task(p, array); |
3834 | inc_raw_weighted_load(rq, p); | ||
3512 | /* | 3835 | /* |
3513 | * If the task increased its priority or is running and | 3836 | * If the task increased its priority or is running and |
3514 | * lowered its priority, then reschedule its CPU: | 3837 | * lowered its priority, then reschedule its CPU: |
@@ -3519,7 +3842,6 @@ void set_user_nice(task_t *p, long nice) | |||
3519 | out_unlock: | 3842 | out_unlock: |
3520 | task_rq_unlock(rq, &flags); | 3843 | task_rq_unlock(rq, &flags); |
3521 | } | 3844 | } |
3522 | |||
3523 | EXPORT_SYMBOL(set_user_nice); | 3845 | EXPORT_SYMBOL(set_user_nice); |
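The nice path no longer adds a static-priority delta to p->prio; it recomputes through effective_prio(). That matters once priority inheritance exists: per the PI rework elsewhere in this series, effective_prio() leaves a priority that is currently boosted into the RT range untouched, whereas the old p->prio += delta would have dragged a boosted priority along with the nice change. Annotated (same statements as in the hunk above):

p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);                     /* keep p->load_weight in step  */
old_prio = p->prio;
p->prio = effective_prio(p);            /* a PI-boosted RT prio is kept */
delta = p->prio - old_prio;             /* 0 for a boosted task         */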
3524 | 3846 | ||
3525 | /* | 3847 | /* |
@@ -3634,16 +3956,15 @@ static void __setscheduler(struct task_struct *p, int policy, int prio) | |||
3634 | BUG_ON(p->array); | 3956 | BUG_ON(p->array); |
3635 | p->policy = policy; | 3957 | p->policy = policy; |
3636 | p->rt_priority = prio; | 3958 | p->rt_priority = prio; |
3637 | if (policy != SCHED_NORMAL && policy != SCHED_BATCH) { | 3959 | p->normal_prio = normal_prio(p); |
3638 | p->prio = MAX_RT_PRIO-1 - p->rt_priority; | 3960 | /* we are holding p->pi_lock already */ |
3639 | } else { | 3961 | p->prio = rt_mutex_getprio(p); |
3640 | p->prio = p->static_prio; | 3962 | /* |
3641 | /* | 3963 | * SCHED_BATCH tasks are treated as perpetual CPU hogs: |
3642 | * SCHED_BATCH tasks are treated as perpetual CPU hogs: | 3964 | */ |
3643 | */ | 3965 | if (policy == SCHED_BATCH) |
3644 | if (policy == SCHED_BATCH) | 3966 | p->sleep_avg = 0; |
3645 | p->sleep_avg = 0; | 3967 | set_load_weight(p); |
3646 | } | ||
3647 | } | 3968 | } |
3648 | 3969 | ||
3649 | /** | 3970 | /** |
@@ -3662,6 +3983,8 @@ int sched_setscheduler(struct task_struct *p, int policy, | |||
3662 | unsigned long flags; | 3983 | unsigned long flags; |
3663 | runqueue_t *rq; | 3984 | runqueue_t *rq; |
3664 | 3985 | ||
3986 | /* may grab non-irq protected spin_locks */ | ||
3987 | BUG_ON(in_interrupt()); | ||
3665 | recheck: | 3988 | recheck: |
3666 | /* double check policy once rq lock held */ | 3989 | /* double check policy once rq lock held */ |
3667 | if (policy < 0) | 3990 | if (policy < 0) |
@@ -3710,14 +4033,20 @@ recheck: | |||
3710 | if (retval) | 4033 | if (retval) |
3711 | return retval; | 4034 | return retval; |
3712 | /* | 4035 | /* |
4036 | * make sure no PI-waiters arrive (or leave) while we are | ||
4037 | * changing the priority of the task: | ||
4038 | */ | ||
4039 | spin_lock_irqsave(&p->pi_lock, flags); | ||
4040 | /* | ||
3713 | * To be able to change p->policy safely, the apropriate | 4041 | * To be able to change p->policy safely, the apropriate |
3714 | * runqueue lock must be held. | 4042 | * runqueue lock must be held. |
3715 | */ | 4043 | */ |
3716 | rq = task_rq_lock(p, &flags); | 4044 | rq = __task_rq_lock(p); |
3717 | /* recheck policy now with rq lock held */ | 4045 | /* recheck policy now with rq lock held */ |
3718 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 4046 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
3719 | policy = oldpolicy = -1; | 4047 | policy = oldpolicy = -1; |
3720 | task_rq_unlock(rq, &flags); | 4048 | __task_rq_unlock(rq); |
4049 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
3721 | goto recheck; | 4050 | goto recheck; |
3722 | } | 4051 | } |
3723 | array = p->array; | 4052 | array = p->array; |
@@ -3738,7 +4067,11 @@ recheck: | |||
3738 | } else if (TASK_PREEMPTS_CURR(p, rq)) | 4067 | } else if (TASK_PREEMPTS_CURR(p, rq)) |
3739 | resched_task(rq->curr); | 4068 | resched_task(rq->curr); |
3740 | } | 4069 | } |
3741 | task_rq_unlock(rq, &flags); | 4070 | __task_rq_unlock(rq); |
4071 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
4072 | |||
4073 | rt_mutex_adjust_pi(p); | ||
4074 | |||
3742 | return 0; | 4075 | return 0; |
3743 | } | 4076 | } |
3744 | EXPORT_SYMBOL_GPL(sched_setscheduler); | 4077 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
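For in-kernel users the calling convention is unchanged; the only new requirement is process context (see the added BUG_ON(in_interrupt())), since the function now takes p->pi_lock and ends by calling rt_mutex_adjust_pi(). A minimal, hypothetical caller:

static int make_fifo(struct task_struct *p)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

        return sched_setscheduler(p, SCHED_FIFO, &param);
}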
@@ -3760,8 +4093,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
3760 | read_unlock_irq(&tasklist_lock); | 4093 | read_unlock_irq(&tasklist_lock); |
3761 | return -ESRCH; | 4094 | return -ESRCH; |
3762 | } | 4095 | } |
3763 | retval = sched_setscheduler(p, policy, &lparam); | 4096 | get_task_struct(p); |
3764 | read_unlock_irq(&tasklist_lock); | 4097 | read_unlock_irq(&tasklist_lock); |
4098 | retval = sched_setscheduler(p, policy, &lparam); | ||
4099 | put_task_struct(p); | ||
3765 | return retval; | 4100 | return retval; |
3766 | } | 4101 | } |
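do_sched_setscheduler() now pins the task before dropping tasklist_lock, so the heavier, PI-aware sched_setscheduler() runs without that read lock held while *p is still guaranteed to stay around. Annotated:

get_task_struct(p);                       /* pin: p cannot be freed   */
read_unlock_irq(&tasklist_lock);          /* drop the read lock early */
retval = sched_setscheduler(p, policy, &lparam);
put_task_struct(p);                       /* release the pin          */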
3767 | 4102 | ||
@@ -4247,7 +4582,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) | |||
4247 | if (retval) | 4582 | if (retval) |
4248 | goto out_unlock; | 4583 | goto out_unlock; |
4249 | 4584 | ||
4250 | jiffies_to_timespec(p->policy & SCHED_FIFO ? | 4585 | jiffies_to_timespec(p->policy == SCHED_FIFO ? |
4251 | 0 : task_timeslice(p), &t); | 4586 | 0 : task_timeslice(p), &t); |
4252 | read_unlock(&tasklist_lock); | 4587 | read_unlock(&tasklist_lock); |
4253 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; | 4588 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
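The switch from '&' to '==' fixes a real bug rather than being cosmetic: scheduling policies are small integers, not bit flags, so the bitwise test also matched SCHED_BATCH and made sys_sched_rr_get_interval() report a zero timeslice for batch tasks. A tiny userspace demonstration (policy values assumed to match <linux/sched.h> of this era):

#include <stdio.h>

#define SCHED_NORMAL    0
#define SCHED_FIFO      1
#define SCHED_RR        2
#define SCHED_BATCH     3

int main(void)
{
        int policy = SCHED_BATCH;

        /* 3 & 1 is non-zero, so the old test treated SCHED_BATCH like
         * SCHED_FIFO; the equality test does not. */
        printf("& -> %d, == -> %d\n",
               (policy & SCHED_FIFO) != 0, policy == SCHED_FIFO);
        return 0;
}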
@@ -4373,7 +4708,7 @@ void __devinit init_idle(task_t *idle, int cpu) | |||
4373 | idle->timestamp = sched_clock(); | 4708 | idle->timestamp = sched_clock(); |
4374 | idle->sleep_avg = 0; | 4709 | idle->sleep_avg = 0; |
4375 | idle->array = NULL; | 4710 | idle->array = NULL; |
4376 | idle->prio = MAX_PRIO; | 4711 | idle->prio = idle->normal_prio = MAX_PRIO; |
4377 | idle->state = TASK_RUNNING; | 4712 | idle->state = TASK_RUNNING; |
4378 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 4713 | idle->cpus_allowed = cpumask_of_cpu(cpu); |
4379 | set_task_cpu(idle, cpu); | 4714 | set_task_cpu(idle, cpu); |
@@ -4469,13 +4804,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); | |||
4469 | * | 4804 | * |
4470 | * So we race with normal scheduler movements, but that's OK, as long | 4805 | * So we race with normal scheduler movements, but that's OK, as long |
4471 | * as the task is no longer on this CPU. | 4806 | * as the task is no longer on this CPU. |
4807 | * | ||
4808 | * Returns non-zero if task was successfully migrated. | ||
4472 | */ | 4809 | */ |
4473 | static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | 4810 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
4474 | { | 4811 | { |
4475 | runqueue_t *rq_dest, *rq_src; | 4812 | runqueue_t *rq_dest, *rq_src; |
4813 | int ret = 0; | ||
4476 | 4814 | ||
4477 | if (unlikely(cpu_is_offline(dest_cpu))) | 4815 | if (unlikely(cpu_is_offline(dest_cpu))) |
4478 | return; | 4816 | return ret; |
4479 | 4817 | ||
4480 | rq_src = cpu_rq(src_cpu); | 4818 | rq_src = cpu_rq(src_cpu); |
4481 | rq_dest = cpu_rq(dest_cpu); | 4819 | rq_dest = cpu_rq(dest_cpu); |
@@ -4503,9 +4841,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
4503 | if (TASK_PREEMPTS_CURR(p, rq_dest)) | 4841 | if (TASK_PREEMPTS_CURR(p, rq_dest)) |
4504 | resched_task(rq_dest->curr); | 4842 | resched_task(rq_dest->curr); |
4505 | } | 4843 | } |
4506 | 4844 | ret = 1; | |
4507 | out: | 4845 | out: |
4508 | double_rq_unlock(rq_src, rq_dest); | 4846 | double_rq_unlock(rq_src, rq_dest); |
4847 | return ret; | ||
4509 | } | 4848 | } |
4510 | 4849 | ||
4511 | /* | 4850 | /* |
@@ -4575,9 +4914,12 @@ wait_to_die: | |||
4575 | /* Figure out where task on dead CPU should go, use force if neccessary. */ | 4914 | /* Figure out where task on dead CPU should go, use force if neccessary. */ |
4576 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | 4915 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) |
4577 | { | 4916 | { |
4917 | runqueue_t *rq; | ||
4918 | unsigned long flags; | ||
4578 | int dest_cpu; | 4919 | int dest_cpu; |
4579 | cpumask_t mask; | 4920 | cpumask_t mask; |
4580 | 4921 | ||
4922 | restart: | ||
4581 | /* On same node? */ | 4923 | /* On same node? */ |
4582 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 4924 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); |
4583 | cpus_and(mask, mask, tsk->cpus_allowed); | 4925 | cpus_and(mask, mask, tsk->cpus_allowed); |
@@ -4589,8 +4931,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4589 | 4931 | ||
4590 | /* No more Mr. Nice Guy. */ | 4932 | /* No more Mr. Nice Guy. */ |
4591 | if (dest_cpu == NR_CPUS) { | 4933 | if (dest_cpu == NR_CPUS) { |
4934 | rq = task_rq_lock(tsk, &flags); | ||
4592 | cpus_setall(tsk->cpus_allowed); | 4935 | cpus_setall(tsk->cpus_allowed); |
4593 | dest_cpu = any_online_cpu(tsk->cpus_allowed); | 4936 | dest_cpu = any_online_cpu(tsk->cpus_allowed); |
4937 | task_rq_unlock(rq, &flags); | ||
4594 | 4938 | ||
4595 | /* | 4939 | /* |
4596 | * Don't tell them about moving exiting tasks or | 4940 | * Don't tell them about moving exiting tasks or |
@@ -4602,7 +4946,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4602 | "longer affine to cpu%d\n", | 4946 | "longer affine to cpu%d\n", |
4603 | tsk->pid, tsk->comm, dead_cpu); | 4947 | tsk->pid, tsk->comm, dead_cpu); |
4604 | } | 4948 | } |
4605 | __migrate_task(tsk, dead_cpu, dest_cpu); | 4949 | if (!__migrate_task(tsk, dead_cpu, dest_cpu)) |
4950 | goto restart; | ||
4606 | } | 4951 | } |
4607 | 4952 | ||
4608 | /* | 4953 | /* |
@@ -4729,8 +5074,9 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
4729 | * migration_call - callback that gets triggered when a CPU is added. | 5074 | * migration_call - callback that gets triggered when a CPU is added. |
4730 | * Here we can start up the necessary migration thread for the new CPU. | 5075 | * Here we can start up the necessary migration thread for the new CPU. |
4731 | */ | 5076 | */ |
4732 | static int migration_call(struct notifier_block *nfb, unsigned long action, | 5077 | static int __cpuinit migration_call(struct notifier_block *nfb, |
4733 | void *hcpu) | 5078 | unsigned long action, |
5079 | void *hcpu) | ||
4734 | { | 5080 | { |
4735 | int cpu = (long)hcpu; | 5081 | int cpu = (long)hcpu; |
4736 | struct task_struct *p; | 5082 | struct task_struct *p; |
@@ -4800,7 +5146,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, | |||
4800 | /* Register at highest priority so that task migration (migrate_all_tasks) | 5146 | /* Register at highest priority so that task migration (migrate_all_tasks) |
4801 | * happens before everything else. | 5147 | * happens before everything else. |
4802 | */ | 5148 | */ |
4803 | static struct notifier_block migration_notifier = { | 5149 | static struct notifier_block __cpuinitdata migration_notifier = { |
4804 | .notifier_call = migration_call, | 5150 | .notifier_call = migration_call, |
4805 | .priority = 10 | 5151 | .priority = 10 |
4806 | }; | 5152 | }; |
@@ -5601,6 +5947,7 @@ static cpumask_t sched_domain_node_span(int node) | |||
5601 | } | 5947 | } |
5602 | #endif | 5948 | #endif |
5603 | 5949 | ||
5950 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | ||
5604 | /* | 5951 | /* |
5605 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we | 5952 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we |
5606 | * can switch it on easily if needed. | 5953 | * can switch it on easily if needed. |
@@ -5616,7 +5963,7 @@ static int cpu_to_cpu_group(int cpu) | |||
5616 | 5963 | ||
5617 | #ifdef CONFIG_SCHED_MC | 5964 | #ifdef CONFIG_SCHED_MC |
5618 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 5965 | static DEFINE_PER_CPU(struct sched_domain, core_domains); |
5619 | static struct sched_group sched_group_core[NR_CPUS]; | 5966 | static struct sched_group *sched_group_core_bycpu[NR_CPUS]; |
5620 | #endif | 5967 | #endif |
5621 | 5968 | ||
5622 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 5969 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
@@ -5632,7 +5979,7 @@ static int cpu_to_core_group(int cpu) | |||
5632 | #endif | 5979 | #endif |
5633 | 5980 | ||
5634 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 5981 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); |
5635 | static struct sched_group sched_group_phys[NR_CPUS]; | 5982 | static struct sched_group *sched_group_phys_bycpu[NR_CPUS]; |
5636 | static int cpu_to_phys_group(int cpu) | 5983 | static int cpu_to_phys_group(int cpu) |
5637 | { | 5984 | { |
5638 | #if defined(CONFIG_SCHED_MC) | 5985 | #if defined(CONFIG_SCHED_MC) |
@@ -5689,13 +6036,74 @@ next_sg: | |||
5689 | } | 6036 | } |
5690 | #endif | 6037 | #endif |
5691 | 6038 | ||
6039 | /* Free memory allocated for various sched_group structures */ | ||
6040 | static void free_sched_groups(const cpumask_t *cpu_map) | ||
6041 | { | ||
6042 | int cpu; | ||
6043 | #ifdef CONFIG_NUMA | ||
6044 | int i; | ||
6045 | |||
6046 | for_each_cpu_mask(cpu, *cpu_map) { | ||
6047 | struct sched_group *sched_group_allnodes | ||
6048 | = sched_group_allnodes_bycpu[cpu]; | ||
6049 | struct sched_group **sched_group_nodes | ||
6050 | = sched_group_nodes_bycpu[cpu]; | ||
6051 | |||
6052 | if (sched_group_allnodes) { | ||
6053 | kfree(sched_group_allnodes); | ||
6054 | sched_group_allnodes_bycpu[cpu] = NULL; | ||
6055 | } | ||
6056 | |||
6057 | if (!sched_group_nodes) | ||
6058 | continue; | ||
6059 | |||
6060 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
6061 | cpumask_t nodemask = node_to_cpumask(i); | ||
6062 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
6063 | |||
6064 | cpus_and(nodemask, nodemask, *cpu_map); | ||
6065 | if (cpus_empty(nodemask)) | ||
6066 | continue; | ||
6067 | |||
6068 | if (sg == NULL) | ||
6069 | continue; | ||
6070 | sg = sg->next; | ||
6071 | next_sg: | ||
6072 | oldsg = sg; | ||
6073 | sg = sg->next; | ||
6074 | kfree(oldsg); | ||
6075 | if (oldsg != sched_group_nodes[i]) | ||
6076 | goto next_sg; | ||
6077 | } | ||
6078 | kfree(sched_group_nodes); | ||
6079 | sched_group_nodes_bycpu[cpu] = NULL; | ||
6080 | } | ||
6081 | #endif | ||
6082 | for_each_cpu_mask(cpu, *cpu_map) { | ||
6083 | if (sched_group_phys_bycpu[cpu]) { | ||
6084 | kfree(sched_group_phys_bycpu[cpu]); | ||
6085 | sched_group_phys_bycpu[cpu] = NULL; | ||
6086 | } | ||
6087 | #ifdef CONFIG_SCHED_MC | ||
6088 | if (sched_group_core_bycpu[cpu]) { | ||
6089 | kfree(sched_group_core_bycpu[cpu]); | ||
6090 | sched_group_core_bycpu[cpu] = NULL; | ||
6091 | } | ||
6092 | #endif | ||
6093 | } | ||
6094 | } | ||
6095 | |||
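free_sched_groups() depends on each sched_group list being a ring: note the sg->next = sg initialisation and the prev->next = sg splices in build_sched_domains() below. The free loop above is a single walk around such a ring; in isolation the traversal looks like this (sketch):

static void visit_group_ring(struct sched_group *head,
                             void (*fn)(struct sched_group *))
{
        struct sched_group *sg = head;

        do {
                fn(sg);
                sg = sg->next;
        } while (sg != head);
}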
5692 | /* | 6096 | /* |
5693 | * Build sched domains for a given set of cpus and attach the sched domains | 6097 | * Build sched domains for a given set of cpus and attach the sched domains |
5694 | * to the individual cpus | 6098 | * to the individual cpus |
5695 | */ | 6099 | */ |
5696 | void build_sched_domains(const cpumask_t *cpu_map) | 6100 | static int build_sched_domains(const cpumask_t *cpu_map) |
5697 | { | 6101 | { |
5698 | int i; | 6102 | int i; |
6103 | struct sched_group *sched_group_phys = NULL; | ||
6104 | #ifdef CONFIG_SCHED_MC | ||
6105 | struct sched_group *sched_group_core = NULL; | ||
6106 | #endif | ||
5699 | #ifdef CONFIG_NUMA | 6107 | #ifdef CONFIG_NUMA |
5700 | struct sched_group **sched_group_nodes = NULL; | 6108 | struct sched_group **sched_group_nodes = NULL; |
5701 | struct sched_group *sched_group_allnodes = NULL; | 6109 | struct sched_group *sched_group_allnodes = NULL; |
@@ -5703,11 +6111,11 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5703 | /* | 6111 | /* |
5704 | * Allocate the per-node list of sched groups | 6112 | * Allocate the per-node list of sched groups |
5705 | */ | 6113 | */ |
5706 | sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES, | 6114 | sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES, |
5707 | GFP_ATOMIC); | 6115 | GFP_KERNEL); |
5708 | if (!sched_group_nodes) { | 6116 | if (!sched_group_nodes) { |
5709 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 6117 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
5710 | return; | 6118 | return -ENOMEM; |
5711 | } | 6119 | } |
5712 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 6120 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; |
5713 | #endif | 6121 | #endif |
@@ -5733,7 +6141,7 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5733 | if (!sched_group_allnodes) { | 6141 | if (!sched_group_allnodes) { |
5734 | printk(KERN_WARNING | 6142 | printk(KERN_WARNING |
5735 | "Can not alloc allnodes sched group\n"); | 6143 | "Can not alloc allnodes sched group\n"); |
5736 | break; | 6144 | goto error; |
5737 | } | 6145 | } |
5738 | sched_group_allnodes_bycpu[i] | 6146 | sched_group_allnodes_bycpu[i] |
5739 | = sched_group_allnodes; | 6147 | = sched_group_allnodes; |
@@ -5754,6 +6162,18 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5754 | cpus_and(sd->span, sd->span, *cpu_map); | 6162 | cpus_and(sd->span, sd->span, *cpu_map); |
5755 | #endif | 6163 | #endif |
5756 | 6164 | ||
6165 | if (!sched_group_phys) { | ||
6166 | sched_group_phys | ||
6167 | = kmalloc(sizeof(struct sched_group) * NR_CPUS, | ||
6168 | GFP_KERNEL); | ||
6169 | if (!sched_group_phys) { | ||
6170 | printk(KERN_WARNING "Can not alloc phys sched " | ||
6171 | "group\n"); | ||
6172 | goto error; | ||
6173 | } | ||
6174 | sched_group_phys_bycpu[i] = sched_group_phys; | ||
6175 | } | ||
6176 | |||
5757 | p = sd; | 6177 | p = sd; |
5758 | sd = &per_cpu(phys_domains, i); | 6178 | sd = &per_cpu(phys_domains, i); |
5759 | group = cpu_to_phys_group(i); | 6179 | group = cpu_to_phys_group(i); |
@@ -5763,6 +6183,18 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5763 | sd->groups = &sched_group_phys[group]; | 6183 | sd->groups = &sched_group_phys[group]; |
5764 | 6184 | ||
5765 | #ifdef CONFIG_SCHED_MC | 6185 | #ifdef CONFIG_SCHED_MC |
6186 | if (!sched_group_core) { | ||
6187 | sched_group_core | ||
6188 | = kmalloc(sizeof(struct sched_group) * NR_CPUS, | ||
6189 | GFP_KERNEL); | ||
6190 | if (!sched_group_core) { | ||
6191 | printk(KERN_WARNING "Can not alloc core sched " | ||
6192 | "group\n"); | ||
6193 | goto error; | ||
6194 | } | ||
6195 | sched_group_core_bycpu[i] = sched_group_core; | ||
6196 | } | ||
6197 | |||
5766 | p = sd; | 6198 | p = sd; |
5767 | sd = &per_cpu(core_domains, i); | 6199 | sd = &per_cpu(core_domains, i); |
5768 | group = cpu_to_core_group(i); | 6200 | group = cpu_to_core_group(i); |
@@ -5846,24 +6278,21 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5846 | domainspan = sched_domain_node_span(i); | 6278 | domainspan = sched_domain_node_span(i); |
5847 | cpus_and(domainspan, domainspan, *cpu_map); | 6279 | cpus_and(domainspan, domainspan, *cpu_map); |
5848 | 6280 | ||
5849 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | 6281 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); |
6282 | if (!sg) { | ||
6283 | printk(KERN_WARNING "Can not alloc domain group for " | ||
6284 | "node %d\n", i); | ||
6285 | goto error; | ||
6286 | } | ||
5850 | sched_group_nodes[i] = sg; | 6287 | sched_group_nodes[i] = sg; |
5851 | for_each_cpu_mask(j, nodemask) { | 6288 | for_each_cpu_mask(j, nodemask) { |
5852 | struct sched_domain *sd; | 6289 | struct sched_domain *sd; |
5853 | sd = &per_cpu(node_domains, j); | 6290 | sd = &per_cpu(node_domains, j); |
5854 | sd->groups = sg; | 6291 | sd->groups = sg; |
5855 | if (sd->groups == NULL) { | ||
5856 | /* Turn off balancing if we have no groups */ | ||
5857 | sd->flags = 0; | ||
5858 | } | ||
5859 | } | ||
5860 | if (!sg) { | ||
5861 | printk(KERN_WARNING | ||
5862 | "Can not alloc domain group for node %d\n", i); | ||
5863 | continue; | ||
5864 | } | 6292 | } |
5865 | sg->cpu_power = 0; | 6293 | sg->cpu_power = 0; |
5866 | sg->cpumask = nodemask; | 6294 | sg->cpumask = nodemask; |
6295 | sg->next = sg; | ||
5867 | cpus_or(covered, covered, nodemask); | 6296 | cpus_or(covered, covered, nodemask); |
5868 | prev = sg; | 6297 | prev = sg; |
5869 | 6298 | ||
@@ -5882,54 +6311,90 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5882 | if (cpus_empty(tmp)) | 6311 | if (cpus_empty(tmp)) |
5883 | continue; | 6312 | continue; |
5884 | 6313 | ||
5885 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | 6314 | sg = kmalloc_node(sizeof(struct sched_group), |
6315 | GFP_KERNEL, i); | ||
5886 | if (!sg) { | 6316 | if (!sg) { |
5887 | printk(KERN_WARNING | 6317 | printk(KERN_WARNING |
5888 | "Can not alloc domain group for node %d\n", j); | 6318 | "Can not alloc domain group for node %d\n", j); |
5889 | break; | 6319 | goto error; |
5890 | } | 6320 | } |
5891 | sg->cpu_power = 0; | 6321 | sg->cpu_power = 0; |
5892 | sg->cpumask = tmp; | 6322 | sg->cpumask = tmp; |
6323 | sg->next = prev->next; | ||
5893 | cpus_or(covered, covered, tmp); | 6324 | cpus_or(covered, covered, tmp); |
5894 | prev->next = sg; | 6325 | prev->next = sg; |
5895 | prev = sg; | 6326 | prev = sg; |
5896 | } | 6327 | } |
5897 | prev->next = sched_group_nodes[i]; | ||
5898 | } | 6328 | } |
5899 | #endif | 6329 | #endif |
5900 | 6330 | ||
5901 | /* Calculate CPU power for physical packages and nodes */ | 6331 | /* Calculate CPU power for physical packages and nodes */ |
6332 | #ifdef CONFIG_SCHED_SMT | ||
5902 | for_each_cpu_mask(i, *cpu_map) { | 6333 | for_each_cpu_mask(i, *cpu_map) { |
5903 | int power; | ||
5904 | struct sched_domain *sd; | 6334 | struct sched_domain *sd; |
5905 | #ifdef CONFIG_SCHED_SMT | ||
5906 | sd = &per_cpu(cpu_domains, i); | 6335 | sd = &per_cpu(cpu_domains, i); |
5907 | power = SCHED_LOAD_SCALE; | 6336 | sd->groups->cpu_power = SCHED_LOAD_SCALE; |
5908 | sd->groups->cpu_power = power; | 6337 | } |
5909 | #endif | 6338 | #endif |
5910 | #ifdef CONFIG_SCHED_MC | 6339 | #ifdef CONFIG_SCHED_MC |
6340 | for_each_cpu_mask(i, *cpu_map) { | ||
6341 | int power; | ||
6342 | struct sched_domain *sd; | ||
5911 | sd = &per_cpu(core_domains, i); | 6343 | sd = &per_cpu(core_domains, i); |
5912 | power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) | 6344 | if (sched_smt_power_savings) |
6345 | power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); | ||
6346 | else | ||
6347 | power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) | ||
5913 | * SCHED_LOAD_SCALE / 10; | 6348 | * SCHED_LOAD_SCALE / 10; |
5914 | sd->groups->cpu_power = power; | 6349 | sd->groups->cpu_power = power; |
6350 | } | ||
6351 | #endif | ||
5915 | 6352 | ||
6353 | for_each_cpu_mask(i, *cpu_map) { | ||
6354 | struct sched_domain *sd; | ||
6355 | #ifdef CONFIG_SCHED_MC | ||
5916 | sd = &per_cpu(phys_domains, i); | 6356 | sd = &per_cpu(phys_domains, i); |
6357 | if (i != first_cpu(sd->groups->cpumask)) | ||
6358 | continue; | ||
5917 | 6359 | ||
5918 | /* | 6360 | sd->groups->cpu_power = 0; |
5919 | * This has to be < 2 * SCHED_LOAD_SCALE | 6361 | if (sched_mc_power_savings || sched_smt_power_savings) { |
5920 | * Lets keep it SCHED_LOAD_SCALE, so that | 6362 | int j; |
5921 | * while calculating NUMA group's cpu_power | 6363 | |
5922 | * we can simply do | 6364 | for_each_cpu_mask(j, sd->groups->cpumask) { |
5923 | * numa_group->cpu_power += phys_group->cpu_power; | 6365 | struct sched_domain *sd1; |
5924 | * | 6366 | sd1 = &per_cpu(core_domains, j); |
5925 | * See "only add power once for each physical pkg" | 6367 | /* |
5926 | * comment below | 6368 | * for each core we add its power once |
5927 | */ | 6369 | * to the group in the physical domain |
5928 | sd->groups->cpu_power = SCHED_LOAD_SCALE; | 6370 | */ |
6371 | if (j != first_cpu(sd1->groups->cpumask)) | ||
6372 | continue; | ||
6373 | |||
6374 | if (sched_smt_power_savings) | ||
6375 | sd->groups->cpu_power += sd1->groups->cpu_power; | ||
6376 | else | ||
6377 | sd->groups->cpu_power += SCHED_LOAD_SCALE; | ||
6378 | } | ||
6379 | } else | ||
6380 | /* | ||
6381 | * This has to be < 2 * SCHED_LOAD_SCALE | ||
6382 | * Lets keep it SCHED_LOAD_SCALE, so that | ||
6383 | * while calculating NUMA group's cpu_power | ||
6384 | * we can simply do | ||
6385 | * numa_group->cpu_power += phys_group->cpu_power; | ||
6386 | * | ||
6387 | * See "only add power once for each physical pkg" | ||
6388 | * comment below | ||
6389 | */ | ||
6390 | sd->groups->cpu_power = SCHED_LOAD_SCALE; | ||
5929 | #else | 6391 | #else |
6392 | int power; | ||
5930 | sd = &per_cpu(phys_domains, i); | 6393 | sd = &per_cpu(phys_domains, i); |
5931 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | 6394 | if (sched_smt_power_savings) |
5932 | (cpus_weight(sd->groups->cpumask)-1) / 10; | 6395 | power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); |
6396 | else | ||
6397 | power = SCHED_LOAD_SCALE; | ||
5933 | sd->groups->cpu_power = power; | 6398 | sd->groups->cpu_power = power; |
5934 | #endif | 6399 | #endif |
5935 | } | 6400 | } |
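Worked numbers for the new cpu_power settings, assuming SCHED_LOAD_SCALE is 128 and two SMT siblings per core: the default core-group power is 128 + (2 - 1) * 128 / 10 = 140, so a shared core is rated only slightly above a single CPU, while with sched_smt_power_savings set it becomes 128 * 2 = 256, making the balancer willing to stack two tasks on one core. sched_mc_power_savings does the analogous thing one level up, summing the core powers into the physical-package group, so whole packages can be left idle for power saving.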
@@ -5957,13 +6422,20 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5957 | * Tune cache-hot values: | 6422 | * Tune cache-hot values: |
5958 | */ | 6423 | */ |
5959 | calibrate_migration_costs(cpu_map); | 6424 | calibrate_migration_costs(cpu_map); |
6425 | |||
6426 | return 0; | ||
6427 | |||
6428 | error: | ||
6429 | free_sched_groups(cpu_map); | ||
6430 | return -ENOMEM; | ||
5960 | } | 6431 | } |
5961 | /* | 6432 | /* |
5962 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 6433 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
5963 | */ | 6434 | */ |
5964 | static void arch_init_sched_domains(const cpumask_t *cpu_map) | 6435 | static int arch_init_sched_domains(const cpumask_t *cpu_map) |
5965 | { | 6436 | { |
5966 | cpumask_t cpu_default_map; | 6437 | cpumask_t cpu_default_map; |
6438 | int err; | ||
5967 | 6439 | ||
5968 | /* | 6440 | /* |
5969 | * Setup mask for cpus without special case scheduling requirements. | 6441 | * Setup mask for cpus without special case scheduling requirements. |
@@ -5972,51 +6444,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map) | |||
5972 | */ | 6444 | */ |
5973 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); | 6445 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); |
5974 | 6446 | ||
5975 | build_sched_domains(&cpu_default_map); | 6447 | err = build_sched_domains(&cpu_default_map); |
6448 | |||
6449 | return err; | ||
5976 | } | 6450 | } |
5977 | 6451 | ||
5978 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) | 6452 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) |
5979 | { | 6453 | { |
5980 | #ifdef CONFIG_NUMA | 6454 | free_sched_groups(cpu_map); |
5981 | int i; | ||
5982 | int cpu; | ||
5983 | |||
5984 | for_each_cpu_mask(cpu, *cpu_map) { | ||
5985 | struct sched_group *sched_group_allnodes | ||
5986 | = sched_group_allnodes_bycpu[cpu]; | ||
5987 | struct sched_group **sched_group_nodes | ||
5988 | = sched_group_nodes_bycpu[cpu]; | ||
5989 | |||
5990 | if (sched_group_allnodes) { | ||
5991 | kfree(sched_group_allnodes); | ||
5992 | sched_group_allnodes_bycpu[cpu] = NULL; | ||
5993 | } | ||
5994 | |||
5995 | if (!sched_group_nodes) | ||
5996 | continue; | ||
5997 | |||
5998 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
5999 | cpumask_t nodemask = node_to_cpumask(i); | ||
6000 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
6001 | |||
6002 | cpus_and(nodemask, nodemask, *cpu_map); | ||
6003 | if (cpus_empty(nodemask)) | ||
6004 | continue; | ||
6005 | |||
6006 | if (sg == NULL) | ||
6007 | continue; | ||
6008 | sg = sg->next; | ||
6009 | next_sg: | ||
6010 | oldsg = sg; | ||
6011 | sg = sg->next; | ||
6012 | kfree(oldsg); | ||
6013 | if (oldsg != sched_group_nodes[i]) | ||
6014 | goto next_sg; | ||
6015 | } | ||
6016 | kfree(sched_group_nodes); | ||
6017 | sched_group_nodes_bycpu[cpu] = NULL; | ||
6018 | } | ||
6019 | #endif | ||
6020 | } | 6455 | } |
6021 | 6456 | ||
6022 | /* | 6457 | /* |
@@ -6041,9 +6476,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
6041 | * correct sched domains | 6476 | * correct sched domains |
6042 | * Call with hotplug lock held | 6477 | * Call with hotplug lock held |
6043 | */ | 6478 | */ |
6044 | void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) | 6479 | int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) |
6045 | { | 6480 | { |
6046 | cpumask_t change_map; | 6481 | cpumask_t change_map; |
6482 | int err = 0; | ||
6047 | 6483 | ||
6048 | cpus_and(*partition1, *partition1, cpu_online_map); | 6484 | cpus_and(*partition1, *partition1, cpu_online_map); |
6049 | cpus_and(*partition2, *partition2, cpu_online_map); | 6485 | cpus_and(*partition2, *partition2, cpu_online_map); |
@@ -6052,10 +6488,86 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) | |||
6052 | /* Detach sched domains from all of the affected cpus */ | 6488 | /* Detach sched domains from all of the affected cpus */ |
6053 | detach_destroy_domains(&change_map); | 6489 | detach_destroy_domains(&change_map); |
6054 | if (!cpus_empty(*partition1)) | 6490 | if (!cpus_empty(*partition1)) |
6055 | build_sched_domains(partition1); | 6491 | err = build_sched_domains(partition1); |
6056 | if (!cpus_empty(*partition2)) | 6492 | if (!err && !cpus_empty(*partition2)) |
6057 | build_sched_domains(partition2); | 6493 | err = build_sched_domains(partition2); |
6494 | |||
6495 | return err; | ||
6496 | } | ||
6497 | |||
6498 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
6499 | int arch_reinit_sched_domains(void) | ||
6500 | { | ||
6501 | int err; | ||
6502 | |||
6503 | lock_cpu_hotplug(); | ||
6504 | detach_destroy_domains(&cpu_online_map); | ||
6505 | err = arch_init_sched_domains(&cpu_online_map); | ||
6506 | unlock_cpu_hotplug(); | ||
6507 | |||
6508 | return err; | ||
6509 | } | ||
6510 | |||
6511 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | ||
6512 | { | ||
6513 | int ret; | ||
6514 | |||
6515 | if (buf[0] != '0' && buf[0] != '1') | ||
6516 | return -EINVAL; | ||
6517 | |||
6518 | if (smt) | ||
6519 | sched_smt_power_savings = (buf[0] == '1'); | ||
6520 | else | ||
6521 | sched_mc_power_savings = (buf[0] == '1'); | ||
6522 | |||
6523 | ret = arch_reinit_sched_domains(); | ||
6524 | |||
6525 | return ret ? ret : count; | ||
6526 | } | ||
6527 | |||
6528 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | ||
6529 | { | ||
6530 | int err = 0; | ||
6531 | #ifdef CONFIG_SCHED_SMT | ||
6532 | if (smt_capable()) | ||
6533 | err = sysfs_create_file(&cls->kset.kobj, | ||
6534 | &attr_sched_smt_power_savings.attr); | ||
6535 | #endif | ||
6536 | #ifdef CONFIG_SCHED_MC | ||
6537 | if (!err && mc_capable()) | ||
6538 | err = sysfs_create_file(&cls->kset.kobj, | ||
6539 | &attr_sched_mc_power_savings.attr); | ||
6540 | #endif | ||
6541 | return err; | ||
6542 | } | ||
6543 | #endif | ||
6544 | |||
6545 | #ifdef CONFIG_SCHED_MC | ||
6546 | static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) | ||
6547 | { | ||
6548 | return sprintf(page, "%u\n", sched_mc_power_savings); | ||
6549 | } | ||
6550 | static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count) | ||
6551 | { | ||
6552 | return sched_power_savings_store(buf, count, 0); | ||
6553 | } | ||
6554 | SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, | ||
6555 | sched_mc_power_savings_store); | ||
6556 | #endif | ||
6557 | |||
6558 | #ifdef CONFIG_SCHED_SMT | ||
6559 | static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) | ||
6560 | { | ||
6561 | return sprintf(page, "%u\n", sched_smt_power_savings); | ||
6562 | } | ||
6563 | static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count) | ||
6564 | { | ||
6565 | return sched_power_savings_store(buf, count, 1); | ||
6058 | } | 6566 | } |
6567 | SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, | ||
6568 | sched_smt_power_savings_store); | ||
6569 | #endif | ||
6570 | |||
6059 | 6571 | ||
6060 | #ifdef CONFIG_HOTPLUG_CPU | 6572 | #ifdef CONFIG_HOTPLUG_CPU |
6061 | /* | 6573 | /* |
@@ -6138,7 +6650,6 @@ void __init sched_init(void) | |||
6138 | rq->push_cpu = 0; | 6650 | rq->push_cpu = 0; |
6139 | rq->migration_thread = NULL; | 6651 | rq->migration_thread = NULL; |
6140 | INIT_LIST_HEAD(&rq->migration_queue); | 6652 | INIT_LIST_HEAD(&rq->migration_queue); |
6141 | rq->cpu = i; | ||
6142 | #endif | 6653 | #endif |
6143 | atomic_set(&rq->nr_iowait, 0); | 6654 | atomic_set(&rq->nr_iowait, 0); |
6144 | 6655 | ||
@@ -6153,6 +6664,7 @@ void __init sched_init(void) | |||
6153 | } | 6664 | } |
6154 | } | 6665 | } |
6155 | 6666 | ||
6667 | set_load_weight(&init_task); | ||
6156 | /* | 6668 | /* |
6157 | * The boot idle thread does lazy MMU switching as well: | 6669 | * The boot idle thread does lazy MMU switching as well: |
6158 | */ | 6670 | */ |
@@ -6199,11 +6711,12 @@ void normalize_rt_tasks(void) | |||
6199 | runqueue_t *rq; | 6711 | runqueue_t *rq; |
6200 | 6712 | ||
6201 | read_lock_irq(&tasklist_lock); | 6713 | read_lock_irq(&tasklist_lock); |
6202 | for_each_process (p) { | 6714 | for_each_process(p) { |
6203 | if (!rt_task(p)) | 6715 | if (!rt_task(p)) |
6204 | continue; | 6716 | continue; |
6205 | 6717 | ||
6206 | rq = task_rq_lock(p, &flags); | 6718 | spin_lock_irqsave(&p->pi_lock, flags); |
6719 | rq = __task_rq_lock(p); | ||
6207 | 6720 | ||
6208 | array = p->array; | 6721 | array = p->array; |
6209 | if (array) | 6722 | if (array) |
@@ -6214,7 +6727,8 @@ void normalize_rt_tasks(void) | |||
6214 | resched_task(rq->curr); | 6727 | resched_task(rq->curr); |
6215 | } | 6728 | } |
6216 | 6729 | ||
6217 | task_rq_unlock(rq, &flags); | 6730 | __task_rq_unlock(rq); |
6731 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
6218 | } | 6732 | } |
6219 | read_unlock_irq(&tasklist_lock); | 6733 | read_unlock_irq(&tasklist_lock); |
6220 | } | 6734 | } |
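Together with the sched_setscheduler() hunk above, this settles the lock order the PI code relies on: p->pi_lock is taken first, with interrupts disabled, and only then the task's runqueue lock, via the __task_rq_lock()/__task_rq_unlock() pair that does not touch interrupt state itself. In outline:

spin_lock_irqsave(&p->pi_lock, flags);  /* first: keeps PI waiters out */
rq = __task_rq_lock(p);                 /* second: the task's runqueue */
/* ... requeue or reprioritise p ... */
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);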