Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 402 |
1 files changed, 215 insertions, 187 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 72bb9483d949..45e17b83b7f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -263,6 +263,7 @@ struct rq { | |||
263 | 263 | ||
264 | unsigned int clock_warps, clock_overflows; | 264 | unsigned int clock_warps, clock_overflows; |
265 | unsigned int clock_unstable_events; | 265 | unsigned int clock_unstable_events; |
266 | u64 tick_timestamp; | ||
266 | 267 | ||
267 | atomic_t nr_iowait; | 268 | atomic_t nr_iowait; |
268 | 269 | ||
@@ -318,15 +319,19 @@ static inline int cpu_of(struct rq *rq) | |||
318 | } | 319 | } |
319 | 320 | ||
320 | /* | 321 | /* |
321 | * Per-runqueue clock, as finegrained as the platform can give us: | 322 | * Update the per-runqueue clock, as finegrained as the platform can give |
323 | * us, but without assuming monotonicity, etc.: | ||
322 | */ | 324 | */ |
323 | static unsigned long long __rq_clock(struct rq *rq) | 325 | static void __update_rq_clock(struct rq *rq) |
324 | { | 326 | { |
325 | u64 prev_raw = rq->prev_clock_raw; | 327 | u64 prev_raw = rq->prev_clock_raw; |
326 | u64 now = sched_clock(); | 328 | u64 now = sched_clock(); |
327 | s64 delta = now - prev_raw; | 329 | s64 delta = now - prev_raw; |
328 | u64 clock = rq->clock; | 330 | u64 clock = rq->clock; |
329 | 331 | ||
332 | #ifdef CONFIG_SCHED_DEBUG | ||
333 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | ||
334 | #endif | ||
330 | /* | 335 | /* |
331 | * Protect against sched_clock() occasionally going backwards: | 336 | * Protect against sched_clock() occasionally going backwards: |
332 | */ | 337 | */ |
@@ -337,8 +342,11 @@ static unsigned long long __rq_clock(struct rq *rq) | |||
337 | /* | 342 | /* |
338 | * Catch too large forward jumps too: | 343 | * Catch too large forward jumps too: |
339 | */ | 344 | */ |
340 | if (unlikely(delta > 2*TICK_NSEC)) { | 345 | if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) { |
341 | clock++; | 346 | if (clock < rq->tick_timestamp + TICK_NSEC) |
347 | clock = rq->tick_timestamp + TICK_NSEC; | ||
348 | else | ||
349 | clock++; | ||
342 | rq->clock_overflows++; | 350 | rq->clock_overflows++; |
343 | } else { | 351 | } else { |
344 | if (unlikely(delta > rq->clock_max_delta)) | 352 | if (unlikely(delta > rq->clock_max_delta)) |
@@ -349,18 +357,12 @@ static unsigned long long __rq_clock(struct rq *rq) | |||
349 | 357 | ||
350 | rq->prev_clock_raw = now; | 358 | rq->prev_clock_raw = now; |
351 | rq->clock = clock; | 359 | rq->clock = clock; |
352 | |||
353 | return clock; | ||
354 | } | 360 | } |
355 | 361 | ||
356 | static inline unsigned long long rq_clock(struct rq *rq) | 362 | static void update_rq_clock(struct rq *rq) |
357 | { | 363 | { |
358 | int this_cpu = smp_processor_id(); | 364 | if (likely(smp_processor_id() == cpu_of(rq))) |
359 | 365 | __update_rq_clock(rq); | |
360 | if (this_cpu == cpu_of(rq)) | ||
361 | return __rq_clock(rq); | ||
362 | |||
363 | return rq->clock; | ||
364 | } | 366 | } |
365 | 367 | ||
366 | /* | 368 | /* |
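The rewritten clock update above folds three protections into one place: it ignores sched_clock() going backwards (counted in clock_warps), caps forward jumps so rq->clock never runs more than TICK_NSEC past the last tick's timestamp (counted in clock_overflows), and otherwise just adds the raw delta. A minimal user-space sketch of that clamping policy, with made-up constants and a fake raw clock standing in for sched_clock(), could look roughly like this:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* assumed 1ms tick, for illustration only */

struct fake_rq {
	uint64_t prev_raw;	/* last raw clock sample */
	uint64_t clock;		/* monotonic, jump-limited clock */
	uint64_t tick_timestamp;/* value of ->clock at the last tick */
};

/* Mirror of the update logic: never go backwards, never run away forward. */
static void fake_update_clock(struct fake_rq *rq, uint64_t now)
{
	int64_t delta = (int64_t)(now - rq->prev_raw);
	uint64_t clock = rq->clock;

	if (delta < 0) {
		/* raw clock warped backwards: keep rq->clock where it is */
		clock++;
	} else if (clock + delta > rq->tick_timestamp + TICK_NSEC) {
		/* too large a forward jump: clamp to one tick past the last tick */
		if (clock < rq->tick_timestamp + TICK_NSEC)
			clock = rq->tick_timestamp + TICK_NSEC;
		else
			clock++;
	} else {
		clock += delta;
	}

	rq->prev_raw = now;
	rq->clock = clock;
}

int main(void)
{
	struct fake_rq rq = { .prev_raw = 0, .clock = 0, .tick_timestamp = 0 };

	fake_update_clock(&rq, 300000);		/* normal small delta */
	fake_update_clock(&rq, 100000);		/* raw clock went backwards */
	fake_update_clock(&rq, 50000000);	/* absurd forward jump gets clamped */
	printf("clock=%llu\n", (unsigned long long)rq.clock);
	return 0;
}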
@@ -386,9 +388,12 @@ unsigned long long cpu_clock(int cpu) | |||
386 | { | 388 | { |
387 | unsigned long long now; | 389 | unsigned long long now; |
388 | unsigned long flags; | 390 | unsigned long flags; |
391 | struct rq *rq; | ||
389 | 392 | ||
390 | local_irq_save(flags); | 393 | local_irq_save(flags); |
391 | now = rq_clock(cpu_rq(cpu)); | 394 | rq = cpu_rq(cpu); |
395 | update_rq_clock(rq); | ||
396 | now = rq->clock; | ||
392 | local_irq_restore(flags); | 397 | local_irq_restore(flags); |
393 | 398 | ||
394 | return now; | 399 | return now; |
@@ -637,6 +642,11 @@ static u64 div64_likely32(u64 divident, unsigned long divisor) | |||
637 | 642 | ||
638 | #define WMULT_SHIFT 32 | 643 | #define WMULT_SHIFT 32 |
639 | 644 | ||
645 | /* | ||
646 | * Shift right and round: | ||
647 | */ | ||
648 | #define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | ||
649 | |||
640 | static unsigned long | 650 | static unsigned long |
641 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | 651 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
642 | struct load_weight *lw) | 652 | struct load_weight *lw) |
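RSR() is a rounded right shift: adding half of the divisor (1 << (y - 1)) before shifting gives round-to-nearest instead of plain truncation. A quick stand-alone check of that behaviour (the macro is copied as-is, the test values are arbitrary):

#include <assert.h>
#include <stdio.h>

/* Shift right and round (same definition as in the hunk above): */
#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	/* 10/4 = 2.5 -> rounds to 3; a plain shift would give 2 */
	assert((10UL >> 2) == 2);
	assert(RSR(10UL, 2) == 3);

	/* 9/4 = 2.25 -> rounds down to 2 */
	assert(RSR(9UL, 2) == 2);

	/* 3<<20 is 3145728, divided by 2^21 = 1.5 -> rounds to 2 */
	assert(RSR(3UL << 20, 21) == 2);

	printf("rounded shifts behave as expected\n");
	return 0;
}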
@@ -644,18 +654,17 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
644 | u64 tmp; | 654 | u64 tmp; |
645 | 655 | ||
646 | if (unlikely(!lw->inv_weight)) | 656 | if (unlikely(!lw->inv_weight)) |
647 | lw->inv_weight = WMULT_CONST / lw->weight; | 657 | lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1; |
648 | 658 | ||
649 | tmp = (u64)delta_exec * weight; | 659 | tmp = (u64)delta_exec * weight; |
650 | /* | 660 | /* |
651 | * Check whether we'd overflow the 64-bit multiplication: | 661 | * Check whether we'd overflow the 64-bit multiplication: |
652 | */ | 662 | */ |
653 | if (unlikely(tmp > WMULT_CONST)) { | 663 | if (unlikely(tmp > WMULT_CONST)) |
654 | tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight) | 664 | tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight, |
655 | >> (WMULT_SHIFT/2); | 665 | WMULT_SHIFT/2); |
656 | } else { | 666 | else |
657 | tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT; | 667 | tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT); |
658 | } | ||
659 | 668 | ||
660 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 669 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
661 | } | 670 | } |
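calc_delta_mine() computes delta_exec * weight / lw->weight without a division on the hot path: lw->inv_weight caches a rounded 2^32 / lw->weight (the prio_to_wmult table below is built the same way, e.g. 4194304 == 2^32 / 1024), so the division becomes a multiply followed by a rounded shift. A rough user-space check of how close the approximation gets; the structure follows the patch, but the stand-alone function, names and test values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT	32
#define RSR(x, y)	(((x) + (1ULL << ((y) - 1))) >> (y))

/* Same shape as calc_delta_mine(), minus the 64-bit overflow special case. */
static uint64_t scale_delta(uint64_t delta_exec, uint32_t weight, uint32_t lw_weight)
{
	/* rounded 2^32 / lw_weight, as the patch now derives inv_weight */
	uint64_t inv = ((1ULL << 32) - lw_weight / 2) / lw_weight + 1;

	return RSR(delta_exec * weight * inv, WMULT_SHIFT);
}

int main(void)
{
	uint64_t delta = 4000000;	/* 4ms of runtime, in nanoseconds */
	uint32_t weight = 1024;		/* nice-0 weight */
	uint32_t queue_weight = 3121;	/* e.g. a nice -5 entity's weight */

	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)(delta * weight / queue_weight),
	       (unsigned long long)scale_delta(delta, weight, queue_weight));
	return 0;
}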
@@ -703,11 +712,14 @@ static void update_load_sub(struct load_weight *lw, unsigned long dec) | |||
703 | * the relative distance between them is ~25%.) | 712 | * the relative distance between them is ~25%.) |
704 | */ | 713 | */ |
705 | static const int prio_to_weight[40] = { | 714 | static const int prio_to_weight[40] = { |
706 | /* -20 */ 88818, 71054, 56843, 45475, 36380, 29104, 23283, 18626, 14901, 11921, | 715 | /* -20 */ 88761, 71755, 56483, 46273, 36291, |
707 | /* -10 */ 9537, 7629, 6103, 4883, 3906, 3125, 2500, 2000, 1600, 1280, | 716 | /* -15 */ 29154, 23254, 18705, 14949, 11916, |
708 | /* 0 */ NICE_0_LOAD /* 1024 */, | 717 | /* -10 */ 9548, 7620, 6100, 4904, 3906, |
709 | /* 1 */ 819, 655, 524, 419, 336, 268, 215, 172, 137, | 718 | /* -5 */ 3121, 2501, 1991, 1586, 1277, |
710 | /* 10 */ 110, 87, 70, 56, 45, 36, 29, 23, 18, 15, | 719 | /* 0 */ 1024, 820, 655, 526, 423, |
720 | /* 5 */ 335, 272, 215, 172, 137, | ||
721 | /* 10 */ 110, 87, 70, 56, 45, | ||
722 | /* 15 */ 36, 29, 23, 18, 15, | ||
711 | }; | 723 | }; |
712 | 724 | ||
713 | /* | 725 | /* |
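The retuned prio_to_weight[] table keeps the documented property that adjacent nice levels differ by roughly 25% in weight: each entry is approximately 1024 / 1.25^nice. A small sketch that regenerates the table from that rule and compares it with a few of the values above (purely illustrative; the kernel table is a hand-rounded constant array, not computed at runtime):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* weight(nice) ~ 1024 * 1.25^(-nice), nice in [-20, 19]; build with -lm */
	for (int nice = -20; nice <= 19; nice++)
		printf("nice %3d -> ~%6ld\n", nice,
		       lround(1024.0 * pow(1.25, -nice)));
	/*
	 * e.g. nice -20 -> ~88818 vs. the table's hand-rounded 88761,
	 *      nice   5 -> ~336   vs. 335,
	 *      nice  19 -> ~15    vs. 15
	 */
	return 0;
}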
@@ -718,14 +730,14 @@ static const int prio_to_weight[40] = { | |||
718 | * into multiplications: | 730 | * into multiplications: |
719 | */ | 731 | */ |
720 | static const u32 prio_to_wmult[40] = { | 732 | static const u32 prio_to_wmult[40] = { |
721 | /* -20 */ 48356, 60446, 75558, 94446, 118058, | 733 | /* -20 */ 48388, 59856, 76040, 92818, 118348, |
722 | /* -15 */ 147573, 184467, 230589, 288233, 360285, | 734 | /* -15 */ 147320, 184698, 229616, 287308, 360437, |
723 | /* -10 */ 450347, 562979, 703746, 879575, 1099582, | 735 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, |
724 | /* -5 */ 1374389, 1717986, 2147483, 2684354, 3355443, | 736 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, |
725 | /* 0 */ 4194304, 5244160, 6557201, 8196502, 10250518, | 737 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, |
726 | /* 5 */ 12782640, 16025997, 19976592, 24970740, 31350126, | 738 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, |
727 | /* 10 */ 39045157, 49367440, 61356675, 76695844, 95443717, | 739 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, |
728 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, | 740 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
729 | }; | 741 | }; |
730 | 742 | ||
731 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); | 743 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); |
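Each prio_to_wmult[] entry is the pre-computed inverse used by calc_delta_mine(): roughly 2^32 divided by the corresponding prio_to_weight[] entry, so the runtime division turns into a multiply-and-shift. A quick sanity check of that relationship on a few rows (values copied from the two tables above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* nice 0: weight 1024, wmult 4194304 -> exactly 2^32 / 1024 */
	assert((1ULL << 32) / 1024 == 4194304);

	/* nice -5: weight 3121, wmult 1376151 -> 2^32 / 3121, truncated */
	assert((1ULL << 32) / 3121 == 1376151);

	/* nice 19: weight 15, wmult 286331153 -> 2^32 / 15 */
	assert((1ULL << 32) / 15 == 286331153);

	printf("wmult entries are ~2^32 / weight\n");
	return 0;
}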
@@ -745,8 +757,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
745 | unsigned long max_nr_move, unsigned long max_load_move, | 757 | unsigned long max_nr_move, unsigned long max_load_move, |
746 | struct sched_domain *sd, enum cpu_idle_type idle, | 758 | struct sched_domain *sd, enum cpu_idle_type idle, |
747 | int *all_pinned, unsigned long *load_moved, | 759 | int *all_pinned, unsigned long *load_moved, |
748 | int this_best_prio, int best_prio, int best_prio_seen, | 760 | int *this_best_prio, struct rq_iterator *iterator); |
749 | struct rq_iterator *iterator); | ||
750 | 761 | ||
751 | #include "sched_stats.h" | 762 | #include "sched_stats.h" |
752 | #include "sched_rt.c" | 763 | #include "sched_rt.c" |
@@ -782,14 +793,14 @@ static void __update_curr_load(struct rq *rq, struct load_stat *ls) | |||
782 | * This function is called /before/ updating rq->ls.load | 793 | * This function is called /before/ updating rq->ls.load |
783 | * and when switching tasks. | 794 | * and when switching tasks. |
784 | */ | 795 | */ |
785 | static void update_curr_load(struct rq *rq, u64 now) | 796 | static void update_curr_load(struct rq *rq) |
786 | { | 797 | { |
787 | struct load_stat *ls = &rq->ls; | 798 | struct load_stat *ls = &rq->ls; |
788 | u64 start; | 799 | u64 start; |
789 | 800 | ||
790 | start = ls->load_update_start; | 801 | start = ls->load_update_start; |
791 | ls->load_update_start = now; | 802 | ls->load_update_start = rq->clock; |
792 | ls->delta_stat += now - start; | 803 | ls->delta_stat += rq->clock - start; |
793 | /* | 804 | /* |
794 | * Stagger updates to ls->delta_fair. Very frequent updates | 805 | * Stagger updates to ls->delta_fair. Very frequent updates |
795 | * can be expensive. | 806 | * can be expensive. |
@@ -798,30 +809,28 @@ static void update_curr_load(struct rq *rq, u64 now) | |||
798 | __update_curr_load(rq, ls); | 809 | __update_curr_load(rq, ls); |
799 | } | 810 | } |
800 | 811 | ||
801 | static inline void | 812 | static inline void inc_load(struct rq *rq, const struct task_struct *p) |
802 | inc_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
803 | { | 813 | { |
804 | update_curr_load(rq, now); | 814 | update_curr_load(rq); |
805 | update_load_add(&rq->ls.load, p->se.load.weight); | 815 | update_load_add(&rq->ls.load, p->se.load.weight); |
806 | } | 816 | } |
807 | 817 | ||
808 | static inline void | 818 | static inline void dec_load(struct rq *rq, const struct task_struct *p) |
809 | dec_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
810 | { | 819 | { |
811 | update_curr_load(rq, now); | 820 | update_curr_load(rq); |
812 | update_load_sub(&rq->ls.load, p->se.load.weight); | 821 | update_load_sub(&rq->ls.load, p->se.load.weight); |
813 | } | 822 | } |
814 | 823 | ||
815 | static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now) | 824 | static void inc_nr_running(struct task_struct *p, struct rq *rq) |
816 | { | 825 | { |
817 | rq->nr_running++; | 826 | rq->nr_running++; |
818 | inc_load(rq, p, now); | 827 | inc_load(rq, p); |
819 | } | 828 | } |
820 | 829 | ||
821 | static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now) | 830 | static void dec_nr_running(struct task_struct *p, struct rq *rq) |
822 | { | 831 | { |
823 | rq->nr_running--; | 832 | rq->nr_running--; |
824 | dec_load(rq, p, now); | 833 | dec_load(rq, p); |
825 | } | 834 | } |
826 | 835 | ||
827 | static void set_load_weight(struct task_struct *p) | 836 | static void set_load_weight(struct task_struct *p) |
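The block above is part of a pattern that runs through this whole diff: helpers such as update_curr_load(), inc_load()/dec_load() and inc_nr_running()/dec_nr_running() no longer take a u64 "now" argument; callers refresh the per-runqueue clock once (update_rq_clock() with rq->lock held) and the helpers read rq->clock directly. A schematic, non-kernel sketch of that calling convention, with all names invented:

#include <stdint.h>
#include <stdio.h>

struct mock_rq {
	uint64_t clock;		/* refreshed once per scheduler entry point */
	unsigned long load;
	unsigned int nr_running;
};

/* stand-in for update_rq_clock(): called once, with the lock held */
static void mock_update_clock(struct mock_rq *rq, uint64_t raw_now)
{
	rq->clock = raw_now;
}

/* helpers no longer take 'u64 now'; they read rq->clock directly */
static void mock_account_enqueue(struct mock_rq *rq, unsigned long weight)
{
	printf("accounting at clock=%llu\n", (unsigned long long)rq->clock);
	rq->load += weight;
	rq->nr_running++;
}

int main(void)
{
	struct mock_rq rq = { 0, 0, 0 };

	mock_update_clock(&rq, 123456789);	/* one clock update per entry point */
	mock_account_enqueue(&rq, 1024);	/* every helper sees the same stamp */
	mock_account_enqueue(&rq, 335);
	return 0;
}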
@@ -848,18 +857,16 @@ static void set_load_weight(struct task_struct *p) | |||
848 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; | 857 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; |
849 | } | 858 | } |
850 | 859 | ||
851 | static void | 860 | static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) |
852 | enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now) | ||
853 | { | 861 | { |
854 | sched_info_queued(p); | 862 | sched_info_queued(p); |
855 | p->sched_class->enqueue_task(rq, p, wakeup, now); | 863 | p->sched_class->enqueue_task(rq, p, wakeup); |
856 | p->se.on_rq = 1; | 864 | p->se.on_rq = 1; |
857 | } | 865 | } |
858 | 866 | ||
859 | static void | 867 | static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) |
860 | dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now) | ||
861 | { | 868 | { |
862 | p->sched_class->dequeue_task(rq, p, sleep, now); | 869 | p->sched_class->dequeue_task(rq, p, sleep); |
863 | p->se.on_rq = 0; | 870 | p->se.on_rq = 0; |
864 | } | 871 | } |
865 | 872 | ||
@@ -914,13 +921,11 @@ static int effective_prio(struct task_struct *p) | |||
914 | */ | 921 | */ |
915 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | 922 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) |
916 | { | 923 | { |
917 | u64 now = rq_clock(rq); | ||
918 | |||
919 | if (p->state == TASK_UNINTERRUPTIBLE) | 924 | if (p->state == TASK_UNINTERRUPTIBLE) |
920 | rq->nr_uninterruptible--; | 925 | rq->nr_uninterruptible--; |
921 | 926 | ||
922 | enqueue_task(rq, p, wakeup, now); | 927 | enqueue_task(rq, p, wakeup); |
923 | inc_nr_running(p, rq, now); | 928 | inc_nr_running(p, rq); |
924 | } | 929 | } |
925 | 930 | ||
926 | /* | 931 | /* |
@@ -928,13 +933,13 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | |||
928 | */ | 933 | */ |
929 | static inline void activate_idle_task(struct task_struct *p, struct rq *rq) | 934 | static inline void activate_idle_task(struct task_struct *p, struct rq *rq) |
930 | { | 935 | { |
931 | u64 now = rq_clock(rq); | 936 | update_rq_clock(rq); |
932 | 937 | ||
933 | if (p->state == TASK_UNINTERRUPTIBLE) | 938 | if (p->state == TASK_UNINTERRUPTIBLE) |
934 | rq->nr_uninterruptible--; | 939 | rq->nr_uninterruptible--; |
935 | 940 | ||
936 | enqueue_task(rq, p, 0, now); | 941 | enqueue_task(rq, p, 0); |
937 | inc_nr_running(p, rq, now); | 942 | inc_nr_running(p, rq); |
938 | } | 943 | } |
939 | 944 | ||
940 | /* | 945 | /* |
@@ -942,13 +947,11 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq) | |||
942 | */ | 947 | */ |
943 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) | 948 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) |
944 | { | 949 | { |
945 | u64 now = rq_clock(rq); | ||
946 | |||
947 | if (p->state == TASK_UNINTERRUPTIBLE) | 950 | if (p->state == TASK_UNINTERRUPTIBLE) |
948 | rq->nr_uninterruptible++; | 951 | rq->nr_uninterruptible++; |
949 | 952 | ||
950 | dequeue_task(rq, p, sleep, now); | 953 | dequeue_task(rq, p, sleep); |
951 | dec_nr_running(p, rq, now); | 954 | dec_nr_running(p, rq); |
952 | } | 955 | } |
953 | 956 | ||
954 | /** | 957 | /** |
@@ -1516,6 +1519,7 @@ out_set_cpu: | |||
1516 | 1519 | ||
1517 | out_activate: | 1520 | out_activate: |
1518 | #endif /* CONFIG_SMP */ | 1521 | #endif /* CONFIG_SMP */ |
1522 | update_rq_clock(rq); | ||
1519 | activate_task(rq, p, 1); | 1523 | activate_task(rq, p, 1); |
1520 | /* | 1524 | /* |
1521 | * Sync wakeups (i.e. those types of wakeups where the waker | 1525 | * Sync wakeups (i.e. those types of wakeups where the waker |
@@ -1647,12 +1651,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1647 | unsigned long flags; | 1651 | unsigned long flags; |
1648 | struct rq *rq; | 1652 | struct rq *rq; |
1649 | int this_cpu; | 1653 | int this_cpu; |
1650 | u64 now; | ||
1651 | 1654 | ||
1652 | rq = task_rq_lock(p, &flags); | 1655 | rq = task_rq_lock(p, &flags); |
1653 | BUG_ON(p->state != TASK_RUNNING); | 1656 | BUG_ON(p->state != TASK_RUNNING); |
1654 | this_cpu = smp_processor_id(); /* parent's CPU */ | 1657 | this_cpu = smp_processor_id(); /* parent's CPU */ |
1655 | now = rq_clock(rq); | 1658 | update_rq_clock(rq); |
1656 | 1659 | ||
1657 | p->prio = effective_prio(p); | 1660 | p->prio = effective_prio(p); |
1658 | 1661 | ||
@@ -1666,8 +1669,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1666 | * Let the scheduling class do new task startup | 1669 | * Let the scheduling class do new task startup |
1667 | * management (if any): | 1670 | * management (if any): |
1668 | */ | 1671 | */ |
1669 | p->sched_class->task_new(rq, p, now); | 1672 | p->sched_class->task_new(rq, p); |
1670 | inc_nr_running(p, rq, now); | 1673 | inc_nr_running(p, rq); |
1671 | } | 1674 | } |
1672 | check_preempt_curr(rq, p); | 1675 | check_preempt_curr(rq, p); |
1673 | task_rq_unlock(rq, &flags); | 1676 | task_rq_unlock(rq, &flags); |
@@ -1954,7 +1957,6 @@ static void update_cpu_load(struct rq *this_rq) | |||
1954 | unsigned long total_load = this_rq->ls.load.weight; | 1957 | unsigned long total_load = this_rq->ls.load.weight; |
1955 | unsigned long this_load = total_load; | 1958 | unsigned long this_load = total_load; |
1956 | struct load_stat *ls = &this_rq->ls; | 1959 | struct load_stat *ls = &this_rq->ls; |
1957 | u64 now = __rq_clock(this_rq); | ||
1958 | int i, scale; | 1960 | int i, scale; |
1959 | 1961 | ||
1960 | this_rq->nr_load_updates++; | 1962 | this_rq->nr_load_updates++; |
@@ -1962,7 +1964,7 @@ static void update_cpu_load(struct rq *this_rq) | |||
1962 | goto do_avg; | 1964 | goto do_avg; |
1963 | 1965 | ||
1964 | /* Update delta_fair/delta_exec fields first */ | 1966 | /* Update delta_fair/delta_exec fields first */ |
1965 | update_curr_load(this_rq, now); | 1967 | update_curr_load(this_rq); |
1966 | 1968 | ||
1967 | fair_delta64 = ls->delta_fair + 1; | 1969 | fair_delta64 = ls->delta_fair + 1; |
1968 | ls->delta_fair = 0; | 1970 | ls->delta_fair = 0; |
@@ -1970,8 +1972,8 @@ static void update_cpu_load(struct rq *this_rq) | |||
1970 | exec_delta64 = ls->delta_exec + 1; | 1972 | exec_delta64 = ls->delta_exec + 1; |
1971 | ls->delta_exec = 0; | 1973 | ls->delta_exec = 0; |
1972 | 1974 | ||
1973 | sample_interval64 = now - ls->load_update_last; | 1975 | sample_interval64 = this_rq->clock - ls->load_update_last; |
1974 | ls->load_update_last = now; | 1976 | ls->load_update_last = this_rq->clock; |
1975 | 1977 | ||
1976 | if ((s64)sample_interval64 < (s64)TICK_NSEC) | 1978 | if ((s64)sample_interval64 < (s64)TICK_NSEC) |
1977 | sample_interval64 = TICK_NSEC; | 1979 | sample_interval64 = TICK_NSEC; |
@@ -2026,6 +2028,8 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) | |||
2026 | spin_lock(&rq1->lock); | 2028 | spin_lock(&rq1->lock); |
2027 | } | 2029 | } |
2028 | } | 2030 | } |
2031 | update_rq_clock(rq1); | ||
2032 | update_rq_clock(rq2); | ||
2029 | } | 2033 | } |
2030 | 2034 | ||
2031 | /* | 2035 | /* |
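With the clock now updated explicitly, double_rq_lock() stamps both runqueue clocks as soon as both locks are held, so the balancing code that follows sees consistent timestamps on both sides; the newly-idle path later in this diff does the same by hand, updating only busiest because "this_rq->clock is already updated" by schedule(). A tiny user-space sketch of that lock-then-timestamp ordering (pthread mutexes and a monotonic clock as stand-ins, assuming two distinct runqueues; link with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct mock_rq {
	pthread_mutex_t lock;
	uint64_t clock;
};

static uint64_t raw_clock(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* take both locks in address order, then stamp both clocks */
static void mock_double_rq_lock(struct mock_rq *rq1, struct mock_rq *rq2)
{
	if (rq1 < rq2) {
		pthread_mutex_lock(&rq1->lock);
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
	/* both runqueues now carry fresh, comparable timestamps */
	rq1->clock = raw_clock();
	rq2->clock = raw_clock();
}

static void mock_double_rq_unlock(struct mock_rq *rq1, struct mock_rq *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
	struct mock_rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct mock_rq b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	mock_double_rq_lock(&a, &b);
	printf("a.clock=%llu b.clock=%llu\n",
	       (unsigned long long)a.clock, (unsigned long long)b.clock);
	mock_double_rq_unlock(&a, &b);
	return 0;
}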
@@ -2166,8 +2170,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2166 | unsigned long max_nr_move, unsigned long max_load_move, | 2170 | unsigned long max_nr_move, unsigned long max_load_move, |
2167 | struct sched_domain *sd, enum cpu_idle_type idle, | 2171 | struct sched_domain *sd, enum cpu_idle_type idle, |
2168 | int *all_pinned, unsigned long *load_moved, | 2172 | int *all_pinned, unsigned long *load_moved, |
2169 | int this_best_prio, int best_prio, int best_prio_seen, | 2173 | int *this_best_prio, struct rq_iterator *iterator) |
2170 | struct rq_iterator *iterator) | ||
2171 | { | 2174 | { |
2172 | int pulled = 0, pinned = 0, skip_for_load; | 2175 | int pulled = 0, pinned = 0, skip_for_load; |
2173 | struct task_struct *p; | 2176 | struct task_struct *p; |
@@ -2192,12 +2195,8 @@ next: | |||
2192 | */ | 2195 | */ |
2193 | skip_for_load = (p->se.load.weight >> 1) > rem_load_move + | 2196 | skip_for_load = (p->se.load.weight >> 1) > rem_load_move + |
2194 | SCHED_LOAD_SCALE_FUZZ; | 2197 | SCHED_LOAD_SCALE_FUZZ; |
2195 | if (skip_for_load && p->prio < this_best_prio) | 2198 | if ((skip_for_load && p->prio >= *this_best_prio) || |
2196 | skip_for_load = !best_prio_seen && p->prio == best_prio; | ||
2197 | if (skip_for_load || | ||
2198 | !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { | 2199 | !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { |
2199 | |||
2200 | best_prio_seen |= p->prio == best_prio; | ||
2201 | p = iterator->next(iterator->arg); | 2200 | p = iterator->next(iterator->arg); |
2202 | goto next; | 2201 | goto next; |
2203 | } | 2202 | } |
@@ -2211,8 +2210,8 @@ next: | |||
2211 | * and the prescribed amount of weighted load. | 2210 | * and the prescribed amount of weighted load. |
2212 | */ | 2211 | */ |
2213 | if (pulled < max_nr_move && rem_load_move > 0) { | 2212 | if (pulled < max_nr_move && rem_load_move > 0) { |
2214 | if (p->prio < this_best_prio) | 2213 | if (p->prio < *this_best_prio) |
2215 | this_best_prio = p->prio; | 2214 | *this_best_prio = p->prio; |
2216 | p = iterator->next(iterator->arg); | 2215 | p = iterator->next(iterator->arg); |
2217 | goto next; | 2216 | goto next; |
2218 | } | 2217 | } |
@@ -2231,32 +2230,52 @@ out: | |||
2231 | } | 2230 | } |
2232 | 2231 | ||
2233 | /* | 2232 | /* |
2234 | * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted | 2233 | * move_tasks tries to move up to max_load_move weighted load from busiest to |
2235 | * load from busiest to this_rq, as part of a balancing operation within | 2234 | * this_rq, as part of a balancing operation within domain "sd". |
2236 | * "domain". Returns the number of tasks moved. | 2235 | * Returns 1 if successful and 0 otherwise. |
2237 | * | 2236 | * |
2238 | * Called with both runqueues locked. | 2237 | * Called with both runqueues locked. |
2239 | */ | 2238 | */ |
2240 | static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 2239 | static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, |
2241 | unsigned long max_nr_move, unsigned long max_load_move, | 2240 | unsigned long max_load_move, |
2242 | struct sched_domain *sd, enum cpu_idle_type idle, | 2241 | struct sched_domain *sd, enum cpu_idle_type idle, |
2243 | int *all_pinned) | 2242 | int *all_pinned) |
2244 | { | 2243 | { |
2245 | struct sched_class *class = sched_class_highest; | 2244 | struct sched_class *class = sched_class_highest; |
2246 | unsigned long load_moved, total_nr_moved = 0, nr_moved; | 2245 | unsigned long total_load_moved = 0; |
2247 | long rem_load_move = max_load_move; | 2246 | int this_best_prio = this_rq->curr->prio; |
2248 | 2247 | ||
2249 | do { | 2248 | do { |
2250 | nr_moved = class->load_balance(this_rq, this_cpu, busiest, | 2249 | total_load_moved += |
2251 | max_nr_move, (unsigned long)rem_load_move, | 2250 | class->load_balance(this_rq, this_cpu, busiest, |
2252 | sd, idle, all_pinned, &load_moved); | 2251 | ULONG_MAX, max_load_move - total_load_moved, |
2253 | total_nr_moved += nr_moved; | 2252 | sd, idle, all_pinned, &this_best_prio); |
2254 | max_nr_move -= nr_moved; | ||
2255 | rem_load_move -= load_moved; | ||
2256 | class = class->next; | 2253 | class = class->next; |
2257 | } while (class && max_nr_move && rem_load_move > 0); | 2254 | } while (class && max_load_move > total_load_moved); |
2258 | 2255 | ||
2259 | return total_nr_moved; | 2256 | return total_load_moved > 0; |
2257 | } | ||
2258 | |||
2259 | /* | ||
2260 | * move_one_task tries to move exactly one task from busiest to this_rq, as | ||
2261 | * part of active balancing operations within "domain". | ||
2262 | * Returns 1 if successful and 0 otherwise. | ||
2263 | * | ||
2264 | * Called with both runqueues locked. | ||
2265 | */ | ||
2266 | static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
2267 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
2268 | { | ||
2269 | struct sched_class *class; | ||
2270 | int this_best_prio = MAX_PRIO; | ||
2271 | |||
2272 | for (class = sched_class_highest; class; class = class->next) | ||
2273 | if (class->load_balance(this_rq, this_cpu, busiest, | ||
2274 | 1, ULONG_MAX, sd, idle, NULL, | ||
2275 | &this_best_prio)) | ||
2276 | return 1; | ||
2277 | |||
2278 | return 0; | ||
2260 | } | 2279 | } |
2261 | 2280 | ||
2262 | /* | 2281 | /* |
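The reworked move_tasks() walks the scheduling classes from highest priority downwards, pulling load until either max_load_move weighted load has moved or the classes are exhausted, and returns a success flag rather than a task count; move_one_task() is the single-task variant used by active balancing further down. A stripped-down sketch of that iteration pattern; the class list and load_balance callback here are simplified stand-ins, not the real sched_class interface:

#include <stdio.h>

struct mock_class {
	const char *name;
	const struct mock_class *next;
	/* returns how much weighted load this class managed to move */
	unsigned long (*load_balance)(unsigned long max_load_move,
				      int *this_best_prio);
};

static unsigned long rt_balance(unsigned long max, int *best)   { return 0; }
static unsigned long fair_balance(unsigned long max, int *best)
{
	*best = 120;			/* remembers the best prio seen so far */
	return max < 3000 ? max : 3000;	/* pretend we moved up to 3000 units */
}

static const struct mock_class fair_class = { "fair", NULL, fair_balance };
static const struct mock_class rt_class   = { "rt", &fair_class, rt_balance };
static const struct mock_class *class_highest = &rt_class;

/* same shape as the new move_tasks(): loop classes until enough load moved */
static int mock_move_tasks(unsigned long max_load_move)
{
	const struct mock_class *class = class_highest;
	unsigned long total_load_moved = 0;
	int this_best_prio = 140;	/* threaded through by pointer, as above */

	do {
		total_load_moved +=
			class->load_balance(max_load_move - total_load_moved,
					    &this_best_prio);
		class = class->next;
	} while (class && max_load_move > total_load_moved);

	return total_load_moved > 0;	/* 1 if anything moved, like ld_moved */
}

int main(void)
{
	printf("moved anything: %d\n", mock_move_tasks(2048));
	return 0;
}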
@@ -2588,11 +2607,6 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
2588 | */ | 2607 | */ |
2589 | #define MAX_PINNED_INTERVAL 512 | 2608 | #define MAX_PINNED_INTERVAL 512 |
2590 | 2609 | ||
2591 | static inline unsigned long minus_1_or_zero(unsigned long n) | ||
2592 | { | ||
2593 | return n > 0 ? n - 1 : 0; | ||
2594 | } | ||
2595 | |||
2596 | /* | 2610 | /* |
2597 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | 2611 | * Check this_cpu to ensure it is balanced within domain. Attempt to move |
2598 | * tasks if there is an imbalance. | 2612 | * tasks if there is an imbalance. |
@@ -2601,7 +2615,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
2601 | struct sched_domain *sd, enum cpu_idle_type idle, | 2615 | struct sched_domain *sd, enum cpu_idle_type idle, |
2602 | int *balance) | 2616 | int *balance) |
2603 | { | 2617 | { |
2604 | int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 2618 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
2605 | struct sched_group *group; | 2619 | struct sched_group *group; |
2606 | unsigned long imbalance; | 2620 | unsigned long imbalance; |
2607 | struct rq *busiest; | 2621 | struct rq *busiest; |
@@ -2642,18 +2656,17 @@ redo: | |||
2642 | 2656 | ||
2643 | schedstat_add(sd, lb_imbalance[idle], imbalance); | 2657 | schedstat_add(sd, lb_imbalance[idle], imbalance); |
2644 | 2658 | ||
2645 | nr_moved = 0; | 2659 | ld_moved = 0; |
2646 | if (busiest->nr_running > 1) { | 2660 | if (busiest->nr_running > 1) { |
2647 | /* | 2661 | /* |
2648 | * Attempt to move tasks. If find_busiest_group has found | 2662 | * Attempt to move tasks. If find_busiest_group has found |
2649 | * an imbalance but busiest->nr_running <= 1, the group is | 2663 | * an imbalance but busiest->nr_running <= 1, the group is |
2650 | * still unbalanced. nr_moved simply stays zero, so it is | 2664 | * still unbalanced. ld_moved simply stays zero, so it is |
2651 | * correctly treated as an imbalance. | 2665 | * correctly treated as an imbalance. |
2652 | */ | 2666 | */ |
2653 | local_irq_save(flags); | 2667 | local_irq_save(flags); |
2654 | double_rq_lock(this_rq, busiest); | 2668 | double_rq_lock(this_rq, busiest); |
2655 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2669 | ld_moved = move_tasks(this_rq, this_cpu, busiest, |
2656 | minus_1_or_zero(busiest->nr_running), | ||
2657 | imbalance, sd, idle, &all_pinned); | 2670 | imbalance, sd, idle, &all_pinned); |
2658 | double_rq_unlock(this_rq, busiest); | 2671 | double_rq_unlock(this_rq, busiest); |
2659 | local_irq_restore(flags); | 2672 | local_irq_restore(flags); |
@@ -2661,7 +2674,7 @@ redo: | |||
2661 | /* | 2674 | /* |
2662 | * some other cpu did the load balance for us. | 2675 | * some other cpu did the load balance for us. |
2663 | */ | 2676 | */ |
2664 | if (nr_moved && this_cpu != smp_processor_id()) | 2677 | if (ld_moved && this_cpu != smp_processor_id()) |
2665 | resched_cpu(this_cpu); | 2678 | resched_cpu(this_cpu); |
2666 | 2679 | ||
2667 | /* All tasks on this runqueue were pinned by CPU affinity */ | 2680 | /* All tasks on this runqueue were pinned by CPU affinity */ |
@@ -2673,7 +2686,7 @@ redo: | |||
2673 | } | 2686 | } |
2674 | } | 2687 | } |
2675 | 2688 | ||
2676 | if (!nr_moved) { | 2689 | if (!ld_moved) { |
2677 | schedstat_inc(sd, lb_failed[idle]); | 2690 | schedstat_inc(sd, lb_failed[idle]); |
2678 | sd->nr_balance_failed++; | 2691 | sd->nr_balance_failed++; |
2679 | 2692 | ||
@@ -2722,10 +2735,10 @@ redo: | |||
2722 | sd->balance_interval *= 2; | 2735 | sd->balance_interval *= 2; |
2723 | } | 2736 | } |
2724 | 2737 | ||
2725 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 2738 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
2726 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 2739 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
2727 | return -1; | 2740 | return -1; |
2728 | return nr_moved; | 2741 | return ld_moved; |
2729 | 2742 | ||
2730 | out_balanced: | 2743 | out_balanced: |
2731 | schedstat_inc(sd, lb_balanced[idle]); | 2744 | schedstat_inc(sd, lb_balanced[idle]); |
@@ -2757,7 +2770,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
2757 | struct sched_group *group; | 2770 | struct sched_group *group; |
2758 | struct rq *busiest = NULL; | 2771 | struct rq *busiest = NULL; |
2759 | unsigned long imbalance; | 2772 | unsigned long imbalance; |
2760 | int nr_moved = 0; | 2773 | int ld_moved = 0; |
2761 | int sd_idle = 0; | 2774 | int sd_idle = 0; |
2762 | int all_pinned = 0; | 2775 | int all_pinned = 0; |
2763 | cpumask_t cpus = CPU_MASK_ALL; | 2776 | cpumask_t cpus = CPU_MASK_ALL; |
@@ -2792,12 +2805,13 @@ redo: | |||
2792 | 2805 | ||
2793 | schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance); | 2806 | schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance); |
2794 | 2807 | ||
2795 | nr_moved = 0; | 2808 | ld_moved = 0; |
2796 | if (busiest->nr_running > 1) { | 2809 | if (busiest->nr_running > 1) { |
2797 | /* Attempt to move tasks */ | 2810 | /* Attempt to move tasks */ |
2798 | double_lock_balance(this_rq, busiest); | 2811 | double_lock_balance(this_rq, busiest); |
2799 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2812 | /* this_rq->clock is already updated */ |
2800 | minus_1_or_zero(busiest->nr_running), | 2813 | update_rq_clock(busiest); |
2814 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | ||
2801 | imbalance, sd, CPU_NEWLY_IDLE, | 2815 | imbalance, sd, CPU_NEWLY_IDLE, |
2802 | &all_pinned); | 2816 | &all_pinned); |
2803 | spin_unlock(&busiest->lock); | 2817 | spin_unlock(&busiest->lock); |
@@ -2809,7 +2823,7 @@ redo: | |||
2809 | } | 2823 | } |
2810 | } | 2824 | } |
2811 | 2825 | ||
2812 | if (!nr_moved) { | 2826 | if (!ld_moved) { |
2813 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | 2827 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); |
2814 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 2828 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
2815 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 2829 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
@@ -2817,7 +2831,7 @@ redo: | |||
2817 | } else | 2831 | } else |
2818 | sd->nr_balance_failed = 0; | 2832 | sd->nr_balance_failed = 0; |
2819 | 2833 | ||
2820 | return nr_moved; | 2834 | return ld_moved; |
2821 | 2835 | ||
2822 | out_balanced: | 2836 | out_balanced: |
2823 | schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]); | 2837 | schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]); |
@@ -2894,6 +2908,8 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
2894 | 2908 | ||
2895 | /* move a task from busiest_rq to target_rq */ | 2909 | /* move a task from busiest_rq to target_rq */ |
2896 | double_lock_balance(busiest_rq, target_rq); | 2910 | double_lock_balance(busiest_rq, target_rq); |
2911 | update_rq_clock(busiest_rq); | ||
2912 | update_rq_clock(target_rq); | ||
2897 | 2913 | ||
2898 | /* Search for an sd spanning us and the target CPU. */ | 2914 | /* Search for an sd spanning us and the target CPU. */ |
2899 | for_each_domain(target_cpu, sd) { | 2915 | for_each_domain(target_cpu, sd) { |
@@ -2905,8 +2921,8 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
2905 | if (likely(sd)) { | 2921 | if (likely(sd)) { |
2906 | schedstat_inc(sd, alb_cnt); | 2922 | schedstat_inc(sd, alb_cnt); |
2907 | 2923 | ||
2908 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, | 2924 | if (move_one_task(target_rq, target_cpu, busiest_rq, |
2909 | ULONG_MAX, sd, CPU_IDLE, NULL)) | 2925 | sd, CPU_IDLE)) |
2910 | schedstat_inc(sd, alb_pushed); | 2926 | schedstat_inc(sd, alb_pushed); |
2911 | else | 2927 | else |
2912 | schedstat_inc(sd, alb_failed); | 2928 | schedstat_inc(sd, alb_failed); |
@@ -3090,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3090 | if (need_resched()) | 3106 | if (need_resched()) |
3091 | break; | 3107 | break; |
3092 | 3108 | ||
3093 | rebalance_domains(balance_cpu, SCHED_IDLE); | 3109 | rebalance_domains(balance_cpu, CPU_IDLE); |
3094 | 3110 | ||
3095 | rq = cpu_rq(balance_cpu); | 3111 | rq = cpu_rq(balance_cpu); |
3096 | if (time_after(this_rq->next_balance, rq->next_balance)) | 3112 | if (time_after(this_rq->next_balance, rq->next_balance)) |
@@ -3175,8 +3191,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3175 | unsigned long max_nr_move, unsigned long max_load_move, | 3191 | unsigned long max_nr_move, unsigned long max_load_move, |
3176 | struct sched_domain *sd, enum cpu_idle_type idle, | 3192 | struct sched_domain *sd, enum cpu_idle_type idle, |
3177 | int *all_pinned, unsigned long *load_moved, | 3193 | int *all_pinned, unsigned long *load_moved, |
3178 | int this_best_prio, int best_prio, int best_prio_seen, | 3194 | int *this_best_prio, struct rq_iterator *iterator) |
3179 | struct rq_iterator *iterator) | ||
3180 | { | 3195 | { |
3181 | *load_moved = 0; | 3196 | *load_moved = 0; |
3182 | 3197 | ||
@@ -3202,7 +3217,8 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3202 | rq = task_rq_lock(p, &flags); | 3217 | rq = task_rq_lock(p, &flags); |
3203 | ns = p->se.sum_exec_runtime; | 3218 | ns = p->se.sum_exec_runtime; |
3204 | if (rq->curr == p) { | 3219 | if (rq->curr == p) { |
3205 | delta_exec = rq_clock(rq) - p->se.exec_start; | 3220 | update_rq_clock(rq); |
3221 | delta_exec = rq->clock - p->se.exec_start; | ||
3206 | if ((s64)delta_exec > 0) | 3222 | if ((s64)delta_exec > 0) |
3207 | ns += delta_exec; | 3223 | ns += delta_exec; |
3208 | } | 3224 | } |
@@ -3296,11 +3312,19 @@ void scheduler_tick(void) | |||
3296 | int cpu = smp_processor_id(); | 3312 | int cpu = smp_processor_id(); |
3297 | struct rq *rq = cpu_rq(cpu); | 3313 | struct rq *rq = cpu_rq(cpu); |
3298 | struct task_struct *curr = rq->curr; | 3314 | struct task_struct *curr = rq->curr; |
3315 | u64 next_tick = rq->tick_timestamp + TICK_NSEC; | ||
3299 | 3316 | ||
3300 | spin_lock(&rq->lock); | 3317 | spin_lock(&rq->lock); |
3318 | __update_rq_clock(rq); | ||
3319 | /* | ||
3320 | * Let rq->clock advance by at least TICK_NSEC: | ||
3321 | */ | ||
3322 | if (unlikely(rq->clock < next_tick)) | ||
3323 | rq->clock = next_tick; | ||
3324 | rq->tick_timestamp = rq->clock; | ||
3325 | update_cpu_load(rq); | ||
3301 | if (curr != rq->idle) /* FIXME: needed? */ | 3326 | if (curr != rq->idle) /* FIXME: needed? */ |
3302 | curr->sched_class->task_tick(rq, curr); | 3327 | curr->sched_class->task_tick(rq, curr); |
3303 | update_cpu_load(rq); | ||
3304 | spin_unlock(&rq->lock); | 3328 | spin_unlock(&rq->lock); |
3305 | 3329 | ||
3306 | #ifdef CONFIG_SMP | 3330 | #ifdef CONFIG_SMP |
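scheduler_tick() now drives the clock bookkeeping itself: it notes where the next tick should land (tick_timestamp + TICK_NSEC), updates the clock, and if sched_clock() lagged it forces rq->clock forward so every tick advances the clock by at least TICK_NSEC; tick_timestamp is then reset, giving __update_rq_clock() the anchor for its forward-jump clamp. A worked example of that floor, with an assumed 1ms tick:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* assume a 1000 Hz tick for this example */

int main(void)
{
	uint64_t tick_timestamp = 5000000;		 /* rq->clock at the last tick */
	uint64_t next_tick = tick_timestamp + TICK_NSEC; /* 6000000 */
	uint64_t clock = 5600000;			 /* raw clock only advanced 0.6ms */

	/* Let rq->clock advance by at least TICK_NSEC: */
	if (clock < next_tick)
		clock = next_tick;
	tick_timestamp = clock;

	printf("clock=%llu tick_timestamp=%llu\n",
	       (unsigned long long)clock, (unsigned long long)tick_timestamp);
	return 0;
}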
@@ -3382,7 +3406,7 @@ static inline void schedule_debug(struct task_struct *prev) | |||
3382 | * Pick up the highest-prio task: | 3406 | * Pick up the highest-prio task: |
3383 | */ | 3407 | */ |
3384 | static inline struct task_struct * | 3408 | static inline struct task_struct * |
3385 | pick_next_task(struct rq *rq, struct task_struct *prev, u64 now) | 3409 | pick_next_task(struct rq *rq, struct task_struct *prev) |
3386 | { | 3410 | { |
3387 | struct sched_class *class; | 3411 | struct sched_class *class; |
3388 | struct task_struct *p; | 3412 | struct task_struct *p; |
@@ -3392,14 +3416,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, u64 now) | |||
3392 | * the fair class we can call that function directly: | 3416 | * the fair class we can call that function directly: |
3393 | */ | 3417 | */ |
3394 | if (likely(rq->nr_running == rq->cfs.nr_running)) { | 3418 | if (likely(rq->nr_running == rq->cfs.nr_running)) { |
3395 | p = fair_sched_class.pick_next_task(rq, now); | 3419 | p = fair_sched_class.pick_next_task(rq); |
3396 | if (likely(p)) | 3420 | if (likely(p)) |
3397 | return p; | 3421 | return p; |
3398 | } | 3422 | } |
3399 | 3423 | ||
3400 | class = sched_class_highest; | 3424 | class = sched_class_highest; |
3401 | for ( ; ; ) { | 3425 | for ( ; ; ) { |
3402 | p = class->pick_next_task(rq, now); | 3426 | p = class->pick_next_task(rq); |
3403 | if (p) | 3427 | if (p) |
3404 | return p; | 3428 | return p; |
3405 | /* | 3429 | /* |
@@ -3418,7 +3442,6 @@ asmlinkage void __sched schedule(void) | |||
3418 | struct task_struct *prev, *next; | 3442 | struct task_struct *prev, *next; |
3419 | long *switch_count; | 3443 | long *switch_count; |
3420 | struct rq *rq; | 3444 | struct rq *rq; |
3421 | u64 now; | ||
3422 | int cpu; | 3445 | int cpu; |
3423 | 3446 | ||
3424 | need_resched: | 3447 | need_resched: |
@@ -3436,6 +3459,7 @@ need_resched_nonpreemptible: | |||
3436 | 3459 | ||
3437 | spin_lock_irq(&rq->lock); | 3460 | spin_lock_irq(&rq->lock); |
3438 | clear_tsk_need_resched(prev); | 3461 | clear_tsk_need_resched(prev); |
3462 | __update_rq_clock(rq); | ||
3439 | 3463 | ||
3440 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | 3464 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
3441 | if (unlikely((prev->state & TASK_INTERRUPTIBLE) && | 3465 | if (unlikely((prev->state & TASK_INTERRUPTIBLE) && |
@@ -3450,9 +3474,8 @@ need_resched_nonpreemptible: | |||
3450 | if (unlikely(!rq->nr_running)) | 3474 | if (unlikely(!rq->nr_running)) |
3451 | idle_balance(cpu, rq); | 3475 | idle_balance(cpu, rq); |
3452 | 3476 | ||
3453 | now = __rq_clock(rq); | 3477 | prev->sched_class->put_prev_task(rq, prev); |
3454 | prev->sched_class->put_prev_task(rq, prev, now); | 3478 | next = pick_next_task(rq, prev); |
3455 | next = pick_next_task(rq, prev, now); | ||
3456 | 3479 | ||
3457 | sched_info_switch(prev, next); | 3480 | sched_info_switch(prev, next); |
3458 | 3481 | ||
@@ -3895,17 +3918,16 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
3895 | unsigned long flags; | 3918 | unsigned long flags; |
3896 | int oldprio, on_rq; | 3919 | int oldprio, on_rq; |
3897 | struct rq *rq; | 3920 | struct rq *rq; |
3898 | u64 now; | ||
3899 | 3921 | ||
3900 | BUG_ON(prio < 0 || prio > MAX_PRIO); | 3922 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
3901 | 3923 | ||
3902 | rq = task_rq_lock(p, &flags); | 3924 | rq = task_rq_lock(p, &flags); |
3903 | now = rq_clock(rq); | 3925 | update_rq_clock(rq); |
3904 | 3926 | ||
3905 | oldprio = p->prio; | 3927 | oldprio = p->prio; |
3906 | on_rq = p->se.on_rq; | 3928 | on_rq = p->se.on_rq; |
3907 | if (on_rq) | 3929 | if (on_rq) |
3908 | dequeue_task(rq, p, 0, now); | 3930 | dequeue_task(rq, p, 0); |
3909 | 3931 | ||
3910 | if (rt_prio(prio)) | 3932 | if (rt_prio(prio)) |
3911 | p->sched_class = &rt_sched_class; | 3933 | p->sched_class = &rt_sched_class; |
@@ -3915,7 +3937,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
3915 | p->prio = prio; | 3937 | p->prio = prio; |
3916 | 3938 | ||
3917 | if (on_rq) { | 3939 | if (on_rq) { |
3918 | enqueue_task(rq, p, 0, now); | 3940 | enqueue_task(rq, p, 0); |
3919 | /* | 3941 | /* |
3920 | * Reschedule if we are currently running on this runqueue and | 3942 | * Reschedule if we are currently running on this runqueue and |
3921 | * our priority decreased, or if we are not currently running on | 3943 | * our priority decreased, or if we are not currently running on |
@@ -3938,7 +3960,6 @@ void set_user_nice(struct task_struct *p, long nice) | |||
3938 | int old_prio, delta, on_rq; | 3960 | int old_prio, delta, on_rq; |
3939 | unsigned long flags; | 3961 | unsigned long flags; |
3940 | struct rq *rq; | 3962 | struct rq *rq; |
3941 | u64 now; | ||
3942 | 3963 | ||
3943 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) | 3964 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
3944 | return; | 3965 | return; |
@@ -3947,7 +3968,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
3947 | * the task might be in the middle of scheduling on another CPU. | 3968 | * the task might be in the middle of scheduling on another CPU. |
3948 | */ | 3969 | */ |
3949 | rq = task_rq_lock(p, &flags); | 3970 | rq = task_rq_lock(p, &flags); |
3950 | now = rq_clock(rq); | 3971 | update_rq_clock(rq); |
3951 | /* | 3972 | /* |
3952 | * The RT priorities are set via sched_setscheduler(), but we still | 3973 | * The RT priorities are set via sched_setscheduler(), but we still |
3953 | * allow the 'normal' nice value to be set - but as expected | 3974 | * allow the 'normal' nice value to be set - but as expected |
@@ -3960,8 +3981,8 @@ void set_user_nice(struct task_struct *p, long nice) | |||
3960 | } | 3981 | } |
3961 | on_rq = p->se.on_rq; | 3982 | on_rq = p->se.on_rq; |
3962 | if (on_rq) { | 3983 | if (on_rq) { |
3963 | dequeue_task(rq, p, 0, now); | 3984 | dequeue_task(rq, p, 0); |
3964 | dec_load(rq, p, now); | 3985 | dec_load(rq, p); |
3965 | } | 3986 | } |
3966 | 3987 | ||
3967 | p->static_prio = NICE_TO_PRIO(nice); | 3988 | p->static_prio = NICE_TO_PRIO(nice); |
@@ -3971,8 +3992,8 @@ void set_user_nice(struct task_struct *p, long nice) | |||
3971 | delta = p->prio - old_prio; | 3992 | delta = p->prio - old_prio; |
3972 | 3993 | ||
3973 | if (on_rq) { | 3994 | if (on_rq) { |
3974 | enqueue_task(rq, p, 0, now); | 3995 | enqueue_task(rq, p, 0); |
3975 | inc_load(rq, p, now); | 3996 | inc_load(rq, p); |
3976 | /* | 3997 | /* |
3977 | * If the task increased its priority or is running and | 3998 | * If the task increased its priority or is running and |
3978 | * lowered its priority, then reschedule its CPU: | 3999 | * lowered its priority, then reschedule its CPU: |
@@ -4208,6 +4229,7 @@ recheck: | |||
4208 | spin_unlock_irqrestore(&p->pi_lock, flags); | 4229 | spin_unlock_irqrestore(&p->pi_lock, flags); |
4209 | goto recheck; | 4230 | goto recheck; |
4210 | } | 4231 | } |
4232 | update_rq_clock(rq); | ||
4211 | on_rq = p->se.on_rq; | 4233 | on_rq = p->se.on_rq; |
4212 | if (on_rq) | 4234 | if (on_rq) |
4213 | deactivate_task(rq, p, 0); | 4235 | deactivate_task(rq, p, 0); |
@@ -4463,10 +4485,8 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
4463 | out_unlock: | 4485 | out_unlock: |
4464 | read_unlock(&tasklist_lock); | 4486 | read_unlock(&tasklist_lock); |
4465 | mutex_unlock(&sched_hotcpu_mutex); | 4487 | mutex_unlock(&sched_hotcpu_mutex); |
4466 | if (retval) | ||
4467 | return retval; | ||
4468 | 4488 | ||
4469 | return 0; | 4489 | return retval; |
4470 | } | 4490 | } |
4471 | 4491 | ||
4472 | /** | 4492 | /** |
@@ -4966,6 +4986,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
4966 | on_rq = p->se.on_rq; | 4986 | on_rq = p->se.on_rq; |
4967 | if (on_rq) | 4987 | if (on_rq) |
4968 | deactivate_task(rq_src, p, 0); | 4988 | deactivate_task(rq_src, p, 0); |
4989 | |||
4969 | set_task_cpu(p, dest_cpu); | 4990 | set_task_cpu(p, dest_cpu); |
4970 | if (on_rq) { | 4991 | if (on_rq) { |
4971 | activate_task(rq_dest, p, 0); | 4992 | activate_task(rq_dest, p, 0); |
@@ -5198,7 +5219,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
5198 | for ( ; ; ) { | 5219 | for ( ; ; ) { |
5199 | if (!rq->nr_running) | 5220 | if (!rq->nr_running) |
5200 | break; | 5221 | break; |
5201 | next = pick_next_task(rq, rq->curr, rq_clock(rq)); | 5222 | update_rq_clock(rq); |
5223 | next = pick_next_task(rq, rq->curr); | ||
5202 | if (!next) | 5224 | if (!next) |
5203 | break; | 5225 | break; |
5204 | migrate_dead(dead_cpu, next); | 5226 | migrate_dead(dead_cpu, next); |
@@ -5210,12 +5232,19 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
5210 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) | 5232 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
5211 | 5233 | ||
5212 | static struct ctl_table sd_ctl_dir[] = { | 5234 | static struct ctl_table sd_ctl_dir[] = { |
5213 | {CTL_UNNUMBERED, "sched_domain", NULL, 0, 0755, NULL, }, | 5235 | { |
5236 | .procname = "sched_domain", | ||
5237 | .mode = 0755, | ||
5238 | }, | ||
5214 | {0,}, | 5239 | {0,}, |
5215 | }; | 5240 | }; |
5216 | 5241 | ||
5217 | static struct ctl_table sd_ctl_root[] = { | 5242 | static struct ctl_table sd_ctl_root[] = { |
5218 | {CTL_UNNUMBERED, "kernel", NULL, 0, 0755, sd_ctl_dir, }, | 5243 | { |
5244 | .procname = "kernel", | ||
5245 | .mode = 0755, | ||
5246 | .child = sd_ctl_dir, | ||
5247 | }, | ||
5219 | {0,}, | 5248 | {0,}, |
5220 | }; | 5249 | }; |
5221 | 5250 | ||
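The sched_domain sysctl tables above switch from positional initializers with an explicit ctl_name to C99 designated initializers that only name the fields actually used (procname, mode, child), and the per-domain and per-cpu entries below likewise stop assigning numeric ctl_name values. The same initializer style in a self-contained form, using a mock structure rather than the kernel's ctl_table:

#include <stdio.h>

/* mock of the handful of ctl_table fields this diff touches */
struct mock_ctl_table {
	const char *procname;
	int mode;
	struct mock_ctl_table *child;
};

static struct mock_ctl_table mock_sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0755,
	},
	{ 0, },				/* table terminator, as in the patch */
};

static struct mock_ctl_table mock_sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0755,
		.child		= mock_sd_ctl_dir,
	},
	{ 0, },
};

int main(void)
{
	/* fields left unnamed in the initializer simply default to zero */
	printf("%s/%s mode %o\n",
	       mock_sd_ctl_root[0].procname,
	       mock_sd_ctl_root[0].child[0].procname,
	       mock_sd_ctl_root[0].child[0].mode);
	return 0;
}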
@@ -5231,11 +5260,10 @@ static struct ctl_table *sd_alloc_ctl_entry(int n) | |||
5231 | } | 5260 | } |
5232 | 5261 | ||
5233 | static void | 5262 | static void |
5234 | set_table_entry(struct ctl_table *entry, int ctl_name, | 5263 | set_table_entry(struct ctl_table *entry, |
5235 | const char *procname, void *data, int maxlen, | 5264 | const char *procname, void *data, int maxlen, |
5236 | mode_t mode, proc_handler *proc_handler) | 5265 | mode_t mode, proc_handler *proc_handler) |
5237 | { | 5266 | { |
5238 | entry->ctl_name = ctl_name; | ||
5239 | entry->procname = procname; | 5267 | entry->procname = procname; |
5240 | entry->data = data; | 5268 | entry->data = data; |
5241 | entry->maxlen = maxlen; | 5269 | entry->maxlen = maxlen; |
@@ -5248,28 +5276,28 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) | |||
5248 | { | 5276 | { |
5249 | struct ctl_table *table = sd_alloc_ctl_entry(14); | 5277 | struct ctl_table *table = sd_alloc_ctl_entry(14); |
5250 | 5278 | ||
5251 | set_table_entry(&table[0], 1, "min_interval", &sd->min_interval, | 5279 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
5252 | sizeof(long), 0644, proc_doulongvec_minmax); | 5280 | sizeof(long), 0644, proc_doulongvec_minmax); |
5253 | set_table_entry(&table[1], 2, "max_interval", &sd->max_interval, | 5281 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
5254 | sizeof(long), 0644, proc_doulongvec_minmax); | 5282 | sizeof(long), 0644, proc_doulongvec_minmax); |
5255 | set_table_entry(&table[2], 3, "busy_idx", &sd->busy_idx, | 5283 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
5256 | sizeof(int), 0644, proc_dointvec_minmax); | 5284 | sizeof(int), 0644, proc_dointvec_minmax); |
5257 | set_table_entry(&table[3], 4, "idle_idx", &sd->idle_idx, | 5285 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
5258 | sizeof(int), 0644, proc_dointvec_minmax); | 5286 | sizeof(int), 0644, proc_dointvec_minmax); |
5259 | set_table_entry(&table[4], 5, "newidle_idx", &sd->newidle_idx, | 5287 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
5260 | sizeof(int), 0644, proc_dointvec_minmax); | 5288 | sizeof(int), 0644, proc_dointvec_minmax); |
5261 | set_table_entry(&table[5], 6, "wake_idx", &sd->wake_idx, | 5289 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
5262 | sizeof(int), 0644, proc_dointvec_minmax); | 5290 | sizeof(int), 0644, proc_dointvec_minmax); |
5263 | set_table_entry(&table[6], 7, "forkexec_idx", &sd->forkexec_idx, | 5291 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
5264 | sizeof(int), 0644, proc_dointvec_minmax); | 5292 | sizeof(int), 0644, proc_dointvec_minmax); |
5265 | set_table_entry(&table[7], 8, "busy_factor", &sd->busy_factor, | 5293 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
5266 | sizeof(int), 0644, proc_dointvec_minmax); | 5294 | sizeof(int), 0644, proc_dointvec_minmax); |
5267 | set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct, | 5295 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
5268 | sizeof(int), 0644, proc_dointvec_minmax); | 5296 | sizeof(int), 0644, proc_dointvec_minmax); |
5269 | set_table_entry(&table[10], 11, "cache_nice_tries", | 5297 | set_table_entry(&table[10], "cache_nice_tries", |
5270 | &sd->cache_nice_tries, | 5298 | &sd->cache_nice_tries, |
5271 | sizeof(int), 0644, proc_dointvec_minmax); | 5299 | sizeof(int), 0644, proc_dointvec_minmax); |
5272 | set_table_entry(&table[12], 13, "flags", &sd->flags, | 5300 | set_table_entry(&table[12], "flags", &sd->flags, |
5273 | sizeof(int), 0644, proc_dointvec_minmax); | 5301 | sizeof(int), 0644, proc_dointvec_minmax); |
5274 | 5302 | ||
5275 | return table; | 5303 | return table; |
@@ -5289,7 +5317,6 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) | |||
5289 | i = 0; | 5317 | i = 0; |
5290 | for_each_domain(cpu, sd) { | 5318 | for_each_domain(cpu, sd) { |
5291 | snprintf(buf, 32, "domain%d", i); | 5319 | snprintf(buf, 32, "domain%d", i); |
5292 | entry->ctl_name = i + 1; | ||
5293 | entry->procname = kstrdup(buf, GFP_KERNEL); | 5320 | entry->procname = kstrdup(buf, GFP_KERNEL); |
5294 | entry->mode = 0755; | 5321 | entry->mode = 0755; |
5295 | entry->child = sd_alloc_ctl_domain_table(sd); | 5322 | entry->child = sd_alloc_ctl_domain_table(sd); |
@@ -5310,7 +5337,6 @@ static void init_sched_domain_sysctl(void) | |||
5310 | 5337 | ||
5311 | for (i = 0; i < cpu_num; i++, entry++) { | 5338 | for (i = 0; i < cpu_num; i++, entry++) { |
5312 | snprintf(buf, 32, "cpu%d", i); | 5339 | snprintf(buf, 32, "cpu%d", i); |
5313 | entry->ctl_name = i + 1; | ||
5314 | entry->procname = kstrdup(buf, GFP_KERNEL); | 5340 | entry->procname = kstrdup(buf, GFP_KERNEL); |
5315 | entry->mode = 0755; | 5341 | entry->mode = 0755; |
5316 | entry->child = sd_alloc_ctl_cpu_table(i); | 5342 | entry->child = sd_alloc_ctl_cpu_table(i); |
@@ -5379,6 +5405,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
5379 | rq->migration_thread = NULL; | 5405 | rq->migration_thread = NULL; |
5380 | /* Idle task back to normal (off runqueue, low prio) */ | 5406 | /* Idle task back to normal (off runqueue, low prio) */ |
5381 | rq = task_rq_lock(rq->idle, &flags); | 5407 | rq = task_rq_lock(rq->idle, &flags); |
5408 | update_rq_clock(rq); | ||
5382 | deactivate_task(rq, rq->idle, 0); | 5409 | deactivate_task(rq, rq->idle, 0); |
5383 | rq->idle->static_prio = MAX_PRIO; | 5410 | rq->idle->static_prio = MAX_PRIO; |
5384 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 5411 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
@@ -6301,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) | |||
6301 | } | 6328 | } |
6302 | 6329 | ||
6303 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 6330 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
6304 | int arch_reinit_sched_domains(void) | 6331 | static int arch_reinit_sched_domains(void) |
6305 | { | 6332 | { |
6306 | int err; | 6333 | int err; |
6307 | 6334 | ||
@@ -6330,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | |||
6330 | return ret ? ret : count; | 6357 | return ret ? ret : count; |
6331 | } | 6358 | } |
6332 | 6359 | ||
6333 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | ||
6334 | { | ||
6335 | int err = 0; | ||
6336 | |||
6337 | #ifdef CONFIG_SCHED_SMT | ||
6338 | if (smt_capable()) | ||
6339 | err = sysfs_create_file(&cls->kset.kobj, | ||
6340 | &attr_sched_smt_power_savings.attr); | ||
6341 | #endif | ||
6342 | #ifdef CONFIG_SCHED_MC | ||
6343 | if (!err && mc_capable()) | ||
6344 | err = sysfs_create_file(&cls->kset.kobj, | ||
6345 | &attr_sched_mc_power_savings.attr); | ||
6346 | #endif | ||
6347 | return err; | ||
6348 | } | ||
6349 | #endif | ||
6350 | |||
6351 | #ifdef CONFIG_SCHED_MC | 6360 | #ifdef CONFIG_SCHED_MC |
6352 | static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) | 6361 | static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) |
6353 | { | 6362 | { |
@@ -6358,8 +6367,8 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev, | |||
6358 | { | 6367 | { |
6359 | return sched_power_savings_store(buf, count, 0); | 6368 | return sched_power_savings_store(buf, count, 0); |
6360 | } | 6369 | } |
6361 | SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, | 6370 | static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, |
6362 | sched_mc_power_savings_store); | 6371 | sched_mc_power_savings_store); |
6363 | #endif | 6372 | #endif |
6364 | 6373 | ||
6365 | #ifdef CONFIG_SCHED_SMT | 6374 | #ifdef CONFIG_SCHED_SMT |
@@ -6372,8 +6381,26 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev, | |||
6372 | { | 6381 | { |
6373 | return sched_power_savings_store(buf, count, 1); | 6382 | return sched_power_savings_store(buf, count, 1); |
6374 | } | 6383 | } |
6375 | SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, | 6384 | static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, |
6376 | sched_smt_power_savings_store); | 6385 | sched_smt_power_savings_store); |
6386 | #endif | ||
6387 | |||
6388 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | ||
6389 | { | ||
6390 | int err = 0; | ||
6391 | |||
6392 | #ifdef CONFIG_SCHED_SMT | ||
6393 | if (smt_capable()) | ||
6394 | err = sysfs_create_file(&cls->kset.kobj, | ||
6395 | &attr_sched_smt_power_savings.attr); | ||
6396 | #endif | ||
6397 | #ifdef CONFIG_SCHED_MC | ||
6398 | if (!err && mc_capable()) | ||
6399 | err = sysfs_create_file(&cls->kset.kobj, | ||
6400 | &attr_sched_mc_power_savings.attr); | ||
6401 | #endif | ||
6402 | return err; | ||
6403 | } | ||
6377 | #endif | 6404 | #endif |
6378 | 6405 | ||
6379 | /* | 6406 | /* |
@@ -6616,12 +6643,13 @@ void normalize_rt_tasks(void) | |||
6616 | goto out_unlock; | 6643 | goto out_unlock; |
6617 | #endif | 6644 | #endif |
6618 | 6645 | ||
6646 | update_rq_clock(rq); | ||
6619 | on_rq = p->se.on_rq; | 6647 | on_rq = p->se.on_rq; |
6620 | if (on_rq) | 6648 | if (on_rq) |
6621 | deactivate_task(task_rq(p), p, 0); | 6649 | deactivate_task(rq, p, 0); |
6622 | __setscheduler(rq, p, SCHED_NORMAL, 0); | 6650 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
6623 | if (on_rq) { | 6651 | if (on_rq) { |
6624 | activate_task(task_rq(p), p, 0); | 6652 | activate_task(rq, p, 0); |
6625 | resched_task(rq->curr); | 6653 | resched_task(rq->curr); |
6626 | } | 6654 | } |
6627 | #ifdef CONFIG_SMP | 6655 | #ifdef CONFIG_SMP |