Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	91
1 file changed, 58 insertions(+), 33 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 77e9166d7bbf..0c26e2df450e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -721,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+			    long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				   long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
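The new calc_cfs_shares() above hands each cfs_rq a slice of tg->shares proportional to its fraction of the group-wide load, clamped to the range [MIN_SHARES, tg->shares]. A minimal userspace sketch of that arithmetic follows; the function and parameter names are illustrative stand-ins, not kernel API, and the MIN_SHARES value of 2 is an assumption about this kernel's floor.

#include <stdio.h>

#define MIN_SHARES 2	/* assumed floor for a group entity's weight */

/* Illustrative stand-in for calc_cfs_shares(): scale the group's total
 * shares by this runqueue's fraction of the group-wide load. */
static long calc_shares(long tg_shares, long rq_load, long group_load)
{
	long shares = tg_shares * rq_load;

	if (group_load)			/* guard the division, as the kernel does */
		shares /= group_load;

	if (shares < MIN_SHARES)	/* never starve a runnable group... */
		shares = MIN_SHARES;
	if (shares > tg_shares)		/* ...and never exceed the group total */
		shares = tg_shares;

	return shares;
}

int main(void)
{
	/* one cpu holds a quarter of a 1024-share group's load -> 256 */
	printf("%ld\n", calc_shares(1024, 2048, 8192));
	return 0;
}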
@@ -782,41 +828,20 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
-
-	if (!cfs_rq)
-		return;
+	long shares;
 
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
-
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
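On !SMP builds calc_cfs_shares() degenerates to tg->shares (see the stub in the previous hunk), so tg->shares is the only value se->load.weight can settle at; the new #ifndef CONFIG_SMP test just skips reweight_entity() once that fixed point is reached. A hypothetical sketch of the pattern, with all names invented for illustration:

#include <stdio.h>

struct entity { long weight; };

static void reweight(struct entity *se, long w)
{
	se->weight = w;			/* stands in for reweight_entity() */
	printf("reweighted to %ld\n", w);
}

/* On UP the target weight is a constant, so skip the work once it sticks. */
static void update_shares_up(struct entity *se, long group_shares)
{
	if (se->weight == group_shares)	/* the likely() fast path */
		return;
	reweight(se, group_shares);
}

int main(void)
{
	struct entity se = { .weight = 0 };
	update_shares_up(&se, 1024);	/* reweights once */
	update_shares_up(&se, 1024);	/* early return thereafter */
	return 0;
}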
@@ -1404,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1443,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
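The wake_affine() hunks switch the load accumulators from unsigned long to s64 and test this_load > 0 rather than merely nonzero, presumably because a downward load correction can exceed the base load, and an unsigned intermediate would then wrap to a huge bogus value instead of going negative. A small standalone demonstration of that failure mode (not kernel code; the variable names are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long load = 100;
	unsigned long correction = 150;	/* e.g. a wakee-weight adjustment */

	/* unsigned arithmetic wraps: the "load" looks astronomically high */
	printf("unsigned: %lu\n", load - correction);

	/* signed 64-bit goes negative, and "if (this_load > 0)" skips it */
	printf("signed:   %lld\n", (long long)load - (long long)correction);

	return 0;
}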