Diffstat (limited to 'kernel/sched_fair.c')
 -rw-r--r--  kernel/sched_fair.c | 113
 1 file changed, 72 insertions(+), 41 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..354769979c02 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
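This first hunk replaces the single combined guard with nested conditionals so that, further down, UP builds get stub versions of the share-update helpers instead of losing them entirely. As a rough orientation sketch (assumed layout, not the literal file contents), the region ends up structured like this:

	#ifdef CONFIG_FAIR_GROUP_SCHED
	# ifdef CONFIG_SMP
	/* full implementations: update_cfs_load(), calc_cfs_shares(),
	   update_entity_shares_tick() */
	# else /* CONFIG_SMP */
	/* UP stubs: calc_cfs_shares() simply returns tg->shares */
	# endif /* CONFIG_SMP */
	#else /* CONFIG_FAIR_GROUP_SCHED */
	/* empty stubs when group scheduling is compiled out */
	#endif /* CONFIG_FAIR_GROUP_SCHED */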
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+			    long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				   long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
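The newly factored-out calc_cfs_shares() gives this CPU's group entity a slice of the group's shares proportional to the fraction of the group's global load that this cfs_rq carries, clamped to [MIN_SHARES, tg->shares]; the next two hunks switch update_cfs_shares() over to it. A minimal userspace sketch of the same arithmetic, with illustrative values (and assuming MIN_SHARES is 2, as in the scheduler code of this era):

	#include <stdio.h>

	#define MIN_SHARES 2	/* assumed value; defined in the scheduler proper */

	/* Mirror of calc_cfs_shares(): proportional slice, clamped. */
	static long calc_shares(long tg_shares, long tg_load_weight,
				long rq_load_contribution, long rq_load, long delta)
	{
		long load = rq_load + delta;
		/* replace this rq's stale global contribution with its current load */
		long load_weight = tg_load_weight - rq_load_contribution + load;
		long shares = tg_shares * load;

		if (load_weight)
			shares /= load_weight;

		if (shares < MIN_SHARES)
			shares = MIN_SHARES;
		if (shares > tg_shares)
			shares = tg_shares;
		return shares;
	}

	int main(void)
	{
		/* a group with 1024 shares whose global load is 8192, of which
		   this CPU contributes 2048: it should get 1/4 of the shares */
		printf("%ld\n", calc_shares(1024, 8192, 2048, 2048, 0)); /* 256 */
		return 0;
	}

On UP the question never arises, since the whole group runs on one CPU; hence the `# else` stub above simply returns tg->shares.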
@@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
+	long shares;
 
 	if (!cfs_rq)
 		return;
@@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
-
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1090,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
 	s64 delta = curr->vruntime - se->vruntime;
 
+	if (delta < 0)
+		return;
+
 	if (delta > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
 }
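The new early return in check_preempt_tick() guards against a signed/unsigned comparison: delta is s64, while ideal_runtime comes from sched_slice() as an unsigned type, so a negative delta (curr running behind the leftmost entity) would be converted to a huge unsigned value and trigger a spurious resched. A tiny standalone demonstration of the pitfall (illustrative values, assuming 64-bit types as in the kernel):

	#include <stdio.h>

	int main(void)
	{
		long long delta = -1000;		/* curr is *behind* the leftmost task */
		unsigned long long ideal = 4000000;	/* ~4ms slice, in ns */

		/* usual arithmetic conversions turn -1000 into a huge unsigned value */
		if (delta > ideal)
			printf("spurious preemption!\n");

		if (delta < 0)				/* the added guard */
			printf("bail out early, no resched\n");
		return 0;
	}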
@@ -1362,27 +1393,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
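The old effective_load() loop approximated each level's weight change with a per-level algebraic identity (S*(rw + wl) versus S*rw + s*wg); the rewrite instead recomputes the group entity's prospective weight from the group's global load_weight, in the same way calc_cfs_shares() does, and propagates the difference from the entity's current weight up the hierarchy. A standalone sketch of one level of that walk, with made-up numbers (all inputs here are illustrative assumptions):

	#include <stdio.h>

	#define MIN_SHARES 2	/* assumed, as in the scheduler code of this era */

	/*
	 * One level of the reworked effective_load() walk: how much this
	 * CPU's group-entity weight changes when task weight 'wl' (group
	 * weight delta 'wg') lands on a queue currently weighing 'w',
	 * within a group of 'tg_shares' shares and global load 'tg_load',
	 * of which this queue contributes 'contrib', and whose entity
	 * currently weighs 'se_weight'.
	 */
	static long level_delta(long tg_shares, long tg_load, long contrib,
				long w, long se_weight, long wl, long wg)
	{
		long lw = tg_load - contrib + w + wg;	/* updated global load */

		wl += w;				/* updated local load */

		if (lw > 0 && wl < lw)
			wl = (wl * tg_shares) / lw;	/* prospective entity weight */
		else
			wl = tg_shares;

		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		return wl - se_weight;			/* change in entity weight */
	}

	int main(void)
	{
		/* add a weight-1024 task to a queue holding 1/4 of an
		   8192-weight group with 1024 shares; entity currently at 256 */
		printf("%ld\n", level_delta(1024, 8192, 2048, 2048, 256, 1024, 1024));
		return 0;
	}

With these numbers the entity's prospective weight rises from 256 to 341, so the move adds +85 of effective load at this level; wg is then zeroed for the parent levels because, as the removed comment put it, moving shares between CPUs does not alter the group's total weight.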