Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  126
1 file changed, 77 insertions, 49 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..0c26e2df450e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -721,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+			    long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				   long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
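For illustration, the clamped share computation introduced by calc_cfs_shares() above can be exercised outside the kernel. The standalone sketch below keeps only the arithmetic: the group's total load weight is refreshed with this runqueue's current load, and the resulting share is bounded between MIN_SHARES and tg->shares. All names and the MIN_SHARES value of 2 are simplifications for this sketch, not taken verbatim from the kernel sources.

#include <stdio.h>

/* standalone approximation of the clamped share computation above;
 * the MIN_SHARES value of 2 is illustrative only */
#define MIN_SHARES 2

static long calc_shares(long tg_shares, long tg_load_weight,
			long cfs_rq_load_contribution,
			long cfs_rq_load, long weight_delta)
{
	long load = cfs_rq_load + weight_delta;
	/* replace this cpu's stale contribution with its current load */
	long load_weight = tg_load_weight - cfs_rq_load_contribution + load;
	long shares = tg_shares * load;

	if (load_weight)
		shares /= load_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* a group worth 1024 shares with 3072 total load, of which this
	 * runqueue contributes 1024: it gets roughly a third (341) */
	printf("%ld\n", calc_shares(1024, 3072, 1024, 1024, 0));
	return 0;
}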
@@ -782,41 +828,20 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
-
-	if (!cfs_rq)
-		return;
+	long shares;
 
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1087,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
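The new early return guards a signed/unsigned comparison: delta is an s64 vruntime difference, while ideal_runtime is unsigned, so a negative delta would be converted to a huge unsigned value by the delta > ideal_runtime test and could force a spurious resched. A standalone sketch of that conversion follows; the explicit cast mirrors what the implicit arithmetic conversion does on 64-bit builds, and the numbers are made up.

#include <stdio.h>

int main(void)
{
	/* ideal_runtime is unsigned in the scheduler; comparing it with a
	 * signed vruntime delta converts the delta to unsigned on 64-bit */
	unsigned long long ideal_runtime = 4000000;	/* ~4 ms, illustrative */
	long long delta = -1;	/* curr already runs behind the leftmost entity */

	if ((unsigned long long)delta > ideal_runtime)
		printf("without the guard: the negative delta wraps and looks huge\n");

	if (delta < 0)
		printf("with the guard: bail out before the unsigned comparison\n");
	return 0;
}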
@@ -1362,27 +1390,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
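The rewritten loop body can likewise be exercised in isolation. The sketch below is one iteration of the new calculation with the runqueue and group state passed as plain parameters instead of being read from cfs_rq and tg; MIN_SHARES is again assumed to be 2 purely for illustration. It re-derives the group's load with this cpu's instantaneous contribution, scales the would-be weight by tg->shares / lw with clamping, and finally subtracts the entity's current weight to report a delta.

#include <stdio.h>

#define MIN_SHARES 2	/* illustrative value, not taken from this diff */

/* one iteration of the new effective_load() loop, with the per-cpu
 * runqueue state passed in explicitly */
static long effective_load_step(long tg_shares, long tg_load_weight,
				long my_q_load_contribution,
				long my_q_load, long se_weight,
				long wl, long wg)
{
	long w = my_q_load;
	/* use this cpu's instantaneous contribution */
	long lw = tg_load_weight - my_q_load_contribution + w + wg;

	wl += w;

	if (lw > 0 && wl < lw)
		wl = (wl * tg_shares) / lw;
	else
		wl = tg_shares;

	/* zero point is MIN_SHARES */
	if (wl < MIN_SHARES)
		wl = MIN_SHARES;
	wl -= se_weight;	/* report the change relative to the current weight */
	return wl;
}

int main(void)
{
	/* adding a task of weight 512 to a runqueue that already contributes
	 * 1024 of the group's 3072 load, with a group entity weighing 256:
	 * the effective weight change comes out to 256 */
	printf("%ld\n",
	       effective_load_step(1024, 3072, 1024, 1024, 256, 512, 0));
	return 0;
}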
@@ -1401,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1440,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
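The switch from unsigned long to s64 matters because the reworked effective_load() above can return a negative adjustment (it ends by subtracting se->load.weight), and wake_affine() folds that adjustment into this_load. With unsigned arithmetic the result would wrap to a huge value and always pass the old if (this_load) test; as s64 it can go properly negative and is filtered by the new this_load > 0 check. A standalone sketch of the wrap, using made-up numbers:

#include <stdio.h>

int main(void)
{
	/* an effective_load()-style adjustment may now be negative */
	long adjustment = -2048;

	unsigned long unsigned_load = 1024;
	long long signed_load = 1024;

	unsigned_load += adjustment;	/* wraps to a huge positive value */
	signed_load += adjustment;	/* goes properly negative */

	printf("unsigned: %lu (still passes an \"if (this_load)\" test)\n",
	       unsigned_load);
	printf("signed:   %lld (filtered out by the new this_load > 0 test)\n",
	       signed_load);
	return 0;
}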