author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-08 12:04:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-08 12:04:34 -0400
commit     369da7fc6d627aca19baec09ebe4486c69aef5f2 (patch)
tree       6f61b228432aa12c166722fe7d3da2694e8a760d
parent     612807fe28abb0a04a627684fb5d4d23108edb1b (diff)
parent     ea1dc6fc6242f991656e35e2ed3d90ec1cd13418 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
  sched/fair: Fix effective_load() to consistently use smoothed load
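For context on the first fix: with increased load resolution on 64-bit kernels, cfs_rq->load.weight carries SCHED_FIXEDPOINT_SHIFT extra fixed-point bits, while tg->load_avg and tg_load_avg_contrib are tracked without them, so mixing the two scales skews the shares division. Below is a minimal userspace sketch of the corrected arithmetic, not kernel code; scale_load_down(), MIN_SHARES and SCHED_FIXEDPOINT_SHIFT mirror kernel names, while calc_shares() and the sample numbers are made up for illustration.

/* Userspace sketch of the corrected calc_cfs_shares() arithmetic.
 * Assumes a 64-bit kernel with increased load resolution, where
 * load.weight = (nice-0 weight << SCHED_FIXEDPOINT_SHIFT) while the
 * PELT averages (tg->load_avg, tg_load_avg_contrib) stay unshifted.
 */
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT 10
#define MIN_SHARES 2L

/* Drop the extra fixed-point bits so weight and load_avg are comparable. */
static long scale_load_down(unsigned long w)
{
	return (long)(w >> SCHED_FIXEDPOINT_SHIFT);
}

static long calc_shares(long tg_shares, long tg_load_avg,
			long cfs_rq_contrib, unsigned long cfs_rq_weight)
{
	long load = scale_load_down(cfs_rq_weight);	/* same scale as load_avg */
	long tg_weight = tg_load_avg;

	/* Swap this runqueue's stale contribution for its current load,
	 * which also guarantees tg_weight >= load. */
	tg_weight -= cfs_rq_contrib;
	tg_weight += load;

	long shares = tg_shares * load;
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* One runqueue holds half of the group's load: expect ~half the shares. */
	printf("%ld\n", calc_shares(1024, 2048, 1024,
				    1024UL << SCHED_FIXEDPOINT_SHIFT));
	return 0;
}

With the old code, load was the unscaled cfs_rq->load.weight, on such kernels roughly 1024 times larger than the load-average terms it was combined with, so the quotient no longer reflected how the group's load was actually spread across CPUs.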
-rw-r--r--  kernel/sched/fair.c  |  42
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bdcbeea90c95..c8c5d2d48424 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
+	load = scale_load_down(cfs_rq->load.weight);
 
-	return tg_weight;
-}
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
-
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
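The effective_load() hunk above applies the same pattern at every level of the task-group hierarchy: both the group total W and this runqueue's weight w are now derived from the smoothed cfs_rq_load_avg() value, with the possibly stale tg_load_avg_contrib replaced by the current load, just as in calc_cfs_shares(). The snippet below is a hedged standalone restatement of that per-level step; struct grp_rq and level_weights() are simplified stand-ins for the kernel's cfs_rq/task_group pair, not the real layout.

/* Standalone restatement of the per-level weight setup in effective_load()
 * after the fix; field and function names here are illustrative only.
 */
#include <stdio.h>

struct grp_rq {
	long tg_load_avg;		/* \Sum_j rw_j over all CPUs (smoothed)   */
	long tg_load_avg_contrib;	/* this CPU's last published contribution */
	long load_avg;			/* this CPU's current smoothed load rw_i  */
};

/* Build W = wg + \Sum rw_j and w = rw_i + wl from the same smoothed load,
 * replacing the possibly stale contribution so that W >= w always holds. */
static void level_weights(const struct grp_rq *q, long wl, long wg,
			  long *W, long *w)
{
	*w = q->load_avg;		/* rw_i, smoothed */

	*W = wg + q->tg_load_avg;	/* wg + \Sum rw_j */
	*W -= q->tg_load_avg_contrib;	/* drop the stale contribution */
	*W += *w;			/* ensure \Sum rw_j >= rw_i */

	*w += wl;			/* rw_i + @wl */
}

int main(void)
{
	struct grp_rq q = { .tg_load_avg = 3000, .tg_load_avg_contrib = 900,
			    .load_avg = 1000 };
	long W, w;

	level_weights(&q, 200, 0, &W, &w);
	printf("W=%ld w=%ld\n", W, w);	/* W=3100 w=1200 */
	return 0;
}

Before the fix, W was built from calc_tg_weight(), which mixed in the instantaneous cfs_rq->load.weight, while w used the smoothed cfs_rq_load_avg(), so the two sides of the ratio in (2) came from different load signals.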