author     Rik van Riel <riel@redhat.com>      2017-06-23 12:55:30 -0400
committer  Ingo Molnar <mingo@kernel.org>      2017-06-24 02:57:53 -0400
commit     815abf5af45f04f759f12f3172afd15226fd7f71 (patch)
tree       8dd9657b3d337a39871fcd1a13c06dea96cb2050
parent     3fed382b46baac83703130fe4cd3d9147f427fb9 (diff)
sched/fair: Remove effective_load()
The effective_load() function was only used by the NUMA balancing code, and
not by the regular load balancing code. Now that the NUMA balancing code no
longer uses it either, get rid of it.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: jhladky@redhat.com
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20170623165530.22514-5-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/fair.c  124
1 file changed, 1 insertion(+), 123 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 79ac078caf5d..6f4f155adf5f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1382,7 +1382,6 @@ static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -3045,8 +3044,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
  * differential update where we store the last value we propagated. This in
  * turn allows skipping updates if the differential is 'small'.
  *
- * Updating tg's load_avg is necessary before update_cfs_share() (which is
- * done) and effective_load() (which is not done because it is too costly).
+ * Updating tg's load_avg is necessary before update_cfs_share().
  */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {
@@ -5298,126 +5296,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/*
- * effective_load() calculates the load change as seen from the root_task_group
- *
- * Adding load to a group doesn't make a group heavier, but can cause movement
- * of group shares between cpus. Assuming the shares were perfectly aligned one
- * can calculate the shift in shares.
- *
- * Calculate the effective load difference if @wl is added (subtracted) to @tg
- * on this @cpu and results in a total addition (subtraction) of @wg to the
- * total group weight.
- *
- * Given a runqueue weight distribution (rw_i) we can compute a shares
- * distribution (s_i) using:
- *
- *   s_i = rw_i / \Sum rw_j						(1)
- *
- * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
- * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
- * shares distribution (s_i):
- *
- *   rw_i = {   2,   4,   1,   0 }
- *   s_i  = { 2/7, 4/7, 1/7,   0 }
- *
- * As per wake_affine() we're interested in the load of two CPUs (the CPU the
- * task used to run on and the CPU the waker is running on), we need to
- * compute the effect of waking a task on either CPU and, in case of a sync
- * wakeup, compute the effect of the current task going to sleep.
- *
- * So for a change of @wl to the local @cpu with an overall group weight change
- * of @wl we can compute the new shares distribution (s'_i) using:
- *
- *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
- *
- * Suppose we're interested in CPUs 0 and 1, and want to compute the load
- * differences in waking a task to CPU 0. The additional task changes the
- * weight and shares distributions like:
- *
- *   rw'_i = {   3,   4,   1,   0 }
- *   s'_i  = { 3/8, 4/8, 1/8,   0 }
- *
- * We can then compute the difference in effective weight by using:
- *
- *   dw_i = S * (s'_i - s_i)						(3)
- *
- * Where 'S' is the group weight as seen by its parent.
- *
- * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
- * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
- * 4/7) times the weight of the group.
- */
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	struct sched_entity *se = tg->se[cpu];
-
-	if (!tg->parent)	/* the trivial, non-cgroup case */
-		return wl;
-
-	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = se->my_q;
-		long W, w = cfs_rq_load_avg(cfs_rq);
-
-		tg = cfs_rq->tg;
-
-		/*
-		 * W = @wg + \Sum rw_j
-		 */
-		W = wg + atomic_long_read(&tg->load_avg);
-
-		/* Ensure \Sum rw_j >= rw_i */
-		W -= cfs_rq->tg_load_avg_contrib;
-		W += w;
-
-		/*
-		 * w = rw_i + @wl
-		 */
-		w += wl;
-
-		/*
-		 * wl = S * s'_i; see (2)
-		 */
-		if (W > 0 && w < W)
-			wl = (w * (long)scale_load_down(tg->shares)) / W;
-		else
-			wl = scale_load_down(tg->shares);
-
-		/*
-		 * Per the above, wl is the new se->load.weight value; since
-		 * those are clipped to [MIN_SHARES, ...) do so now. See
-		 * calc_cfs_shares().
-		 */
-		if (wl < MIN_SHARES)
-			wl = MIN_SHARES;
-
-		/*
-		 * wl = dw_i = S * (s'_i - s_i); see (3)
-		 */
-		wl -= se->avg.load_avg;
-
-		/*
-		 * Recursively apply this logic to all parent groups to compute
-		 * the final effective load change on the root group. Since
-		 * only the @tg group gets extra weight, all parent groups can
-		 * only redistribute existing shares. @wl is the shift in shares
-		 * resulting from this level per the above.
-		 */
-		wg = 0;
-	}
-
-	return wl;
-}
-#else
-
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	return wl;
-}
-
-#endif
-
 static void record_wakee(struct task_struct *p)
 {
 	/*
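For readers following the arithmetic in the removed comment, here is a minimal
standalone userspace sketch (not part of the commit; all names and the S = 1024
value are chosen only for illustration) that reproduces the worked example:
with runqueue weights rw_i = { 2, 4, 1, 0 } and one extra unit-weight task
woken to CPU 0, it evaluates s_i, s'_i and dw_i per equations (1)-(3) above,
yielding the 5/56 and -4/56 figures quoted in the comment.

/*
 * Standalone illustration (not kernel code) of the shares arithmetic
 * described in the removed effective_load() comment:
 *
 *   s_i  = rw_i / \Sum rw_j                      (1)
 *   s'_i = (rw_i + wl) / (wg + \Sum rw_j)        (2)
 *   dw_i = S * (s'_i - s_i)                      (3)
 */
#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };	/* per-CPU runqueue weights (rw_i) */
	double S = 1024;		/* group weight as seen by its parent (example value) */
	double wl = 1, wg = 1;		/* wake one unit-weight task on CPU 0 */
	double sum = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		sum += rw[cpu];

	for (cpu = 0; cpu < 4; cpu++) {
		double s_old = rw[cpu] / sum;					/* (1) */
		double s_new = (rw[cpu] + (cpu == 0 ? wl : 0)) / (sum + wg);	/* (2) */
		double dw = S * (s_new - s_old);				/* (3) */

		printf("cpu%d: s_i=%.4f s'_i=%.4f dw_i/S=%+.4f\n",
		       cpu, s_old, s_new, dw / S);
	}
	/* Prints dw_0/S = +0.0893 (= 5/56) and dw_1/S = -0.0714 (= -4/56). */
	return 0;
}

Note this models only a single level of the group hierarchy; the removed
function repeated the same step for each parent level, resetting wg to 0 above
the modified group so that parents merely redistribute existing shares.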