aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-09-07 09:05:42 -0400
committerIngo Molnar <mingo@kernel.org>2015-09-13 03:52:59 -0400
commit54a21385facbdcd89a78e8c3e5025f04c5f2b59c (patch)
treec730d1646928df978e88485fc7ed7fd208b5c120 /kernel/sched
parent98d8fd8126676f7ba6e133e65b2ca4b17989d32c (diff)
sched/fair: Rename scale() to cap_scale()
Rename scale() to cap_scale() to better reflect its purpose, it is after all not a general purpose scale function, it has SCHED_CAPACITY_SHIFT hardcoded in it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/fair.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 047fd1c78a94..7109047731eb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2515,7 +2515,7 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-#define scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
 /*
  * We can represent the historical contribution to runnable average as the
@@ -2588,7 +2588,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		 * period and accrue it.
 		 */
 		delta_w = 1024 - delta_w;
-		scaled_delta_w = scale(delta_w, scale_freq);
+		scaled_delta_w = cap_scale(delta_w, scale_freq);
 		if (weight) {
 			sa->load_sum += weight * scaled_delta_w;
 			if (cfs_rq) {
@@ -2597,7 +2597,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			}
 		}
 		if (running)
-			sa->util_sum += scale(scaled_delta_w, scale_cpu);
+			sa->util_sum += cap_scale(scaled_delta_w, scale_cpu);
 
 		delta -= delta_w;
 
@@ -2614,25 +2614,25 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 
 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
 		contrib = __compute_runnable_contrib(periods);
-		contrib = scale(contrib, scale_freq);
+		contrib = cap_scale(contrib, scale_freq);
 		if (weight) {
 			sa->load_sum += weight * contrib;
 			if (cfs_rq)
 				cfs_rq->runnable_load_sum += weight * contrib;
 		}
 		if (running)
-			sa->util_sum += scale(contrib, scale_cpu);
+			sa->util_sum += cap_scale(contrib, scale_cpu);
 	}
 
 	/* Remainder of delta accrued against u_0` */
-	scaled_delta = scale(delta, scale_freq);
+	scaled_delta = cap_scale(delta, scale_freq);
 	if (weight) {
 		sa->load_sum += weight * scaled_delta;
 		if (cfs_rq)
 			cfs_rq->runnable_load_sum += weight * scaled_delta;
 	}
 	if (running)
-		sa->util_sum += scale(scaled_delta, scale_cpu);
+		sa->util_sum += cap_scale(scaled_delta, scale_cpu);
 
 	sa->period_contrib += delta;
 