aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorVincent Guittot <vincent.guittot@linaro.org>2015-02-27 10:54:08 -0500
committerIngo Molnar <mingo@kernel.org>2015-03-27 04:36:01 -0400
commitb5b4860d1d61ddc5308c7d492cbeaa3a6e508d7f (patch)
tree645493c8328b0673cd2e14193a98dd732a4ab244 /kernel/sched
parent0c1dc6b27dac883ee78392189c8e20e764d79bfa (diff)
sched: Make scale_rt invariant with frequency
The average running time of RT tasks is used to estimate the remaining compute capacity for CFS tasks. This remaining capacity is the original capacity scaled down by a factor (aka scale_rt_capacity). This estimation of available capacity must also be invariant with frequency scaling. A frequency scaling factor is applied on the running time of the RT tasks for computing scale_rt_capacity. In sched_rt_avg_update(), we now scale the RT execution time like below: rq->rt_avg += rt_delta * arch_scale_freq_capacity() >> SCHED_CAPACITY_SHIFT Then, scale_rt_capacity can be summarized by: scale_rt_capacity = SCHED_CAPACITY_SCALE * available / total with available = total - rq->rt_avg This has been optimized in the current code by: scale_rt_capacity = available / (total >> SCHED_CAPACITY_SHIFT) But we can also develop the equation like below: scale_rt_capacity = SCHED_CAPACITY_SCALE - ((rq->rt_avg << SCHED_CAPACITY_SHIFT) / total) and we can optimize the equation by removing the SCHED_CAPACITY_SHIFT shift in the computation of rq->rt_avg and scale_rt_capacity(). So rq->rt_avg += rt_delta * arch_scale_freq_capacity() and scale_rt_capacity = SCHED_CAPACITY_SCALE - (rq->rt_avg / total) arch_scale_frequency_capacity() will be called in the hot path of the scheduler which implies the need for a short and efficient function. As an example, arch_scale_frequency_capacity() should return a cached value that is updated periodically outside of the hot path. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Morten Rasmussen <morten.rasmussen@arm.com> Cc: Morten.Rasmussen@arm.com Cc: dietmar.eggemann@arm.com Cc: efault@gmx.de Cc: kamalesh@linux.vnet.ibm.com Cc: linaro-kernel@lists.linaro.org Cc: nicolas.pitre@linaro.org Cc: preeti@linux.vnet.ibm.com Cc: riel@redhat.com Link: http://lkml.kernel.org/r/1425052454-25797-6-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/fair.c17
-rw-r--r--kernel/sched/sched.h4
2 files changed, 8 insertions, 13 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f031e454740..dc7c693f044a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6004,7 +6004,7 @@ unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6004static unsigned long scale_rt_capacity(int cpu) 6004static unsigned long scale_rt_capacity(int cpu)
6005{ 6005{
6006 struct rq *rq = cpu_rq(cpu); 6006 struct rq *rq = cpu_rq(cpu);
6007 u64 total, available, age_stamp, avg; 6007 u64 total, used, age_stamp, avg;
6008 s64 delta; 6008 s64 delta;
6009 6009
6010 /* 6010 /*
@@ -6020,19 +6020,12 @@ static unsigned long scale_rt_capacity(int cpu)
6020 6020
6021 total = sched_avg_period() + delta; 6021 total = sched_avg_period() + delta;
6022 6022
6023 if (unlikely(total < avg)) { 6023 used = div_u64(avg, total);
6024 /* Ensures that capacity won't end up being negative */
6025 available = 0;
6026 } else {
6027 available = total - avg;
6028 }
6029 6024
6030 if (unlikely((s64)total < SCHED_CAPACITY_SCALE)) 6025 if (likely(used < SCHED_CAPACITY_SCALE))
6031 total = SCHED_CAPACITY_SCALE; 6026 return SCHED_CAPACITY_SCALE - used;
6032 6027
6033 total >>= SCHED_CAPACITY_SHIFT; 6028 return 1;
6034
6035 return div_u64(available, total);
6036} 6029}
6037 6030
6038static void update_cpu_capacity(struct sched_domain *sd, int cpu) 6031static void update_cpu_capacity(struct sched_domain *sd, int cpu)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c95cc2e0be2..36000029f33b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1386,9 +1386,11 @@ static inline int hrtick_enabled(struct rq *rq)
1386 1386
1387#ifdef CONFIG_SMP 1387#ifdef CONFIG_SMP
1388extern void sched_avg_update(struct rq *rq); 1388extern void sched_avg_update(struct rq *rq);
1389extern unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
1390
1389static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) 1391static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1390{ 1392{
1391 rq->rt_avg += rt_delta; 1393 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1392 sched_avg_update(rq); 1394 sched_avg_update(rq);
1393} 1395}
1394#else 1396#else