Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c  53
1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 32fd976f8566..1f14b56d0d00 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -217,6 +217,15 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+
+/*
+ * The idea is to set a period in which each task runs once.
+ *
+ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
+ * this period because otherwise the slices get too small.
+ *
+ * p = (nr <= nl) ? l : l*nr/nl
+ */
 static u64 __sched_period(unsigned long nr_running)
 {
 	u64 period = sysctl_sched_latency;
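
Annotation (not part of the patch): a stand-alone user-space sketch of the period formula in the new comment, p = (nr <= nl) ? l : l*nr/nl. The 20 ms latency target and nr_latency of 20 are assumed, illustrative values, not necessarily the kernel's defaults.

/* Sketch of the period formula only; all values are illustrative. */
#include <stdio.h>

#define SCHED_LATENCY_NS	20000000ULL	/* assumed latency target: 20 ms */
#define SCHED_NR_LATENCY	20		/* assumed nr_latency */

static unsigned long long sched_period(unsigned long nr_running)
{
	unsigned long long period = SCHED_LATENCY_NS;

	/* Stretch the period once it can no longer hold nr_latency slices. */
	if (nr_running > SCHED_NR_LATENCY) {
		period *= nr_running;
		period /= SCHED_NR_LATENCY;
	}

	return period;
}

int main(void)
{
	/* 5 tasks fit in one latency period; 40 tasks stretch it to 2x. */
	printf("p(5)  = %llu ns\n", sched_period(5));	/* 20000000 */
	printf("p(40) = %llu ns\n", sched_period(40));	/* 40000000 */
	return 0;
}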
@@ -230,27 +239,45 @@ static u64 __sched_period(unsigned long nr_running)
 	return period;
 }
 
+/*
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
+ */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 period = __sched_period(cfs_rq->nr_running);
+	u64 slice = __sched_period(cfs_rq->nr_running);
 
-	period *= se->load.weight;
-	do_div(period, cfs_rq->load.weight);
+	slice *= se->load.weight;
+	do_div(slice, cfs_rq->load.weight);
 
-	return period;
+	return slice;
 }
 
-static u64 __sched_vslice(unsigned long nr_running)
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 {
-	unsigned long period = sysctl_sched_latency;
-	unsigned long nr_latency = sysctl_sched_nr_latency;
+	u64 vslice = __sched_period(nr_running);
 
-	if (unlikely(nr_running > nr_latency))
-		nr_running = nr_latency;
+	do_div(vslice, rq_weight);
 
-	period /= nr_running;
+	return vslice;
+}
 
-	return (u64)period;
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
+{
+	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+}
+
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+			cfs_rq->nr_running + 1);
+}
 
 /*
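
Annotation (not part of the patch): a user-space sketch of the slice arithmetic added above, s = p*w/rw and vs = p/rw, with simplified types and do_div() replaced by plain 64-bit division. The period and the load weights are assumed, illustrative values; 1024 is the nice-0 load weight, 2048 stands in for a heavier entity.

/* Sketch of sched_slice()/__sched_vslice(); weights are illustrative. */
#include <stdio.h>

static unsigned long long period = 20000000ULL;	/* assumed p = 20 ms */

/* s = p*w/rw: the wall-time slice grows with the entity's weight. */
static unsigned long long slice(unsigned long w, unsigned long rw)
{
	return period * w / rw;
}

/* vs = p/rw: the vruntime slice does not depend on the entity's weight. */
static unsigned long long vslice(unsigned long rw)
{
	return period / rw;
}

int main(void)
{
	unsigned long w_light = 1024, w_heavy = 2048;	/* illustrative weights */
	unsigned long rw = w_light + w_heavy;		/* cfs_rq->load.weight */

	printf("slice(light) = %llu ns\n", slice(w_light, rw));	/* ~6.67 ms */
	printf("slice(heavy) = %llu ns\n", slice(w_heavy, rw));	/* ~13.3 ms */
	printf("vslice       = %llu ns\n", vslice(rw));		/* same for both */
	return 0;
}

The numbers illustrate the new comments: the heavier entity receives a proportionally longer wall-clock slice, while the vruntime slice p/rw comes out the same for every entity on the queue.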
@@ -469,10 +496,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 			vruntime >>= 1;
 		}
 	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += __sched_vslice(cfs_rq->nr_running)/2;
+		vruntime += sched_vslice(cfs_rq)/2;
 
 	if (initial && sched_feat(START_DEBIT))
-		vruntime += __sched_vslice(cfs_rq->nr_running + 1);
+		vruntime += sched_vslice_add(cfs_rq, se);
 
 	if (!initial) {
 		if (sched_feat(NEW_FAIR_SLEEPERS))
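
Annotation (not part of the patch): in place_entity(), START_DEBIT now charges a new entity one vruntime slice computed as if it were already queued, which is why sched_vslice_add() adds se->load.weight and one extra runnable task before dividing. A rough worked example, assuming a 20 ms period and nice-0 weights:

/* Sketch of the START_DEBIT charge from place_entity(); numbers illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long long period = 20000000ULL;	/* assumed p = 20 ms */
	unsigned long rq_weight = 2 * 1024;		/* two nice-0 tasks already queued */
	unsigned long se_weight = 1024;			/* the entity being placed */

	/*
	 * sched_vslice_add(): vs = p / (rw + w); with only 3 tasks the period
	 * itself is not stretched (3 <= nr_latency).
	 */
	unsigned long long debit = period / (rq_weight + se_weight);

	printf("START_DEBIT vruntime charge: %llu ns\n", debit);	/* ~6510 */
	return 0;
}

Unlike the removed __sched_vslice(nr_running), which divided the latency target by the task count alone, the new helpers divide by the total load weight, so the debit shrinks as the queue gets heavier.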