author     Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:13 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:13 -0400
commit     647e7cac2d215fb8890f79252d7eaee3d6743d66
tree       22ca4b4d3f218107935f0a128a7114a3ceba19f5 /kernel/sched_fair.c
parent     3a2520157234d58abce89526756a32c272824f3f
sched: vslice fixups for non-0 nice levels
Make vslice accurate wrt nice levels, and add some comments
while we're at it.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   53
1 file changed, 40 insertions, 13 deletions
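
The crux of the change: the old __sched_vslice() split the latency period by the number of runnable tasks alone, so the vruntime debit it produced was the same whether the runqueue held nice-0 or heavily reniced tasks; the new version divides the period by the runqueue's total load weight (vs = p/rw), which does reflect nice levels. The following is a minimal userspace sketch of that before/after shape, not kernel code: LATENCY_NS is an assumed stand-in for sysctl_sched_latency, the nr_latency stretching/capping is omitted for brevity, and the nice-0/nice-5 weights (1024 and 335) are illustrative assumptions.

/* Hedged sketch, userspace C -- mirrors only the shape of the change. */
#include <stdio.h>
#include <stdint.h>

#define LATENCY_NS 20000000ULL   /* assumed stand-in for sysctl_sched_latency */

/* old: period split by task count -- blind to task weights/nice levels */
static uint64_t vslice_old(uint64_t nr_running)
{
        return LATENCY_NS / nr_running;
}

/* new: period split by total runqueue load weight (vs = p/rw) */
static uint64_t vslice_new(uint64_t rq_weight)
{
        return LATENCY_NS / rq_weight;
}

int main(void)
{
        /* one nice-0 task (weight 1024) plus one nice-5 task (weight 335) */
        printf("old vslice: %llu\n", (unsigned long long)vslice_old(2));
        printf("new vslice: %llu\n", (unsigned long long)vslice_new(1024 + 335));
        return 0;
}

Renicing either task changes rq_weight and therefore the new result, while the old result only moves when the task count does.
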
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 32fd976f8566..1f14b56d0d00 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -217,6 +217,15 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+
+/*
+ * The idea is to set a period in which each task runs once.
+ *
+ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
+ * this period because otherwise the slices get too small.
+ *
+ * p = (nr <= nl) ? l : l*nr/nl
+ */
 static u64 __sched_period(unsigned long nr_running)
 {
         u64 period = sysctl_sched_latency;
@@ -230,27 +239,45 @@ static u64 __sched_period(unsigned long nr_running)
         return period;
 }
 
+/*
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
+ */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        u64 period = __sched_period(cfs_rq->nr_running);
+        u64 slice = __sched_period(cfs_rq->nr_running);
 
-        period *= se->load.weight;
-        do_div(period, cfs_rq->load.weight);
+        slice *= se->load.weight;
+        do_div(slice, cfs_rq->load.weight);
 
-        return period;
+        return slice;
 }
 
-static u64 __sched_vslice(unsigned long nr_running)
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 {
-        unsigned long period = sysctl_sched_latency;
-        unsigned long nr_latency = sysctl_sched_nr_latency;
+        u64 vslice = __sched_period(nr_running);
 
-        if (unlikely(nr_running > nr_latency))
-                nr_running = nr_latency;
+        do_div(vslice, rq_weight);
 
-        period /= nr_running;
+        return vslice;
+}
 
-        return (u64)period;
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
+{
+        return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+}
+
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+                        cfs_rq->nr_running + 1);
 }
 
 /*
@@ -469,10 +496,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                         vruntime >>= 1;
                 }
         } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-                vruntime += __sched_vslice(cfs_rq->nr_running)/2;
+                vruntime += sched_vslice(cfs_rq)/2;
 
         if (initial && sched_feat(START_DEBIT))
-                vruntime += __sched_vslice(cfs_rq->nr_running + 1);
+                vruntime += sched_vslice_add(cfs_rq, se);
 
         if (!initial) {
                 if (sched_feat(NEW_FAIR_SLEEPERS))
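
For a concrete feel for the three comment formulas introduced above, the arithmetic of __sched_period(), sched_slice() and __sched_vslice() can be mirrored outside the kernel. In the sketch below the tunables and weights are assumptions chosen for illustration (latency 20 ms, nr_latency 5, nice-0 weight 1024, nice-5 weight 335); only the formulas p = (nr <= nl) ? l : l*nr/nl, s = p*w/rw and vs = p/rw are taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define LATENCY_NS  20000000ULL   /* assumed stand-in for sysctl_sched_latency    */
#define NR_LATENCY  5ULL          /* assumed stand-in for sysctl_sched_nr_latency */

/* p = (nr <= nl) ? l : l*nr/nl -- stretch the period when tasks outnumber nl */
static uint64_t period(uint64_t nr_running)
{
        if (nr_running > NR_LATENCY)
                return LATENCY_NS * nr_running / NR_LATENCY;
        return LATENCY_NS;
}

/* s = p*w/rw -- wall-time slice, proportional to the entity's own weight */
static uint64_t slice(uint64_t w, uint64_t rw, uint64_t nr)
{
        return period(nr) * w / rw;
}

/* vs = s/w = p/rw -- vruntime slice, the same for every entity on the rq */
static uint64_t vslice(uint64_t rw, uint64_t nr)
{
        return period(nr) / rw;
}

int main(void)
{
        uint64_t w0 = 1024, w5 = 335;   /* nice-0 and nice-5 weights */
        uint64_t rw = w0 + w5, nr = 2;

        printf("wall slice, nice-0 task: %llu ns\n", (unsigned long long)slice(w0, rw, nr));
        printf("wall slice, nice-5 task: %llu ns\n", (unsigned long long)slice(w5, rw, nr));
        printf("vruntime slice (p/rw)  : %llu\n",    (unsigned long long)vslice(rw, nr));
        return 0;
}

The two wall-time slices sum (up to rounding) back to the 20 ms period, while the vruntime slice is independent of any single task's weight, which is what place_entity() relies on when it debits sched_vslice_add() for a newly placed entity.
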