author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-09-23 08:54:23 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-09-23 08:54:23 -0400
commit    695698500912c4479ddf4723e492de3970ff8530 (patch)
tree      b84c708843998d2124573b809b8618a4e304e253 /kernel/sched_fair.c
parent    1a73ef6ac3f4b44abc9d1875eb9240d7524a7cf7 (diff)
sched: rework wakeup preemption
Rework the wakeup preemption to work on real runtime instead of
the virtual runtime. This greatly simplifies the code.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
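In short, the patch replaces the vruntime-based preemption test with a real-time one: preempt the current task once it has consumed more than one wakeup granule of actual CPU time since it was last picked. Below is a minimal standalone sketch of that rule. The two struct fields mirror the kernel's sched_entity, but the fixed 10 ms granularity and the should_preempt() helper are illustrative stand-ins, not kernel code; the kernel's wakeup_gran() scales the granularity by the waking entity's load.

#include <stdio.h>

typedef long long s64; /* kernel-style signed 64-bit; values in nanoseconds */

/* Only the two fields the new test reads, mirroring struct sched_entity. */
struct sched_entity {
        s64 sum_exec_runtime;      /* total real CPU time consumed */
        s64 prev_sum_exec_runtime; /* snapshot taken when the task was picked */
};

/* Stand-in for the kernel's wakeup_gran(): a fixed 10 ms for illustration. */
static s64 wakeup_gran(void)
{
        return 10 * 1000 * 1000LL;
}

/* The reworked test: has 'curr' run for more than one wakeup granule
 * of real time since it was last scheduled? */
static int should_preempt(const struct sched_entity *curr)
{
        s64 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        return delta_exec > wakeup_gran();
}

int main(void)
{
        /* Current task was picked 13 ms of CPU time ago: 13 ms > 10 ms. */
        struct sched_entity curr = {
                .sum_exec_runtime      = 25 * 1000 * 1000LL,
                .prev_sum_exec_runtime = 12 * 1000 * 1000LL,
        };

        printf("preempt: %d\n", should_preempt(&curr)); /* prints "preempt: 1" */
        return 0;
}

Note that the sketch needs no notion of a queue or hierarchy: the delta is an absolute quantity, which is what makes the simplification in the diff below possible.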
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 133
 1 file changed, 4 insertions(+), 129 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3b89aa6594a..c2089976345 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -409,64 +409,6 @@ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 /*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- *  -20         |
- *              |
- *    0 --------+-------
- *            .'
- *   19     .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
-        struct load_weight lw = {
-                .weight = NICE_0_LOAD,
-                .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
-        };
-
-        for_each_sched_entity(se) {
-                struct load_weight *se_lw = &se->load;
-                unsigned long rw = cfs_rq_of(se)->load.weight;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-                struct cfs_rq *cfs_rq = se->my_q;
-                struct task_group *tg = NULL;
-
-                if (cfs_rq)
-                        tg = cfs_rq->tg;
-
-                if (tg && tg->shares < NICE_0_LOAD) {
-                        /*
-                         * scale shares to what it would have been had
-                         * tg->weight been NICE_0_LOAD:
-                         *
-                         *   weight = 1024 * shares / tg->weight
-                         */
-                        lw.weight *= se->load.weight;
-                        lw.weight /= tg->shares;
-
-                        lw.inv_weight = 0;
-
-                        se_lw = &lw;
-                        rw += lw.weight - se->load.weight;
-                } else
-#endif
-
-                if (se->load.weight < NICE_0_LOAD) {
-                        se_lw = &lw;
-                        rw += NICE_0_LOAD - se->load.weight;
-                }
-
-                delta = calc_delta_mine(delta, rw, se_lw);
-        }
-
-        return delta;
-}
-
-/*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
@@ -1281,54 +1223,12 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * + nice tasks.
 	 */
 	if (sched_feat(ASYM_GRAN))
-		gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
-	else
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
 
 	return gran;
 }
 
 /*
- * Should 'se' preempt 'curr'.
- *
- *             |s1
- *        |s2
- *   |s3
- *         g
- *      |<--->|c
- *
- *  w(c, s1) = -1
- *  w(c, s2) =  0
- *  w(c, s3) =  1
- *
- */
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
-{
-	s64 gran, vdiff = curr->vruntime - se->vruntime;
-
-	if (vdiff < 0)
-		return -1;
-
-	gran = wakeup_gran(curr);
-	if (vdiff > gran)
-		return 1;
-
-	return 0;
-}
-
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
-/*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
@@ -1336,7 +1236,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	int se_depth, pse_depth;
+	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1374,33 +1274,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 	}
 
-	/*
-	 * preemption test can be made between sibling entities who are in the
-	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-	 * both tasks until we find their ancestors who are siblings of common
-	 * parent.
-	 */
-
-	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(se);
-	pse_depth = depth_se(pse);
-
-	while (se_depth > pse_depth) {
-		se_depth--;
-		se = parent_entity(se);
-	}
-
-	while (pse_depth > se_depth) {
-		pse_depth--;
-		pse = parent_entity(pse);
-	}
-
-	while (!is_same_group(se, pse)) {
-		se = parent_entity(se);
-		pse = parent_entity(pse);
-	}
-
-	if (wakeup_preempt_entity(se, pse) == 1)
+	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+	if (delta_exec > wakeup_gran(pse))
 		resched_task(curr);
 }
 
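Why the hierarchy walk could be dropped (a reading of the change, not spelled out in the commit message): vruntime values are only comparable between entities on the same cfs_rq, so the old test first had to walk se and pse up to a common parent before wakeup_preempt_entity() could compare them. A sum_exec_runtime delta is plain nanoseconds of CPU time and needs no common frame of reference, which is what lets wakeup_preempt_entity(), depth_se(), and the whole depth-matching walk disappear.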