Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 59
1 file changed, 53 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..bc1563e7a248 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1309,16 +1309,63 @@ out:
 }
 #endif /* CONFIG_SMP */
 
-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is, the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ *       degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+	u64 gran = 0;
+
+	if (this_run < expected_wakeup)
+		gran = expected_wakeup - this_run;
+
+	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
 
+	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+		gran = adaptive_gran(curr, se);
+
 	/*
-	 * More easily preempt - nice tasks, while not making it harder for
-	 * + nice tasks.
+	 * Since it's curr running now, convert the gran from real-time
+	 * to virtual-time in its units.
 	 */
-	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN)) {
+		/*
+		 * By using 'se' instead of 'curr' we penalize light tasks, so
+		 * they get preempted easier. That is, if 'se' < 'curr' then
+		 * the resulting gran will be larger, therefore penalizing the
+		 * lighter; if otoh 'se' > 'curr' then the resulting gran will
+		 * be smaller, again penalizing the lighter task.
+		 *
+		 * This is especially important for buddies when the leftmost
+		 * task is higher priority than the buddy.
+		 */
+		if (unlikely(se->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, se);
+	} else {
+		if (unlikely(curr->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, curr);
+	}
 
 	return gran;
 }
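
[Editor's note] To see what adaptive_gran() computes, here is a rough user-space model of the same arithmetic; it is a sketch, not part of the patch. The 10 ms wakeup granularity and the sample inputs are assumptions chosen for illustration; the kernel works in nanoseconds and takes these values from the runqueue and the waking entity.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed value for sysctl_sched_wakeup_granularity, in ns. */
    static const int64_t wakeup_granularity_ns = 10000000LL; /* 10 ms */

    /*
     * Model of adaptive_gran(): the longer curr has already run relative
     * to the waker's expected wakeup period, the smaller the preemption
     * granularity becomes -- down to 0, clamped above by the sysctl.
     */
    static int64_t model_adaptive_gran(int64_t this_run_ns,
                                       int64_t avg_wakeup_ns,
                                       unsigned long nr_running)
    {
        int64_t expected_wakeup = 2 * avg_wakeup_ns * (int64_t)nr_running;
        int64_t gran = 0;

        if (this_run_ns < expected_wakeup)
            gran = expected_wakeup - this_run_ns;

        return gran < wakeup_granularity_ns ? gran : wakeup_granularity_ns;
    }

    int main(void)
    {
        /* Sample inputs (assumptions): a wakee that historically wakes
         * others every 2 ms, on a runqueue of 2 tasks, while curr has
         * run 1 ms in its current slice. */
        int64_t gran = model_adaptive_gran(1000000, 2000000, 2);

        /* expected_wakeup = 2 * 2 ms * 2 = 8 ms; gran = 8 ms - 1 ms = 7 ms,
         * which is below the 10 ms cap, so 7 ms is used. */
        printf("granularity = %lld ns\n", (long long)gran);
        return 0;
    }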
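[Editor's note] And a minimal model of the virtual-time conversion in the ASYM_GRAN branch, assuming calc_delta_fair() scales as delta * NICE_0_LOAD / weight (the kernel computes this through precomputed inverse weights, but the ratio is the same). The weights 335 and 3121 are the kernel's load weights for nice +5 and nice -5:

    #include <stdio.h>
    #include <stdint.h>

    #define NICE_0_LOAD 1024UL /* load weight of a nice-0 task */

    /* Simplified calc_delta_fair(): scale a real-time delta into the
     * virtual time of an entity with the given load weight. */
    static uint64_t model_calc_delta_fair(uint64_t delta, unsigned long weight)
    {
        return delta * NICE_0_LOAD / weight;
    }

    int main(void)
    {
        uint64_t gran = 10000000; /* 10 ms, in ns */

        /* Light wakee (nice +5, weight 335): the virtual granularity
         * grows, so the light task needs a larger vruntime lead before
         * it may preempt -- the lighter task is penalized. */
        printf("light wakee: %llu ns\n",
               (unsigned long long)model_calc_delta_fair(gran, 335));

        /* Heavy wakee (nice -5, weight 3121): the virtual granularity
         * shrinks, making preemption easier. */
        printf("heavy wakee: %llu ns\n",
               (unsigned long long)model_calc_delta_fair(gran, 3121));

        return 0;
    }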
@@ -1345,7 +1392,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	if (vdiff <= 0)
 		return -1;
 
-	gran = wakeup_gran(curr);
+	gran = wakeup_gran(curr, se);
 	if (vdiff > gran)
 		return 1;
 
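
[Editor's note] For completeness, the caller in the second hunk, wakeup_preempt_entity(), turns this granularity into a three-way decision, where vdiff is curr->vruntime - se->vruntime. A schematic model of that decision (again a sketch, not the kernel function):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Model of the decision in wakeup_preempt_entity():
     *  -1: the wakee is not ahead of curr in virtual time -- no preemption
     *   1: the wakee leads by more than the scaled granularity -- preempt
     *   0: inside the granularity band -- left to the caller
     */
    static int model_wakeup_preempt(int64_t vdiff, int64_t gran)
    {
        if (vdiff <= 0)
            return -1;
        if (vdiff > gran)
            return 1;
        return 0;
    }

    int main(void)
    {
        int64_t gran = 7000000; /* 7 ms, the value from the first sketch */

        printf("%d %d %d\n",
               model_wakeup_preempt(-1000000, gran), /* -1: wakee behind */
               model_wakeup_preempt(3000000, gran),  /*  0: within gran  */
               model_wakeup_preempt(9000000, gran)); /*  1: preempt      */
        return 0;
    }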