Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 108
 1 file changed, 67 insertions(+), 41 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 98345e45b059..5cc1c162044f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                    struct sched_entity,
                                    run_node);
 
-                if (vruntime == cfs_rq->min_vruntime)
+                if (!cfs_rq->curr)
                         vruntime = se->vruntime;
                 else
                         vruntime = min_vruntime(vruntime, se->vruntime);
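
The hunk above changes when the leftmost queued entity alone defines the candidate min_vruntime: instead of comparing against the old minimum, the new test simply asks whether there is a current entity at all. A standalone sketch of the resulting logic, with simplified stand-in types (fake_cfs_rq and plain flags instead of the kernel's structures), is:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* overflow-safe helpers, shaped like the kernel's min_vruntime()/max_vruntime() */
static u64 min_vruntime(u64 min, u64 v)
{
        s64 delta = (s64)(v - min);
        return delta < 0 ? v : min;
}

static u64 max_vruntime(u64 min, u64 v)
{
        s64 delta = (s64)(v - min);
        return delta > 0 ? v : min;
}

/* simplified stand-in for struct cfs_rq: only the fields this sketch needs */
struct fake_cfs_rq {
        u64 min_vruntime;
        int has_curr;           /* cfs_rq->curr != NULL        */
        u64 curr_vruntime;
        int has_leftmost;       /* cfs_rq->rb_leftmost != NULL */
        u64 leftmost_vruntime;
};

static void update_min_vruntime(struct fake_cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->has_curr)
                vruntime = cfs_rq->curr_vruntime;

        if (cfs_rq->has_leftmost) {
                if (!cfs_rq->has_curr)          /* the new condition */
                        vruntime = cfs_rq->leftmost_vruntime;
                else
                        vruntime = min_vruntime(vruntime, cfs_rq->leftmost_vruntime);
        }

        /* min_vruntime only ever moves forward */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

int main(void)
{
        struct fake_cfs_rq rq = { .min_vruntime = 100, .has_curr = 0,
                                  .has_leftmost = 1, .leftmost_vruntime = 250 };
        update_min_vruntime(&rq);
        printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime); /* 250 */
        return 0;
}
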
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-        for_each_sched_entity(se) {
-                delta = calc_delta_mine(delta,
-                                se->load.weight, &cfs_rq_of(se)->load);
-        }
-
-        return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +426,23 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        unsigned long nr_running = cfs_rq->nr_running;
+        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-        if (unlikely(!se->on_rq))
-                nr_running++;
+        for_each_sched_entity(se) {
+                struct load_weight *load;
+
+                cfs_rq = cfs_rq_of(se);
+                load = &cfs_rq->load;
 
-        return calc_delta_weight(__sched_period(nr_running), se);
+                if (unlikely(!se->on_rq)) {
+                        struct load_weight lw = cfs_rq->load;
+
+                        update_load_add(&lw, se->load.weight);
+                        load = &lw;
+                }
+                slice = calc_delta_mine(slice, se->load.weight, load);
+        }
+        return slice;
 }
 
 /*
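
In the rewritten sched_slice() above, the period is sized for nr_running plus the entity being placed (when it is not yet queued), and is then scaled at every level of the group hierarchy by that entity's share of the runqueue weight; if the entity is not on the runqueue, its weight is first added to a local copy of the load. A minimal single-level arithmetic sketch, using a plain division in place of calc_delta_mine()'s fixed-point inverse:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* plain substitute for calc_delta_mine(): delta * weight / rq_weight */
static u64 calc_delta(u64 delta, unsigned long weight, unsigned long rq_weight)
{
        return delta * weight / rq_weight;
}

int main(void)
{
        u64 period = 20000000ULL;       /* say __sched_period() yields 20ms          */
        unsigned long se_weight = 1024; /* nice-0 entity being woken                 */
        unsigned long rq_weight = 3072; /* three nice-0 entities already queued      */
        int on_rq = 0;                  /* not enqueued yet                          */

        /* mirror the patch: weigh the new entity against a load that includes it */
        unsigned long load = rq_weight + (on_rq ? 0 : se_weight);
        u64 slice = calc_delta(period, se_weight, load);

        printf("slice = %llu ns\n", (unsigned long long)slice); /* 5000000: a quarter of the period */
        return 0;
}
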
@@ -492,6 +489,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
+       if (!delta_exec)
+               return;
 
        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
@@ -681,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                unsigned long thresh = sysctl_sched_latency;
 
                /*
-                * convert the sleeper threshold into virtual time
+                * Convert the sleeper threshold into virtual time.
+                * SCHED_IDLE is a special sub-class. We care about
+                * fairness only relative to other SCHED_IDLE tasks,
+                * all of which have the same weight.
                 */
-               if (sched_feat(NORMALIZED_SLEEPER))
+               if (sched_feat(NORMALIZED_SLEEPER) &&
+                               task_of(se)->policy != SCHED_IDLE)
                        thresh = calc_delta_fair(thresh, se);
 
                vruntime -= thresh;
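
The hunk above stops normalizing the sleeper credit for SCHED_IDLE tasks: calc_delta_fair() scales the threshold by NICE_0_LOAD over the entity's weight, and since SCHED_IDLE entities carry only a token weight, the scaled credit would hand a waking idle task a huge vruntime bonus, which is exactly what the idle class is not supposed to get. A rough standalone illustration (the idle weight of 3 below is a placeholder, not necessarily the kernel's exact constant):

#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD     1024UL

/* simplified calc_delta_fair(): scale by NICE_0_LOAD / weight */
static uint64_t scale_by_weight(uint64_t delta, unsigned long weight)
{
        return delta * NICE_0_LOAD / weight;
}

int main(void)
{
        uint64_t thresh = 20000000ULL;  /* 20ms sleeper threshold, for illustration */

        /* nice-0 sleeper: credit stays at 20ms */
        printf("nice 0    : %llu ns\n", (unsigned long long)scale_by_weight(thresh, 1024));
        /* tiny-weight (SCHED_IDLE-like) sleeper: credit would balloon to ~6.8s */
        printf("idle-class: %llu ns\n", (unsigned long long)scale_by_weight(thresh, 3));
        return 0;
}
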
@@ -1017,16 +1020,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-       cpumask_t tmp;
        struct sched_domain *sd;
        int i;
+       unsigned int chosen_wakeup_cpu;
+       int this_cpu;
+
+       /*
+        * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+        * are idle and this is not a kernel thread and this task's affinity
+        * allows it to be moved to preferred cpu, then just move!
+        */
+
+       this_cpu = smp_processor_id();
+       chosen_wakeup_cpu =
+               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+               idle_cpu(cpu) && idle_cpu(this_cpu) &&
+               p->mm && !(p->flags & PF_KTHREAD) &&
+               cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+               return chosen_wakeup_cpu;
 
        /*
         * If it is idle, then it is the best cpu to run this task.
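
The block added above short-circuits wake_idle() under aggressive power savings: if both the waking CPU and the task's previous CPU are idle, and the task is an ordinary user task whose affinity permits it, it is packed onto the CPU nominated in the root domain (rd->sched_mc_preferred_wakeup_cpu) rather than spread out. A condensed sketch of just that predicate, with simplified stand-ins for the kernel's task and cpumask types:

#include <stdbool.h>
#include <stdio.h>

enum { POWERSAVINGS_BALANCE_NONE, POWERSAVINGS_BALANCE_BASIC,
       POWERSAVINGS_BALANCE_WAKEUP };

/* simplified stand-in for the fields wake_idle() looks at */
struct fake_task {
        bool has_mm;                    /* p->mm != NULL (user task)    */
        bool kthread;                   /* p->flags & PF_KTHREAD        */
        unsigned long cpus_allowed;     /* affinity as a plain bitmask  */
};

static bool pack_on_preferred_cpu(int power_savings, bool prev_idle, bool this_idle,
                                  const struct fake_task *p, int chosen_wakeup_cpu)
{
        return power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
               prev_idle && this_idle &&
               p->has_mm && !p->kthread &&
               (p->cpus_allowed & (1UL << chosen_wakeup_cpu));
}

int main(void)
{
        struct fake_task p = { .has_mm = true, .kthread = false, .cpus_allowed = 0xf };

        /* both CPUs idle, affinity allows CPU 2 -> pack onto the nominated CPU */
        printf("%d\n", pack_on_preferred_cpu(POWERSAVINGS_BALANCE_WAKEUP, true, true, &p, 2));
        return 0;
}
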
@@ -1044,10 +1064,9 @@ static int wake_idle(int cpu, struct task_struct *p)
                if ((sd->flags & SD_WAKE_IDLE)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
-                       cpus_and(tmp, sd->span, p->cpus_allowed);
-                       cpus_and(tmp, tmp, cpu_active_map);
-                       for_each_cpu_mask_nr(i, tmp) {
-                               if (idle_cpu(i)) {
+                       for_each_cpu_and(i, sched_domain_span(sd),
+                                        &p->cpus_allowed) {
+                               if (cpu_active(i) && idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
                                                        se.nr_wakeups_idle);
@@ -1240,13 +1259,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         * this_cpu and prev_cpu are present in:
         */
        for_each_domain(this_cpu, sd) {
-               if (cpu_isset(prev_cpu, sd->span)) {
+               if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
                        this_sd = sd;
                        break;
                }
        }
 
-       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+       if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
                goto out;
 
        /*
@@ -1328,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->last = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->last = se;
+       }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->next = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->next = se;
+       }
 }
 
 /*
@@ -1345,12 +1368,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
        struct task_struct *curr = rq->curr;
        struct sched_entity *se = &curr->se, *pse = &p->se;
+       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 
-       if (unlikely(rt_prio(p->prio))) {
-               struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+       update_curr(cfs_rq);
 
-               update_rq_clock(rq);
-               update_curr(cfs_rq);
+       if (unlikely(rt_prio(p->prio))) {
                resched_task(curr);
                return;
        }
@@ -1382,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                return;
 
        /*
-        * Batch tasks do not preempt (their preemption is driven by
+        * Batch and idle tasks do not preempt (their preemption is driven by
         * the tick):
         */
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
+       /* Idle tasks are by definition preempted by everybody. */
+       if (unlikely(curr->policy == SCHED_IDLE)) {
+               resched_task(curr);
+               return;
+       }
+
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
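
After the hunk above, the policy checks in check_preempt_wakeup() read: a (non-realtime) waker that is not SCHED_NORMAL never forces preemption, since batch and idle tasks are driven by the tick, while a SCHED_NORMAL waker immediately preempts a SCHED_IDLE current task; everything else falls through to the WAKEUP_PREEMPT and vruntime checks. A small sketch of just those two early tests (enum values here are illustrative, not the kernel's):

#include <stdio.h>

enum policy { SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE };
enum outcome { NO_PREEMPT, PREEMPT_NOW, KEEP_CHECKING };

/* models only the two early policy tests changed/added in this hunk */
static enum outcome wakeup_policy_check(enum policy waking, enum policy curr)
{
        if (waking != SCHED_NORMAL)     /* batch and idle wakers do not preempt    */
                return NO_PREEMPT;
        if (curr == SCHED_IDLE)         /* idle tasks are preempted by everybody   */
                return PREEMPT_NOW;
        return KEEP_CHECKING;           /* proceed to WAKEUP_PREEMPT / vruntime logic */
}

int main(void)
{
        printf("%d\n", wakeup_policy_check(SCHED_NORMAL, SCHED_IDLE));   /* 1: PREEMPT_NOW   */
        printf("%d\n", wakeup_policy_check(SCHED_BATCH,  SCHED_NORMAL)); /* 0: NO_PREEMPT    */
        printf("%d\n", wakeup_policy_check(SCHED_NORMAL, SCHED_NORMAL)); /* 2: KEEP_CHECKING */
        return 0;
}
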
@@ -1606,8 +1634,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
        }
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
-
 /*
  * Share the fairness runtime between parent and child, thus the
  * total amount of pressure for CPU stays equal - new tasks
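
The local swap() helper removed above had the shape shown below; a generic version (presumably the one in the core kernel headers) covers the same use by this point, so the per-file definition is redundant. For reference, the macro can be exercised standalone like this (typeof is a GCC extension):

#include <stdio.h>

/* same shape as the removed helper */
#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
        int x = 1, y = 2;

        swap(x, y);
        printf("x=%d y=%d\n", x, y);    /* x=2 y=1 */
        return 0;
}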