Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 64
1 file changed, 36 insertions, 28 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..8e1352c75557 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				se->load.weight, &cfs_rq_of(se)->load);
-	}
-
-	return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
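
The comment on the removed helper, delta *= P[w / rw], reads as: scale delta by the product, over each level of the group hierarchy, of the entity's weight w against its runqueue's total weight rw. A minimal floating-point sketch of that product follows; the weights are made-up values, and the real kernel does this in fixed point via calc_delta_mine() rather than with doubles.

#include <stdio.h>

/* One level of a toy group hierarchy: entity weight w vs. total queue
 * weight rw at that level. All values here are invented. */
struct level { double w, rw; };

/* delta *= P[w / rw]: scale delta by the weight ratio at every level,
 * as the removed calc_delta_weight() did with fixed-point arithmetic. */
static double calc_delta_weight(double delta, const struct level *lv, int depth)
{
	for (int i = 0; i < depth; i++)
		delta *= lv[i].w / lv[i].rw;
	return delta;
}

int main(void)
{
	/* A weight-1024 task in a weight-2048 group on a weight-3072 queue:
	 * 12 * (1024/2048) * (2048/3072) = 4. */
	struct level hier[] = { { 1024, 2048 }, { 2048, 3072 } };

	printf("%.2f\n", calc_delta_weight(12.0, hier, 2));
	return 0;
}
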
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
+	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-	if (unlikely(!se->on_rq))
-		nr_running++;
+	for_each_sched_entity(se) {
+		struct load_weight *load = &cfs_rq->load;
+
+		if (unlikely(!se->on_rq)) {
+			struct load_weight lw = cfs_rq->load;
 
-	return calc_delta_weight(__sched_period(nr_running), se);
+			update_load_add(&lw, se->load.weight);
+			load = &lw;
+		}
+		slice = calc_delta_mine(slice, se->load.weight, load);
+	}
+	return slice;
 }
 
 /*
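
The rewritten sched_slice() folds the old calc_delta_weight() walk in directly, with one refinement: when an entity is not yet on the runqueue (!se->on_rq), its weight is added into a local copy of the queue load, so the slice is computed as if the task were already enqueued. A hedged single-level sketch of that adjustment, using toy types and plain integer division in place of the kernel's fixed-point calc_delta_mine():

#include <stdio.h>

struct load_weight { unsigned long weight; };

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
}

/* slice = period * w / rw, counting a not-yet-enqueued entity's weight. */
static unsigned long slice_for(unsigned long period, unsigned long se_weight,
			       struct load_weight queue_load, int on_rq)
{
	struct load_weight lw = queue_load;	/* local copy, as in the patch */

	if (!on_rq)
		update_load_add(&lw, se_weight);
	return period * se_weight / lw.weight;
}

int main(void)
{
	struct load_weight q = { 2048 };

	/* 12ms period, weight-1024 task not yet on a weight-2048 queue:
	 * slice = 12 * 1024 / (2048 + 1024) = 4ms. */
	printf("%lu\n", slice_for(12, 1024, q, 0));
	return 0;
}
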
@@ -1019,16 +1013,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
+
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+		return chosen_wakeup_cpu;
 
 	/*
 	 * If it is idle, then it is the best cpu to run this task.
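
The added fast path implements sched_mc power-savings wakeup biasing: at the highest setting, a user task waking while both the target and the waking CPU are idle is redirected to the domain's preferred wakeup CPU, consolidating load so other packages can stay idle. A toy predicate capturing the same gating logic; every name here is an invented stand-in for the kernel state, and the level value 2 assumes POWERSAVINGS_BALANCE_WAKEUP is the top of the 0/1/2 scale:

#include <stdbool.h>
#include <stdio.h>

static bool take_chosen_wakeup_cpu(int power_savings, bool cpu_idle,
				   bool this_cpu_idle, bool is_user_task,
				   unsigned long affinity, int chosen_cpu)
{
	return power_savings >= 2 &&		/* POWERSAVINGS_BALANCE_WAKEUP */
	       cpu_idle && this_cpu_idle &&	/* nothing gained by spreading */
	       is_user_task &&			/* p->mm && !(p->flags & PF_KTHREAD) */
	       (affinity >> chosen_cpu & 1);	/* affinity permits the move */
}

int main(void)
{
	/* Idle waker and wakee, user task allowed on CPU 2: consolidate. */
	printf("%d\n", take_chosen_wakeup_cpu(2, true, true, true, 0x4, 2));
	return 0;
}
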
@@ -1046,10 +1057,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
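
Dropping the on-stack cpumask_t tmp matters because that type grows with NR_CPUS; for_each_cpu_and() walks the intersection of two masks lazily, and the per-CPU cpu_active() test replaces the second cpus_and(). A toy sketch of that equivalence using plain 64-bit masks; the helper macro is a hypothetical stand-in, not the kernel's cpumask API:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for for_each_cpu_and(): visit the set bits of a & b lazily,
 * without first materializing the intersection in a temporary mask the way
 * the old cpus_and()-into-tmp code did. */
#define for_each_bit_and(i, a, b)		\
	for ((i) = 0; (i) < 64; (i)++)		\
		if (((a) & (b)) >> (i) & 1)

int main(void)
{
	uint64_t domain_span  = 0x0f;	/* CPUs 0-3 are in this domain */
	uint64_t cpus_allowed = 0x0a;	/* task may run on CPUs 1 and 3 */
	uint64_t cpu_active   = 0x07;	/* CPU 3 is going offline */
	int i;

	for_each_bit_and(i, domain_span, cpus_allowed)
		if (cpu_active >> i & 1)	/* per-CPU test, no second AND */
			printf("candidate cpu %d\n", i);	/* prints only 1 */
	return 0;
}
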
@@ -1242,13 +1252,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
 	}
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
 		goto out;
 
 	/*
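
These hunks are part of the tree-wide cpumask conversion: cpu_isset() took a cpumask_t by value, copying the whole bitmap (up to NR_CPUS bits) at every call, while cpumask_test_cpu() takes a const struct cpumask pointer. A toy illustration of the cost difference, with invented types that only mirror the idea, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TOY_NR_CPUS 4096

/* Old style: the whole bitmap is copied at each call site. */
typedef struct { unsigned long bits[TOY_NR_CPUS / 64]; } toy_cpumask_t;

static bool toy_cpu_isset(int cpu, toy_cpumask_t mask)	/* 512-byte copy on LP64 */
{
	return mask.bits[cpu / 64] >> (cpu % 64) & 1;
}

/* New style: only a pointer crosses the call boundary. */
static bool toy_cpumask_test_cpu(int cpu, const toy_cpumask_t *mask)
{
	return mask->bits[cpu / 64] >> (cpu % 64) & 1;
}

int main(void)
{
	toy_cpumask_t m;

	memset(&m, 0, sizeof(m));
	m.bits[0] = 1UL << 3;			/* CPU 3 allowed */
	printf("%d %d\n", toy_cpu_isset(3, m), toy_cpumask_test_cpu(3, &m));
	return 0;
}
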
@@ -1607,8 +1617,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 	}
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
-
 /*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
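
The file-local swap() macro goes away, presumably because a generic typeof-based swap() had been added to include/linux/kernel.h, making the private copy redundant. A user-space demonstration of the same idiom (typeof is a GCC extension, which the kernel relies on):

#include <stdio.h>

/* typeof-based swap, in the shape the removed local macro defined it. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	int x = 1, y = 2;

	swap(x, y);
	printf("%d %d\n", x, y);	/* 2 1 */
	return 0;
}
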