Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--   kernel/sched.c   63
 1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7ecc237e2aab..2711130cd973 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,7 +309,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 11
+#define SCHEDSTAT_VERSION 12
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -356,9 +356,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
                                     sd->lb_nobusyq[itype],
                                     sd->lb_nobusyg[itype]);
                 }
-                seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n",
+                seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
                     sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-                    sd->sbe_pushed, sd->sbe_attempts,
+                    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+                    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
                     sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
         }
 #endif
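
Note on the new format (not part of the patch): with SCHEDSTAT_VERSION 12 the per-domain line in /proc/schedstat gains four counters, and the final seq_printf() above now emits twelve trailing fields in this order: alb_cnt, alb_failed, alb_pushed, sbe_cnt, sbe_balanced, sbe_pushed, sbf_cnt, sbf_balanced, sbf_pushed, ttwu_wake_remote, ttwu_move_affine, ttwu_move_balance. The userspace sketch below reads them back; it only assumes they are the last twelve numbers on each "domainN ..." line, and its names and output are illustrative.

/*
 * Sketch: print the balance-on-exec (sbe_*) and balance-on-fork
 * (sbf_*) counters from a version-12 /proc/schedstat.  Field order
 * follows the seq_printf() call in the hunk above; we simply take
 * the last twelve numbers on each "domain" line.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char line[4096];
        FILE *fp = fopen("/proc/schedstat", "r");

        if (!fp)
                return 1;

        while (fgets(line, sizeof(line), fp)) {
                unsigned long v[12];
                char *fields[512];
                char *tok, *save = NULL;
                int n = 0, i;

                if (strncmp(line, "domain", 6))
                        continue;       /* skip version/timestamp/cpu lines */

                for (tok = strtok_r(line, " \t\n", &save); tok && n < 512;
                     tok = strtok_r(NULL, " \t\n", &save))
                        fields[n++] = tok;

                if (n < 12)
                        continue;

                for (i = 0; i < 12; i++)
                        v[i] = strtoul(fields[n - 12 + i], NULL, 10);

                printf("%s: sbe %lu/%lu/%lu sbf %lu/%lu/%lu\n",
                       fields[0], v[3], v[4], v[5], v[6], v[7], v[8]);
        }
        fclose(fp);
        return 0;
}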
@@ -1264,24 +1265,34 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
                         sd = tmp;
 
         if (sd) {
+                int new_cpu;
                 struct sched_group *group;
 
+                schedstat_inc(sd, sbf_cnt);
                 cpu = task_cpu(p);
                 group = find_idlest_group(sd, p, cpu);
-                if (group) {
-                        int new_cpu;
-                        new_cpu = find_idlest_cpu(group, cpu);
-                        if (new_cpu != -1 && new_cpu != cpu &&
-                                        cpu_isset(new_cpu, p->cpus_allowed)) {
-                                set_task_cpu(p, new_cpu);
-                                task_rq_unlock(rq, &flags);
-                                rq = task_rq_lock(p, &flags);
-                                cpu = task_cpu(p);
-                        }
+                if (!group) {
+                        schedstat_inc(sd, sbf_balanced);
+                        goto no_forkbalance;
+                }
+
+                new_cpu = find_idlest_cpu(group, cpu);
+                if (new_cpu == -1 || new_cpu == cpu) {
+                        schedstat_inc(sd, sbf_balanced);
+                        goto no_forkbalance;
+                }
+
+                if (cpu_isset(new_cpu, p->cpus_allowed)) {
+                        schedstat_inc(sd, sbf_pushed);
+                        set_task_cpu(p, new_cpu);
+                        task_rq_unlock(rq, &flags);
+                        rq = task_rq_lock(p, &flags);
+                        cpu = task_cpu(p);
                 }
         }
-#endif
 
+no_forkbalance:
+#endif
         /*
          * We decrease the sleep average of forking parents
          * and children as well, to keep max-interactive tasks
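
Sketch (not part of the patch): the schedstat_inc() calls added above only do work when schedstats are compiled in; in kernel/sched.c of this era the macro is roughly a bare increment of the named field under CONFIG_SCHEDSTATS and a no-op otherwise. The standalone demo below mimics that pattern with a stand-in struct holding the sbf_* counters this patch introduces; the struct and the values are illustrative, not kernel code.

/*
 * Stand-in demo of the schedstat_inc() pattern: with CONFIG_SCHEDSTATS
 * it increments the named field, without it it compiles away.
 */
#include <stdio.h>

#define CONFIG_SCHEDSTATS 1

#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(ptr, field)      do { (ptr)->field++; } while (0)
#else
# define schedstat_inc(ptr, field)      do { } while (0)
#endif

struct demo_domain {
        unsigned long sbf_cnt, sbf_balanced, sbf_pushed;
};

int main(void)
{
        struct demo_domain sd = { 0, 0, 0 };

        schedstat_inc(&sd, sbf_cnt);            /* a fork-balance attempt */
        schedstat_inc(&sd, sbf_balanced);       /* ...that kept the child local */

        printf("cnt=%lu balanced=%lu pushed=%lu\n",
               sd.sbf_cnt, sd.sbf_balanced, sd.sbf_pushed);
        return 0;
}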
@@ -1618,30 +1629,28 @@ void sched_exec(void)
         struct sched_domain *tmp, *sd = NULL;
         int new_cpu, this_cpu = get_cpu();
 
-        /* Prefer the current CPU if there's only this task running */
-        if (this_rq()->nr_running <= 1)
-                goto out;
-
         for_each_domain(this_cpu, tmp)
                 if (tmp->flags & SD_BALANCE_EXEC)
                         sd = tmp;
 
         if (sd) {
                 struct sched_group *group;
-                schedstat_inc(sd, sbe_attempts);
+                schedstat_inc(sd, sbe_cnt);
                 group = find_idlest_group(sd, current, this_cpu);
-                if (!group)
+                if (!group) {
+                        schedstat_inc(sd, sbe_balanced);
                         goto out;
+                }
                 new_cpu = find_idlest_cpu(group, this_cpu);
-                if (new_cpu == -1)
+                if (new_cpu == -1 || new_cpu == this_cpu) {
+                        schedstat_inc(sd, sbe_balanced);
                         goto out;
-
-                if (new_cpu != this_cpu) {
-                        schedstat_inc(sd, sbe_pushed);
-                        put_cpu();
-                        sched_migrate_task(current, new_cpu);
-                        return;
                 }
+
+                schedstat_inc(sd, sbe_pushed);
+                put_cpu();
+                sched_migrate_task(current, new_cpu);
+                return;
         }
 out:
         put_cpu();
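
Usage note (not part of the patch): as restructured above, every sched_exec() pass that bumps sbe_cnt ends by bumping either sbe_balanced or sbe_pushed, so the exec push rate is simply sbe_pushed / sbe_cnt. The fork-side sbf_* counters are not quite symmetric: an attempt whose chosen CPU is rejected by p->cpus_allowed increments neither sbf_balanced nor sbf_pushed. A tiny illustration of the arithmetic, with made-up values:

/* Compute the exec-balance push rate from the sbe_* counters. */
#include <stdio.h>

static double push_rate(unsigned long cnt, unsigned long pushed)
{
        return cnt ? (double)pushed / (double)cnt : 0.0;
}

int main(void)
{
        /* example values, not measurements */
        unsigned long sbe_cnt = 1000, sbe_balanced = 940, sbe_pushed = 60;

        printf("exec balance push rate: %.1f%% (balanced %lu)\n",
               100.0 * push_rate(sbe_cnt, sbe_pushed), sbe_balanced);
        return 0;
}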