 include/linux/sched.h | 10 ++++++++--
 kernel/sched.c        | 63 ++++++++++++++++++++++++++++++++++++---------------------------
 2 files changed, 44 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 613491d3a875..36a10781c3f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -517,10 +517,16 @@ struct sched_domain {
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
-	/* sched_balance_exec() stats */
-	unsigned long sbe_attempts;
+	/* SD_BALANCE_EXEC stats */
+	unsigned long sbe_cnt;
+	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
+	/* SD_BALANCE_FORK stats */
+	unsigned long sbf_cnt;
+	unsigned long sbf_balanced;
+	unsigned long sbf_pushed;
+
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_wake_remote;
 	unsigned long ttwu_move_affine;
diff --git a/kernel/sched.c b/kernel/sched.c
index 7ecc237e2aab..2711130cd973 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,7 +309,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 11
+#define SCHEDSTAT_VERSION 12
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
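The comment above is the contract that justifies the bump: consumers of /proc/schedstat are expected to check the version line and adapt or abort. A minimal sketch of that check, assuming only that the file begins with a "version N" line (which it does in this era); the rest of the parser is elided:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/schedstat", "r");
	int ver = 0;

	/* First line of /proc/schedstat is "version N"; refuse formats
	 * this tool was not written for, per the comment in sched.c. */
	if (!f || fscanf(f, "version %d", &ver) != 1 || ver != 12) {
		fprintf(stderr, "unsupported schedstat version %d\n", ver);
		return 1;
	}
	/* ... parse the cpuN and domainN lines here ... */
	fclose(f);
	return 0;
}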
@@ -356,9 +356,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
 					    sd->lb_nobusyq[itype],
 					    sd->lb_nobusyg[itype]);
 		}
-		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n",
+		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
 		    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-		    sd->sbe_pushed, sd->sbe_attempts,
+		    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+		    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
 		    sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
 	}
 #endif
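Under version 12 each domainN line now ends with twelve counters, in the order printed above: alb_cnt, alb_failed, alb_pushed, sbe_cnt, sbe_balanced, sbe_pushed, sbf_cnt, sbf_balanced, sbf_pushed, ttwu_wake_remote, ttwu_move_affine, ttwu_move_balance. A userspace sketch of pulling them out follows; parse_domain_tail() is a hypothetical helper, not part of the kernel or any tool, and it walks backward from the end of the line because the number of lb_* fields ahead of the tail depends on how many idle types are printed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Extract the last twelve numeric fields of a "domainN ..." line. */
static int parse_domain_tail(const char *line, unsigned long tail[12])
{
	char *copy = strdup(line);
	char *tok, *save = NULL;
	char *fields[256];
	int n = 0, i;

	if (!copy)
		return -1;
	for (tok = strtok_r(copy, " \t\n", &save); tok && n < 256;
	     tok = strtok_r(NULL, " \t\n", &save))
		fields[n++] = tok;
	if (n < 12) {
		free(copy);
		return -1;
	}
	for (i = 0; i < 12; i++)
		tail[i] = strtoul(fields[n - 12 + i], NULL, 10);
	free(copy);
	return 0;
}

int main(void)
{
	char line[4096];
	unsigned long t[12];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "domain", 6) && !parse_domain_tail(line, t))
			printf("sbe_cnt=%lu sbe_balanced=%lu sbe_pushed=%lu "
			       "sbf_cnt=%lu sbf_balanced=%lu sbf_pushed=%lu\n",
			       t[3], t[4], t[5], t[6], t[7], t[8]);
	fclose(f);
	return 0;
}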
@@ -1264,24 +1265,34 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
 			sd = tmp;
 
 	if (sd) {
+		int new_cpu;
 		struct sched_group *group;
 
+		schedstat_inc(sd, sbf_cnt);
 		cpu = task_cpu(p);
 		group = find_idlest_group(sd, p, cpu);
-		if (group) {
-			int new_cpu;
-			new_cpu = find_idlest_cpu(group, cpu);
-			if (new_cpu != -1 && new_cpu != cpu &&
-			    cpu_isset(new_cpu, p->cpus_allowed)) {
-				set_task_cpu(p, new_cpu);
-				task_rq_unlock(rq, &flags);
-				rq = task_rq_lock(p, &flags);
-				cpu = task_cpu(p);
-			}
+		if (!group) {
+			schedstat_inc(sd, sbf_balanced);
+			goto no_forkbalance;
+		}
+
+		new_cpu = find_idlest_cpu(group, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			schedstat_inc(sd, sbf_balanced);
+			goto no_forkbalance;
+		}
+
+		if (cpu_isset(new_cpu, p->cpus_allowed)) {
+			schedstat_inc(sd, sbf_pushed);
+			set_task_cpu(p, new_cpu);
+			task_rq_unlock(rq, &flags);
+			rq = task_rq_lock(p, &flags);
+			cpu = task_cpu(p);
 		}
 	}
-#endif
 
+no_forkbalance:
+#endif
 	/*
 	 * We decrease the sleep average of forking parents
 	 * and children as well, to keep max-interactive tasks
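The fork-path bookkeeping above is almost a partition: every attempt bumps sbf_cnt, settling on the current CPU bumps sbf_balanced, and a migration bumps sbf_pushed, but an attempt rejected by the cpus_allowed test bumps neither of the latter two. So sbf_balanced + sbf_pushed <= sbf_cnt, with the gap measuring affinity-blocked balances. A throwaway sketch of reading the three counters that way, assuming they were parsed as in the earlier example:

#include <stdio.h>

static void report_sbf(unsigned long cnt, unsigned long balanced,
		       unsigned long pushed)
{
	if (!cnt)
		return;
	/* The remainder is attempts whose chosen CPU failed the
	 * cpus_allowed check, which the kernel does not count directly. */
	printf("fork balances: %lu total, %.1f%% stayed, %.1f%% migrated, "
	       "%.1f%% affinity-blocked\n", cnt,
	       100.0 * balanced / cnt, 100.0 * pushed / cnt,
	       100.0 * (cnt - balanced - pushed) / cnt);
}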
@@ -1618,30 +1629,28 @@ void sched_exec(void)
 	struct sched_domain *tmp, *sd = NULL;
 	int new_cpu, this_cpu = get_cpu();
 
-	/* Prefer the current CPU if there's only this task running */
-	if (this_rq()->nr_running <= 1)
-		goto out;
-
 	for_each_domain(this_cpu, tmp)
 		if (tmp->flags & SD_BALANCE_EXEC)
 			sd = tmp;
 
 	if (sd) {
 		struct sched_group *group;
-		schedstat_inc(sd, sbe_attempts);
+		schedstat_inc(sd, sbe_cnt);
 		group = find_idlest_group(sd, current, this_cpu);
-		if (!group)
+		if (!group) {
+			schedstat_inc(sd, sbe_balanced);
 			goto out;
+		}
 		new_cpu = find_idlest_cpu(group, this_cpu);
-		if (new_cpu == -1)
+		if (new_cpu == -1 || new_cpu == this_cpu) {
+			schedstat_inc(sd, sbe_balanced);
 			goto out;
-
-		if (new_cpu != this_cpu) {
-			schedstat_inc(sd, sbe_pushed);
-			put_cpu();
-			sched_migrate_task(current, new_cpu);
-			return;
 		}
+
+		schedstat_inc(sd, sbe_pushed);
+		put_cpu();
+		sched_migrate_task(current, new_cpu);
+		return;
 	}
 out:
 	put_cpu();
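On the exec path, unlike the fork path, every attempt ends in exactly one of the two outcomes: finding no better group or CPU counts as sbe_balanced, a migration counts as sbe_pushed, and there is no affinity filter in between. So, up to in-flight updates, sbe_cnt == sbe_balanced + sbe_pushed should hold on a version-12 kernel; a sketch of that sanity check:

#include <stdio.h>

/* Illustrative: on the exec path the two outcomes partition the
 * attempts, so a persistent gap points at a parsing bug (or a very
 * racy snapshot of the counters). */
static int check_sbe(unsigned long cnt, unsigned long balanced,
		     unsigned long pushed)
{
	if (cnt != balanced + pushed) {
		fprintf(stderr, "sbe mismatch: %lu != %lu + %lu\n",
			cnt, balanced, pushed);
		return -1;
	}
	return 0;
}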