author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:18 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:18 -0400
commit		cc367732ff0b1c63d0d7bdd11e6d1661794ef6a3 (patch)
tree		27c65e05bf19644224937624ca356c876e6d1318 /kernel/sched.c
parent		2d92f22784b7b8879ebe3254e44c92cb8792b0dd (diff)
sched: debug, improve migration statistics
add new migration statistics when SCHED_DEBUG and SCHEDSTATS are enabled. Available in /proc/<PID>/sched.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
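With both options enabled, the new per-task counters show up next to the existing scheduler statistics in /proc/<PID>/sched. As a rough userspace sketch (the matching strings assume the fields are printed one per line under their se.* names by the sched_debug code, which is not part of this diff):

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	/* default to the current process if no PID is given */
	snprintf(path, sizeof(path), "/proc/%s/sched",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* print only the migration/wakeup counters added by this patch */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "nr_migrations") ||
		    strstr(line, "nr_forced") ||
		    strstr(line, "nr_failed_migrations") ||
		    strstr(line, "nr_wakeups"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}

Running this against a busy task before and after pinning it to a single CPU is a quick way to watch nr_migrations and the nr_failed_migrations_* counters move.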
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	76
1 file changed, 54 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 945ab1322e18..3b27c3a553aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1005,6 +1005,23 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #ifdef CONFIG_SMP
 
+/*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+	s64 delta;
+
+	if (p->sched_class != &fair_sched_class)
+		return 0;
+
+	delta = now - p->se.exec_start;
+
+	return delta < (s64)sysctl_sched_migration_cost;
+}
+
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
@@ -1022,6 +1039,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+	if (old_cpu != new_cpu) {
+		schedstat_inc(p, se.nr_migrations);
+		if (task_hot(p, old_rq->clock, NULL))
+			schedstat_inc(p, se.nr_forced2_migrations);
+	}
 #endif
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
@@ -1394,8 +1416,13 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if (sd->flags & SD_WAKE_IDLE) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
 			for_each_cpu_mask(i, tmp) {
-				if (idle_cpu(i))
+				if (idle_cpu(i)) {
+					if (i != task_cpu(p)) {
+						schedstat_inc(p,
+							se.nr_wakeups_idle);
+					}
 					return i;
+				}
 			}
 		} else {
 			break;
@@ -1426,7 +1453,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
-	int cpu, this_cpu, success = 0;
+	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
 	long old_state;
 	struct rq *rq;
@@ -1445,6 +1472,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		goto out_running;
 
 	cpu = task_cpu(p);
+	orig_cpu = cpu;
 	this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
@@ -1488,6 +1516,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		unsigned long tl = this_load;
 		unsigned long tl_per_task;
 
+		schedstat_inc(p, se.nr_wakeups_affine_attempts);
 		tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 		/*
@@ -1507,6 +1536,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 			 * there is no bad imbalance.
 			 */
 			schedstat_inc(this_sd, ttwu_move_affine);
+			schedstat_inc(p, se.nr_wakeups_affine);
 			goto out_set_cpu;
 		}
 	}
@@ -1518,6 +1548,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		if (this_sd->flags & SD_WAKE_BALANCE) {
 			if (imbalance*this_load <= 100*load) {
 				schedstat_inc(this_sd, ttwu_move_balance);
+				schedstat_inc(p, se.nr_wakeups_passive);
 				goto out_set_cpu;
 			}
 		}
@@ -1543,6 +1574,15 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
+	schedstat_inc(p, se.nr_wakeups);
+	if (sync)
+		schedstat_inc(p, se.nr_wakeups_sync);
+	if (orig_cpu != cpu)
+		schedstat_inc(p, se.nr_wakeups_migrate);
+	if (cpu == this_cpu)
+		schedstat_inc(p, se.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.nr_wakeups_remote);
 	update_rq_clock(rq);
 	activate_task(rq, p, 1);
 	/*
@@ -2119,22 +2159,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 }
 
 /*
- * Is this task likely cache-hot:
- */
-static inline int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
-{
-	s64 delta;
-
-	if (p->sched_class != &fair_sched_class)
-		return 0;
-
-	delta = now - p->se.exec_start;
-
-	return delta < (s64)sysctl_sched_migration_cost;
-}
-
-/*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
@@ -2148,12 +2172,16 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpu_isset(this_cpu, p->cpus_allowed))
+	if (!cpu_isset(this_cpu, p->cpus_allowed)) {
+		schedstat_inc(p, se.nr_failed_migrations_affine);
 		return 0;
+	}
 	*all_pinned = 0;
 
-	if (task_running(rq, p))
+	if (task_running(rq, p)) {
+		schedstat_inc(p, se.nr_failed_migrations_running);
 		return 0;
+	}
 
 	/*
 	 * Aggressive migration if:
@@ -2163,14 +2191,18 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 
 	if (sd->nr_balance_failed > sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, rq->clock, sd))
+		if (task_hot(p, rq->clock, sd)) {
 			schedstat_inc(sd, lb_hot_gained[idle]);
+			schedstat_inc(p, se.nr_forced_migrations);
+		}
 #endif
 		return 1;
 	}
 
-	if (task_hot(p, rq->clock, sd))
+	if (task_hot(p, rq->clock, sd)) {
+		schedstat_inc(p, se.nr_failed_migrations_hot);
 		return 0;
+	}
 	return 1;
 }
 
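Note that schedstat_inc() compiles away entirely when CONFIG_SCHEDSTATS is disabled, so the calls added above cost nothing in non-debug builds. A minimal sketch of the pattern, assuming the 2.6.23-era layout (the real macro lives in kernel/sched_stats.h and is not part of this diff):

/* Sketch only: increments a statistics field, or expands to nothing. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(ptr, field)	do { (ptr)->field++; } while (0)
#else
# define schedstat_inc(ptr, field)	do { } while (0)
#endif

The counters themselves (se.nr_migrations, se.nr_wakeups_*, se.nr_failed_migrations_*, and so on) are new fields of struct sched_entity; their declarations belong to the include/linux/sched.h part of this patch, which is hidden here because the page is limited to kernel/sched.c.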