Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/sched.c         76
 -rw-r--r--   kernel/sched_debug.c   87
 2 files changed, 131 insertions, 32 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 945ab1322e18..3b27c3a553aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1005,6 +1005,23 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #ifdef CONFIG_SMP
 
+/*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+	s64 delta;
+
+	if (p->sched_class != &fair_sched_class)
+		return 0;
+
+	delta = now - p->se.exec_start;
+
+	return delta < (s64)sysctl_sched_migration_cost;
+}
+
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
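
task_hot() is hoisted here (and removed from its old spot above can_migrate_task() further down) so that set_task_cpu() can use it. The rule is simple: a CFS task counts as cache-hot if it last started executing less than sysctl_sched_migration_cost nanoseconds ago. A stand-alone sketch of the same test, assuming the usual 0.5 ms default for that knob (the real value is a runtime-tunable):

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

/* Assumed default of sysctl_sched_migration_cost; illustrative only. */
static unsigned long sysctl_sched_migration_cost = 500000UL;	/* ~0.5 ms */

static int task_hot(u64 now, u64 exec_start)
{
	s64 delta = (s64)(now - exec_start);

	return delta < (s64)sysctl_sched_migration_cost;
}

int main(void)
{
	/* Ran 100 us ago: still hot.  Ran 2 ms ago: cold, cheap to migrate. */
	printf("100us ago -> hot=%d\n", task_hot(2000000ULL, 1900000ULL));
	printf("2ms ago   -> hot=%d\n", task_hot(3000000ULL, 1000000ULL));
	return 0;
}
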
@@ -1022,6 +1039,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+	if (old_cpu != new_cpu) {
+		schedstat_inc(p, se.nr_migrations);
+		if (task_hot(p, old_rq->clock, NULL))
+			schedstat_inc(p, se.nr_forced2_migrations);
+	}
 #endif
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
@@ -1394,8 +1416,13 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if (sd->flags & SD_WAKE_IDLE) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
 			for_each_cpu_mask(i, tmp) {
-				if (idle_cpu(i))
+				if (idle_cpu(i)) {
+					if (i != task_cpu(p)) {
+						schedstat_inc(p,
+						       se.nr_wakeups_idle);
+					}
 					return i;
+				}
 			}
 		} else {
 			break;
@@ -1426,7 +1453,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
-	int cpu, this_cpu, success = 0;
+	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
 	long old_state;
 	struct rq *rq;
@@ -1445,6 +1472,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		goto out_running;
 
 	cpu = task_cpu(p);
+	orig_cpu = cpu;
 	this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
@@ -1488,6 +1516,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 			unsigned long tl = this_load;
 			unsigned long tl_per_task;
 
+			schedstat_inc(p, se.nr_wakeups_affine_attempts);
 			tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 			/*
@@ -1507,6 +1536,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 				 * there is no bad imbalance.
 				 */
 				schedstat_inc(this_sd, ttwu_move_affine);
+				schedstat_inc(p, se.nr_wakeups_affine);
 				goto out_set_cpu;
 			}
 		}
@@ -1518,6 +1548,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		if (this_sd->flags & SD_WAKE_BALANCE) {
 			if (imbalance*this_load <= 100*load) {
 				schedstat_inc(this_sd, ttwu_move_balance);
+				schedstat_inc(p, se.nr_wakeups_passive);
 				goto out_set_cpu;
 			}
 		}
@@ -1543,6 +1574,15 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
+	schedstat_inc(p, se.nr_wakeups);
+	if (sync)
+		schedstat_inc(p, se.nr_wakeups_sync);
+	if (orig_cpu != cpu)
+		schedstat_inc(p, se.nr_wakeups_migrate);
+	if (cpu == this_cpu)
+		schedstat_inc(p, se.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.nr_wakeups_remote);
 	update_rq_clock(rq);
 	activate_task(rq, p, 1);
 	/*
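
The block above classifies every wakeup at activation time: nr_wakeups counts them all, nr_wakeups_sync the synchronous ones, nr_wakeups_migrate those where the chosen CPU differs from the task's previous CPU, and nr_wakeups_local/nr_wakeups_remote split them by whether the target CPU is the waker's own. A hypothetical user-space mirror of that decision tree (struct and names are illustrative, not kernel API):

#include <stdio.h>

struct wakeup_stats {
	unsigned long nr_wakeups;
	unsigned long nr_wakeups_sync;
	unsigned long nr_wakeups_migrate;
	unsigned long nr_wakeups_local;
	unsigned long nr_wakeups_remote;
};

static void account_wakeup(struct wakeup_stats *ws, int sync,
			   int orig_cpu, int cpu, int this_cpu)
{
	ws->nr_wakeups++;			/* every wakeup */
	if (sync)
		ws->nr_wakeups_sync++;		/* waker expects to sleep soon */
	if (orig_cpu != cpu)
		ws->nr_wakeups_migrate++;	/* woken onto a different CPU */
	if (cpu == this_cpu)
		ws->nr_wakeups_local++;		/* woken on the waker's CPU */
	else
		ws->nr_wakeups_remote++;
}

int main(void)
{
	struct wakeup_stats ws = { 0 };

	/* sync wakeup that pulled the task from CPU 3 to the waker's CPU 0 */
	account_wakeup(&ws, 1, 3, 0, 0);
	printf("wakeups=%lu sync=%lu migrate=%lu local=%lu remote=%lu\n",
	       ws.nr_wakeups, ws.nr_wakeups_sync, ws.nr_wakeups_migrate,
	       ws.nr_wakeups_local, ws.nr_wakeups_remote);
	return 0;
}
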
@@ -2119,22 +2159,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 }
 
 /*
- * Is this task likely cache-hot:
- */
-static inline int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
-{
-	s64 delta;
-
-	if (p->sched_class != &fair_sched_class)
-		return 0;
-
-	delta = now - p->se.exec_start;
-
-	return delta < (s64)sysctl_sched_migration_cost;
-}
-
-/*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
@@ -2148,12 +2172,16 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpu_isset(this_cpu, p->cpus_allowed))
+	if (!cpu_isset(this_cpu, p->cpus_allowed)) {
+		schedstat_inc(p, se.nr_failed_migrations_affine);
 		return 0;
+	}
 	*all_pinned = 0;
 
-	if (task_running(rq, p))
+	if (task_running(rq, p)) {
+		schedstat_inc(p, se.nr_failed_migrations_running);
 		return 0;
+	}
 
 	/*
 	 * Aggressive migration if:
@@ -2163,14 +2191,18 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 
 	if (sd->nr_balance_failed > sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, rq->clock, sd))
+		if (task_hot(p, rq->clock, sd)) {
 			schedstat_inc(sd, lb_hot_gained[idle]);
+			schedstat_inc(p, se.nr_forced_migrations);
+		}
 #endif
 		return 1;
 	}
 
-	if (task_hot(p, rq->clock, sd))
+	if (task_hot(p, rq->clock, sd)) {
+		schedstat_inc(p, se.nr_failed_migrations_hot);
 		return 0;
+	}
 	return 1;
 }
 
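
All of the per-task counters added in this file are bumped through schedstat_inc(), so they generate no code unless CONFIG_SCHEDSTATS is enabled. A rough sketch of that idiom, an approximation of the kernel macro shown only to make the config dependence explicit:

#include <stdio.h>

/* Approximation of schedstat_inc(): increments the named field of its
 * first argument under CONFIG_SCHEDSTATS, otherwise compiles to nothing. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(obj, field)	do { (obj)->field++; } while (0)
#else
# define schedstat_inc(obj, field)	do { } while (0)
#endif

/* Hypothetical stand-ins for the kernel structures. */
struct demo_se { unsigned long nr_forced_migrations; };
struct demo_task { struct demo_se se; };

int main(void)
{
	struct demo_task t = { { 0 } };
	struct demo_task *p = &t;

	schedstat_inc(p, se.nr_forced_migrations);
	printf("nr_forced_migrations = %lu\n", p->se.nr_forced_migrations);
	return 0;
}
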
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 755815937417..27e82cbccaa5 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -260,6 +260,7 @@ __initcall(init_sched_debug_procfs);
 
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
+	unsigned long nr_switches;
 	unsigned long flags;
 	int num_threads = 1;
 
@@ -273,8 +274,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
 	SEQ_printf(m,
 		"---------------------------------------------------------\n");
+#define __P(F) \
+	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
 #define P(F) \
 	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
+#define __PN(F) \
+	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
 #define PN(F) \
 	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 
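
The new __P()/__PN() variants print a local expression F directly, whereas the existing P()/PN() print the task field p->F; the *N flavours additionally render a nanosecond value as seconds.microseconds via SPLIT_NS(). A minimal stand-alone demo of the pattern, with printf() standing in for SEQ_printf():

#include <stdio.h>

/* Demo of the P()/__P() macro pair: P() dereferences a field of the task
 * pointer p, __P() prints a local value.  Structures are illustrative. */
struct task { struct { long long nr_wakeups; } se; };

#define __P(F)	printf("%-35s:%21lld\n", #F, (long long)(F))
#define P(F)	printf("%-35s:%21lld\n", #F, (long long)p->F)

int main(void)
{
	struct task t = { .se = { .nr_wakeups = 42 } };
	struct task *p = &t;
	long long nr_switches = 7;

	P(se.nr_wakeups);	/* prints "se.nr_wakeups" and 42 */
	__P(nr_switches);	/* prints "nr_switches" and 7    */
	return 0;
}
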
@@ -282,6 +287,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 
+	nr_switches = p->nvcsw + p->nivcsw;
+
 #ifdef CONFIG_SCHEDSTATS
 	PN(se.wait_start);
 	PN(se.sleep_start);
@@ -292,14 +299,55 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.slice_max);
 	PN(se.wait_max);
 	P(sched_info.bkl_count);
+	P(se.nr_migrations);
+	P(se.nr_migrations_cold);
+	P(se.nr_failed_migrations_affine);
+	P(se.nr_failed_migrations_running);
+	P(se.nr_failed_migrations_hot);
+	P(se.nr_forced_migrations);
+	P(se.nr_forced2_migrations);
+	P(se.nr_wakeups);
+	P(se.nr_wakeups_sync);
+	P(se.nr_wakeups_migrate);
+	P(se.nr_wakeups_local);
+	P(se.nr_wakeups_remote);
+	P(se.nr_wakeups_affine);
+	P(se.nr_wakeups_affine_attempts);
+	P(se.nr_wakeups_passive);
+	P(se.nr_wakeups_idle);
+
+	{
+		u64 avg_atom, avg_per_cpu;
+
+		avg_atom = p->se.sum_exec_runtime;
+		if (nr_switches)
+			do_div(avg_atom, nr_switches);
+		else
+			avg_atom = -1LL;
+
+		avg_per_cpu = p->se.sum_exec_runtime;
+		if (p->se.nr_migrations)
+			avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
+		else
+			avg_per_cpu = -1LL;
+
+		__PN(avg_atom);
+		__PN(avg_per_cpu);
+	}
 #endif
+	__P(nr_switches);
 	SEQ_printf(m, "%-35s:%21Ld\n",
-		"nr_switches", (long long)(p->nvcsw + p->nivcsw));
+		"nr_voluntary_switches", (long long)p->nvcsw);
+	SEQ_printf(m, "%-35s:%21Ld\n",
+		"nr_involuntary_switches", (long long)p->nivcsw);
+
 	P(se.load.weight);
 	P(policy);
 	P(prio);
-#undef P
 #undef PN
+#undef __PN
+#undef P
+#undef __P
 
 	{
 		u64 t0, t1;
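
The added block derives two summary values from the raw counters: avg_atom = sum_exec_runtime / (nvcsw + nivcsw), the average CPU time per scheduling atom, and avg_per_cpu = sum_exec_runtime / nr_migrations, the average CPU time accumulated per stay on one CPU; both report -1 when the divisor is zero. A quick worked check with made-up numbers, using plain 64-bit division in place of do_div()/div64_64():

#include <stdio.h>

int main(void)
{
	/* Sample figures in nanoseconds; purely illustrative. */
	unsigned long long sum_exec_runtime = 480000000ULL;	/* 480 ms total */
	unsigned long nr_switches = 1200;			/* nvcsw + nivcsw */
	unsigned long long nr_migrations = 16;

	long long avg_atom = nr_switches ?
		(long long)(sum_exec_runtime / nr_switches) : -1LL;
	long long avg_per_cpu = nr_migrations ?
		(long long)(sum_exec_runtime / nr_migrations) : -1LL;

	printf("avg_atom    = %lld ns\n", avg_atom);	/* 400000 ns */
	printf("avg_per_cpu = %lld ns\n", avg_per_cpu);	/* 30000000 ns */
	return 0;
}
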
@@ -314,13 +362,32 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
-	p->se.sleep_max				= 0;
-	p->se.block_max				= 0;
-	p->se.exec_max				= 0;
-	p->se.slice_max				= 0;
-	p->se.wait_max				= 0;
-	p->sched_info.bkl_count			= 0;
+	p->se.wait_max				= 0;
+	p->se.sleep_max				= 0;
+	p->se.sum_sleep_runtime			= 0;
+	p->se.block_max				= 0;
+	p->se.exec_max				= 0;
+	p->se.slice_max				= 0;
+	p->se.nr_migrations			= 0;
+	p->se.nr_migrations_cold		= 0;
+	p->se.nr_failed_migrations_affine	= 0;
+	p->se.nr_failed_migrations_running	= 0;
+	p->se.nr_failed_migrations_hot		= 0;
+	p->se.nr_forced_migrations		= 0;
+	p->se.nr_forced2_migrations		= 0;
+	p->se.nr_wakeups			= 0;
+	p->se.nr_wakeups_sync			= 0;
+	p->se.nr_wakeups_migrate		= 0;
+	p->se.nr_wakeups_local			= 0;
+	p->se.nr_wakeups_remote			= 0;
+	p->se.nr_wakeups_affine			= 0;
+	p->se.nr_wakeups_affine_attempts	= 0;
+	p->se.nr_wakeups_passive		= 0;
+	p->se.nr_wakeups_idle			= 0;
+	p->sched_info.bkl_count			= 0;
 #endif
 	p->se.sum_exec_runtime			= 0;
 	p->se.prev_sum_exec_runtime		= 0;
+	p->nvcsw				= 0;
+	p->nivcsw				= 0;
 }
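
With the patch applied, the new counters appear as extra lines in /proc/<pid>/sched, and a write to that file ends up in proc_sched_set_task(), which clears them (the write-side proc plumbing lives outside this diff, in fs/proc/base.c). A small reader for the current process, assuming CONFIG_SCHED_DEBUG and, for the new counters, CONFIG_SCHEDSTATS:

#include <stdio.h>

/* Dump the per-task scheduler statistics of the current process. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("/proc/self/sched");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
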