 -rw-r--r--  include/linux/sched.h  |  54
 -rw-r--r--  kernel/sched.c         |  47
 -rw-r--r--  kernel/sched_debug.c   | 101
 -rw-r--r--  kernel/sched_fair.c    |  65
 -rw-r--r--  kernel/sched_rt.c      |   2
 5 files changed, 106 insertions(+), 163 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b1753f7e48e..8cc863d66477 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1127,36 +1127,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight load;	/* for load-balancing */
-	struct rb_node run_node;
-	struct list_head group_node;
-	unsigned int on_rq;
-
-	u64 exec_start;
-	u64 sum_exec_runtime;
-	u64 vruntime;
-	u64 prev_sum_exec_runtime;
-
-	u64 last_wakeup;
-	u64 avg_overlap;
-
-	u64 nr_migrations;
-
-	u64 start_runtime;
-	u64 avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64 wait_start;
 	u64 wait_max;
 	u64 wait_count;
@@ -1188,6 +1160,30 @@ struct sched_entity {
 	u64 nr_wakeups_affine_attempts;
 	u64 nr_wakeups_passive;
 	u64 nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight load;	/* for load-balancing */
+	struct rb_node run_node;
+	struct list_head group_node;
+	unsigned int on_rq;
+
+	u64 exec_start;
+	u64 sum_exec_runtime;
+	u64 vruntime;
+	u64 prev_sum_exec_runtime;
+
+	u64 last_wakeup;
+	u64 avg_overlap;
+
+	u64 nr_migrations;
+
+	u64 start_runtime;
+	u64 avg_wakeup;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
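Taken together, the two hunks above give sched.h the following shape: every
schedstat-only field moves out of struct sched_entity into a new struct
sched_statistics, which is defined and embedded under the same
CONFIG_SCHEDSTATS guard. An abridged sketch of the result (assembled from the
hunks above, not a verbatim copy of the header):

	#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics {
		u64 wait_start, wait_max, wait_count, wait_sum;
		/* ... sleep, block, migration and wakeup counters ... */
		u64 nr_wakeups_idle;
	};
	#endif

	struct sched_entity {
		struct load_weight load;	/* for load-balancing */
		/* ... runtime bookkeeping, unchanged by this patch ... */
	#ifdef CONFIG_SCHEDSTATS
		struct sched_statistics statistics;	/* all stats behind one member */
	#endif
	};

Because the whole block now sits behind a single member, every consumer
switches from se->wait_max to se->statistics.wait_max; the remaining hunks
apply that rename mechanically.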
diff --git a/kernel/sched.c b/kernel/sched.c
index 2c1db81f80eb..a4aa071f08f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2437,15 +2437,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.nr_wakeups);
+	schedstat_inc(p, se.statistics.nr_wakeups);
 	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.nr_wakeups_sync);
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 	if (orig_cpu != cpu)
-		schedstat_inc(p, se.nr_wakeups_migrate);
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
 	if (cpu == this_cpu)
-		schedstat_inc(p, se.nr_wakeups_local);
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
 	else
-		schedstat_inc(p, se.nr_wakeups_remote);
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
 	activate_task(rq, p, 1);
 	success = 1;
 
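For context, schedstat_inc() and friends are token-pasting wrappers that
vanish when CONFIG_SCHEDSTATS is off, which is why renaming the field path is
the entire change at these call sites. Approximately as defined in
kernel/sched_stats.h of this era (paraphrased from memory, not part of this
patch):

	#ifdef CONFIG_SCHEDSTATS
	# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
	# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
	# define schedstat_set(var, val)	do { var = (val); } while (0)
	#else
	# define schedstat_inc(rq, field)	do { } while (0)
	# define schedstat_add(rq, field, amt)	do { } while (0)
	# define schedstat_set(var, val)	do { } while (0)
	#endif

Since the field argument is pasted verbatim into the expansion,
se.statistics.nr_wakeups works as a member path with no other plumbing.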
@@ -2532,36 +2532,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
-	p->se.wait_start = 0;
-	p->se.wait_max = 0;
-	p->se.wait_count = 0;
-	p->se.wait_sum = 0;
-
-	p->se.sleep_start = 0;
-	p->se.sleep_max = 0;
-	p->se.sum_sleep_runtime = 0;
-
-	p->se.block_start = 0;
-	p->se.block_max = 0;
-	p->se.exec_max = 0;
-	p->se.slice_max = 0;
-
-	p->se.nr_migrations_cold = 0;
-	p->se.nr_failed_migrations_affine = 0;
-	p->se.nr_failed_migrations_running = 0;
-	p->se.nr_failed_migrations_hot = 0;
-	p->se.nr_forced_migrations = 0;
-
-	p->se.nr_wakeups = 0;
-	p->se.nr_wakeups_sync = 0;
-	p->se.nr_wakeups_migrate = 0;
-	p->se.nr_wakeups_local = 0;
-	p->se.nr_wakeups_remote = 0;
-	p->se.nr_wakeups_affine = 0;
-	p->se.nr_wakeups_affine_attempts = 0;
-	p->se.nr_wakeups_passive = 0;
-	p->se.nr_wakeups_idle = 0;
-
+	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
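The single memset() is a safe replacement here because struct sched_statistics
holds only plain u64 counters — no pointers, lists, or atomics — and the
all-zero bit pattern is the value 0 for integer types. A minimal standalone
illustration of the pattern (hypothetical names, not kernel code):

	#include <string.h>

	struct stats  { unsigned long long wait_max, wait_sum, wait_count; };
	struct entity { int on_rq; struct stats statistics; };

	static void reset_stats(struct entity *e)
	{
		/* one call replaces ~25 per-field assignments */
		memset(&e->statistics, 0, sizeof(e->statistics));
	}

It also future-proofs the reset: any counter added to the struct later is
zeroed automatically instead of needing another assignment in this function.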
@@ -7910,9 +7881,9 @@ void normalize_rt_tasks(void)
 
 		p->se.exec_start = 0;
 #ifdef CONFIG_SCHEDSTATS
-		p->se.wait_start = 0;
-		p->se.sleep_start = 0;
-		p->se.block_start = 0;
+		p->se.statistics.wait_start = 0;
+		p->se.statistics.sleep_start = 0;
+		p->se.statistics.block_start = 0;
 #endif
 
 		if (!rt_task(p)) {
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..ad9df4422763 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
 	PN(se->vruntime);
 	PN(se->sum_exec_runtime);
 #ifdef CONFIG_SCHEDSTATS
-	PN(se->wait_start);
-	PN(se->sleep_start);
-	PN(se->block_start);
-	PN(se->sleep_max);
-	PN(se->block_max);
-	PN(se->exec_max);
-	PN(se->slice_max);
-	PN(se->wait_max);
-	PN(se->wait_sum);
-	P(se->wait_count);
+	PN(se->statistics.wait_start);
+	PN(se->statistics.sleep_start);
+	PN(se->statistics.block_start);
+	PN(se->statistics.sleep_max);
+	PN(se->statistics.block_max);
+	PN(se->statistics.exec_max);
+	PN(se->statistics.slice_max);
+	PN(se->statistics.wait_max);
+	PN(se->statistics.wait_sum);
+	P(se->statistics.wait_count);
 #endif
 	P(se->load.weight);
 #undef PN
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
 		SPLIT_NS(p->se.vruntime),
 		SPLIT_NS(p->se.sum_exec_runtime),
-		SPLIT_NS(p->se.sum_sleep_runtime));
+		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
 #else
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -413,34 +413,34 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
-	PN(se.wait_start);
-	PN(se.sleep_start);
-	PN(se.block_start);
-	PN(se.sleep_max);
-	PN(se.block_max);
-	PN(se.exec_max);
-	PN(se.slice_max);
-	PN(se.wait_max);
-	PN(se.wait_sum);
-	P(se.wait_count);
-	PN(se.iowait_sum);
-	P(se.iowait_count);
+	PN(se.statistics.wait_start);
+	PN(se.statistics.sleep_start);
+	PN(se.statistics.block_start);
+	PN(se.statistics.sleep_max);
+	PN(se.statistics.block_max);
+	PN(se.statistics.exec_max);
+	PN(se.statistics.slice_max);
+	PN(se.statistics.wait_max);
+	PN(se.statistics.wait_sum);
+	P(se.statistics.wait_count);
+	PN(se.statistics.iowait_sum);
+	P(se.statistics.iowait_count);
 	P(sched_info.bkl_count);
 	P(se.nr_migrations);
-	P(se.nr_migrations_cold);
-	P(se.nr_failed_migrations_affine);
-	P(se.nr_failed_migrations_running);
-	P(se.nr_failed_migrations_hot);
-	P(se.nr_forced_migrations);
-	P(se.nr_wakeups);
-	P(se.nr_wakeups_sync);
-	P(se.nr_wakeups_migrate);
-	P(se.nr_wakeups_local);
-	P(se.nr_wakeups_remote);
-	P(se.nr_wakeups_affine);
-	P(se.nr_wakeups_affine_attempts);
-	P(se.nr_wakeups_passive);
-	P(se.nr_wakeups_idle);
+	P(se.statistics.nr_migrations_cold);
+	P(se.statistics.nr_failed_migrations_affine);
+	P(se.statistics.nr_failed_migrations_running);
+	P(se.statistics.nr_failed_migrations_hot);
+	P(se.statistics.nr_forced_migrations);
+	P(se.statistics.nr_wakeups);
+	P(se.statistics.nr_wakeups_sync);
+	P(se.statistics.nr_wakeups_migrate);
+	P(se.statistics.nr_wakeups_local);
+	P(se.statistics.nr_wakeups_remote);
+	P(se.statistics.nr_wakeups_affine);
+	P(se.statistics.nr_wakeups_affine_attempts);
+	P(se.statistics.nr_wakeups_passive);
+	P(se.statistics.nr_wakeups_idle);
 
 	{
 		u64 avg_atom, avg_per_cpu;
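A side effect worth noting: P() and PN() stringify their argument to build the
label printed in /proc/<pid>/sched, so these one-line changes also rename the
output keys (e.g. se.wait_max becomes se.statistics.wait_max). The macros look
approximately like this in the same file (paraphrased from memory, not part of
this patch):

	#define P(F) \
		SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
	#define PN(F) \
		SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

Any tooling that parses those keys by name would need to follow the rename.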
@@ -491,32 +491,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
-	p->se.wait_max = 0;
-	p->se.wait_sum = 0;
-	p->se.wait_count = 0;
-	p->se.iowait_sum = 0;
-	p->se.iowait_count = 0;
-	p->se.sleep_max = 0;
-	p->se.sum_sleep_runtime = 0;
-	p->se.block_max = 0;
-	p->se.exec_max = 0;
-	p->se.slice_max = 0;
-	p->se.nr_migrations = 0;
-	p->se.nr_migrations_cold = 0;
-	p->se.nr_failed_migrations_affine = 0;
-	p->se.nr_failed_migrations_running = 0;
-	p->se.nr_failed_migrations_hot = 0;
-	p->se.nr_forced_migrations = 0;
-	p->se.nr_wakeups = 0;
-	p->se.nr_wakeups_sync = 0;
-	p->se.nr_wakeups_migrate = 0;
-	p->se.nr_wakeups_local = 0;
-	p->se.nr_wakeups_remote = 0;
-	p->se.nr_wakeups_affine = 0;
-	p->se.nr_wakeups_affine_attempts = 0;
-	p->se.nr_wakeups_passive = 0;
-	p->se.nr_wakeups_idle = 0;
-	p->sched_info.bkl_count = 0;
+	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
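Careful readers will spot a small behavior change in this hunk: the memset()
covers only se.statistics, but the deleted block also cleared se.nr_migrations
(which stays a direct sched_entity member per the sched.h hunk) and
p->sched_info.bkl_count (a member of a different struct entirely). After this
patch neither is reset by this path; preserving the old behavior would need
explicit lines, e.g. (illustrative, not in the patch):

	p->se.nr_migrations = 0;	/* lives outside se.statistics */
	p->sched_info.bkl_count = 0;	/* lives in p->sched_info */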
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3e1fd96c6cf9..8ad164bbdac1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 {
 	unsigned long delta_exec_weighted;
 
-	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+	schedstat_set(curr->statistics.exec_max,
+		      max((u64)delta_exec, curr->statistics.exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
554 | /* | 555 | /* |
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
567 | static void | 568 | static void |
568 | update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) | 569 | update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) |
569 | { | 570 | { |
570 | schedstat_set(se->wait_max, max(se->wait_max, | 571 | schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max, |
571 | rq_of(cfs_rq)->clock - se->wait_start)); | 572 | rq_of(cfs_rq)->clock - se->statistics.wait_start)); |
572 | schedstat_set(se->wait_count, se->wait_count + 1); | 573 | schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1); |
573 | schedstat_set(se->wait_sum, se->wait_sum + | 574 | schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum + |
574 | rq_of(cfs_rq)->clock - se->wait_start); | 575 | rq_of(cfs_rq)->clock - se->statistics.wait_start); |
575 | #ifdef CONFIG_SCHEDSTATS | 576 | #ifdef CONFIG_SCHEDSTATS |
576 | if (entity_is_task(se)) { | 577 | if (entity_is_task(se)) { |
577 | trace_sched_stat_wait(task_of(se), | 578 | trace_sched_stat_wait(task_of(se), |
578 | rq_of(cfs_rq)->clock - se->wait_start); | 579 | rq_of(cfs_rq)->clock - se->statistics.wait_start); |
579 | } | 580 | } |
580 | #endif | 581 | #endif |
581 | schedstat_set(se->wait_start, 0); | 582 | schedstat_set(se->statistics.wait_start, 0); |
582 | } | 583 | } |
583 | 584 | ||
584 | static inline void | 585 | static inline void |
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (entity_is_task(se))
 		tsk = task_of(se);
 
-	if (se->sleep_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+	if (se->statistics.sleep_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->sleep_max))
-			se->sleep_max = delta;
+		if (unlikely(delta > se->statistics.sleep_max))
+			se->statistics.sleep_max = delta;
 
-		se->sleep_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.sleep_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
-	if (se->block_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+	if (se->statistics.block_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->block_max))
-			se->block_max = delta;
+		if (unlikely(delta > se->statistics.block_max))
+			se->statistics.block_max = delta;
 
-		se->block_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.block_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->iowait_sum += delta;
-				se->iowait_count++;
+				se->statistics.iowait_sum += delta;
+				se->statistics.iowait_count++;
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -826,9 +827,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			se->sleep_start = rq_of(cfs_rq)->clock;
+			se->statistics.sleep_start = rq_of(cfs_rq)->clock;
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			se->block_start = rq_of(cfs_rq)->clock;
+			se->statistics.block_start = rq_of(cfs_rq)->clock;
 	}
 #endif
 }
@@ -912,7 +913,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-		se->slice_max = max(se->slice_max,
+		se->statistics.slice_max = max(se->statistics.slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
 #endif
@@ -1306,7 +1307,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (sync && balanced)
 		return 1;
 
-	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 	if (balanced ||
@@ -1318,7 +1319,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		 * there is no bad imbalance.
 		 */
 		schedstat_inc(sd, ttwu_move_affine);
-		schedstat_inc(p, se.nr_wakeups_affine);
+		schedstat_inc(p, se.statistics.nr_wakeups_affine);
 
 		return 1;
 	}
@@ -1844,13 +1845,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 3) are cache-hot on their current CPU.
 	 */
 	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
-		schedstat_inc(p, se.nr_failed_migrations_affine);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
 	*all_pinned = 0;
 
 	if (task_running(rq, p)) {
-		schedstat_inc(p, se.nr_failed_migrations_running);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
 
@@ -1866,14 +1867,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 #ifdef CONFIG_SCHEDSTATS
 		if (tsk_cache_hot) {
 			schedstat_inc(sd, lb_hot_gained[idle]);
-			schedstat_inc(p, se.nr_forced_migrations);
+			schedstat_inc(p, se.statistics.nr_forced_migrations);
 		}
 #endif
 		return 1;
 	}
 
 	if (tsk_cache_hot) {
-		schedstat_inc(p, se.nr_failed_migrations_hot);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
 		return 0;
 	}
 	return 1;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c4fb42a66cab..0335e87f5204 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -613,7 +613,7 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);