author     Lucas De Marchi <lucas.de.marchi@gmail.com>    2010-03-10 21:37:45 -0500
committer  Ingo Molnar <mingo@elte.hu>                    2010-03-11 09:22:28 -0500
commit     41acab8851a0408c1d5ad6c21a07456f88b54d40 (patch)
tree       28b23b930571c1f6dfd5c4e8129a2a7ea2056307 /kernel/sched_fair.c
parent     3d07467b7aa91623b31d7b5888a123a2c8c8e9cc (diff)
sched: Implement group scheduler statistics in one struct
Put all of sched_entity's statistics fields into one struct, sched_statistics,
and embed it in sched_entity.
This change makes it possible to memset the sched_statistics to 0 when needed
(for instance when forking), avoiding bugs caused by uninitialized fields.
Signed-off-by: Lucas De Marchi <lucas.de.marchi@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268275065-18542-1-git-send-email-lucas.de.marchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
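The diff below covers only the call sites in kernel/sched_fair.c (the diffstat is limited to that file); the struct definition and the fork-time reset belong to other files of the same patch. As a rough, abbreviated sketch of the layout the description above implies -- field list shortened, and the fork-time reset written as a hypothetical standalone helper for illustration, not copied from the actual patch:

/* Abbreviated sketch -- the real definition (include/linux/sched.h) carries
 * every former per-field statistic of sched_entity. */
struct sched_statistics {
	u64	wait_start, wait_max, wait_count, wait_sum;
	u64	sleep_start, sleep_max;
	u64	block_start, block_max;
	u64	exec_max, slice_max;
	u64	sum_sleep_runtime;
	/* ... iowait, wakeup and migration counters ... */
};

struct sched_entity {
	/* load weight, rbtree node, runtime fields unchanged ... */
	struct sched_statistics statistics;	/* replaces the loose fields */
	/* ... */
};

/* The fork-time reset the description refers to: one memset clears every
 * statistic at once, so a newly added field can never be left uninitialized. */
static inline void reset_se_statistics(struct sched_entity *se)
{
	memset(&se->statistics, 0, sizeof(se->statistics));
}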
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   65
1 file changed, 33 insertions(+), 32 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3e1fd96c6cf9..8ad164bbdac1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 {
 	unsigned long delta_exec_weighted;
 
-	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+	schedstat_set(curr->statistics.exec_max,
+		      max((u64)delta_exec, curr->statistics.exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->wait_max, max(se->wait_max,
-			rq_of(cfs_rq)->clock - se->wait_start));
-	schedstat_set(se->wait_count, se->wait_count + 1);
-	schedstat_set(se->wait_sum, se->wait_sum +
-			rq_of(cfs_rq)->clock - se->wait_start);
+	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
+			rq_of(cfs_rq)->clock - se->statistics.wait_start));
+	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
+	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
+			rq_of(cfs_rq)->clock - se->statistics.wait_start);
 #ifdef CONFIG_SCHEDSTATS
 	if (entity_is_task(se)) {
 		trace_sched_stat_wait(task_of(se),
-			rq_of(cfs_rq)->clock - se->wait_start);
+			rq_of(cfs_rq)->clock - se->statistics.wait_start);
 	}
 #endif
-	schedstat_set(se->wait_start, 0);
+	schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (entity_is_task(se))
 		tsk = task_of(se);
 
-	if (se->sleep_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+	if (se->statistics.sleep_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->sleep_max))
-			se->sleep_max = delta;
+		if (unlikely(delta > se->statistics.sleep_max))
+			se->statistics.sleep_max = delta;
 
-		se->sleep_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.sleep_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
-	if (se->block_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+	if (se->statistics.block_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->block_max))
-			se->block_max = delta;
+		if (unlikely(delta > se->statistics.block_max))
+			se->statistics.block_max = delta;
 
-		se->block_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.block_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->iowait_sum += delta;
-				se->iowait_count++;
+				se->statistics.iowait_sum += delta;
+				se->statistics.iowait_count++;
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -826,9 +827,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			se->sleep_start = rq_of(cfs_rq)->clock;
+			se->statistics.sleep_start = rq_of(cfs_rq)->clock;
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			se->block_start = rq_of(cfs_rq)->clock;
+			se->statistics.block_start = rq_of(cfs_rq)->clock;
 	}
 #endif
 }
@@ -912,7 +913,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-		se->slice_max = max(se->slice_max,
+		se->statistics.slice_max = max(se->statistics.slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
 #endif
@@ -1306,7 +1307,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (sync && balanced)
 		return 1;
 
-	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 	if (balanced ||
@@ -1318,7 +1319,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		 * there is no bad imbalance.
 		 */
 		schedstat_inc(sd, ttwu_move_affine);
-		schedstat_inc(p, se.nr_wakeups_affine);
+		schedstat_inc(p, se.statistics.nr_wakeups_affine);
 
 		return 1;
 	}
@@ -1844,13 +1845,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 3) are cache-hot on their current CPU.
 	 */
 	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
-		schedstat_inc(p, se.nr_failed_migrations_affine);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
 	*all_pinned = 0;
 
 	if (task_running(rq, p)) {
-		schedstat_inc(p, se.nr_failed_migrations_running);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
 
@@ -1866,14 +1867,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 #ifdef CONFIG_SCHEDSTATS
 		if (tsk_cache_hot) {
 			schedstat_inc(sd, lb_hot_gained[idle]);
-			schedstat_inc(p, se.nr_forced_migrations);
+			schedstat_inc(p, se.statistics.nr_forced_migrations);
 		}
 #endif
 		return 1;
 	}
 
 	if (tsk_cache_hot) {
-		schedstat_inc(p, se.nr_failed_migrations_hot);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
 		return 0;
 	}
 	return 1;
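A note on why the rename is purely mechanical in this file: every touched field is either updated through the schedstat_set()/schedstat_inc()/schedstat_add() helpers or sits inside an explicit #ifdef CONFIG_SCHEDSTATS block (as in update_stats_wait_end() and enqueue_sleeper() above). Roughly, assuming the kernel/sched_stats.h definitions of that era (paraphrased here, not part of this patch), the helpers expand to plain field accesses with schedstats enabled and compile away entirely otherwise:

#ifdef CONFIG_SCHEDSTATS
/* Statistics updates expand to plain field accesses ... */
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else
/* ... and become no-ops when CONFIG_SCHEDSTATS is off, so the renamed
 * statistics members are never referenced on !SCHEDSTATS builds. */
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif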